	 kq hBhBby name         by name         BUILD           d   	k`g      A            hBhB                            ./               A          A          
k 8Z4@m            hBhBhB                   ./lpp_name               m          m          4 R S gpfs.docs {
gpfs.docs.data 2.3.0.4 01 N H en_US GPFS Server Manpages and Documentation
[
*ifreq gpfs.base (2.3.0.0) 2.3.0.3
%
/usr/share/man/man1 104
/usr/share/lpp/SAVESPACE 1384
/usr/share/lib/objrepos 40
/usr/share/man/man3 400
/usr/share/man/man5 24
/usr/share/man/man8 856
INSTWORK 104 80
%
%
%
IY63969  3 LX: Assert exp(ofP->inodeFlushHolder in line 7900 of metadata.C
IY66584  2 Debugging bad magic for 491116
IY70341  2  DOCUMENTATION ADDITION REQUEST IN THE CONCEPTS, PLANNING AND I
IY73359  3 ERRLOG ENTRY FROM METADATA-VFS.C RC=-1 ERRNO=2
IY72920  3 ASSERT FAILED: ADVLKOBJP == 0, OPENFILE.C, LI
IY73512  3 SLES 9 - out of memory when opening a dir
IY73513  3 Release and PTF version updates
%
]
}
     
k       A            hBhB                            ./usr/share              A          A          
k@      A            hBhB                            ./usr/share/lpp          A          A          kܗ      A            hBhB                            ./usr/share/lpp/gpfs.docs/gpfs.docs.data/2.3.0.4                 A          A          l4 Y4@m            hBhBhB             I      ./usr/share/lpp/gpfs.docs/gpfs.docs.data/2.3.0.4/liblpp.a                m          m            	  as
/=emnopr-.34cdfghiltuy",0128_b5679:EFILkvwz	DS\ #ACMNOPRTqx>Y`$&HU(BGV[]j|!)<JKQX H *_	b_0P@Qc<B/Bj&|MI``dZQ ,``yB
_/ϑI26L' 8 JO
n)4c
K KFE0u38``18B  20obb

,`'(P_
$ؙdI29@   =02Z)AAAAAAAAAAAAAAAAAAA,U@HBf R3*ܰ
AAAAAAAAAAAAAAAAAAAA@HB`4 (	 HPZ)AAAAAAAAAAAAAAAAAAA,ӤbJUP4)>SpkNM"t\{:i+ψkΜ_.n6fIY8AK9`zd_$vO&aIےuI`A۠u]A؋Bi$ꂒ%`` (5o1'TIcݸ::0$5MJI%s4N#sOE$E<|AATF
AAAAAAAAAAAAAAAAAAAA@HBcX͖MIFXP= 5:>GY26)K />n@Ҕ"n ^b#vP
\(X 8v @XP JA_ך䜛# A8\)Dր,' VAD (ůkrNMFӌRMhX
padI,`ZEq5'&JY ;={βOlLM VAD (Ů&$ݑS@ҔBY'6&Y&u+pk Ic  ְ
AAAAAAAAAAAAAAAAAAAA u@HBb)AAAAAAAAAAAAAAAAAAA,  	
 0
@@]k AAAAAAAAAAAAAAAAAAA S1k AAAAAAAAAAAAAAAAAAA S,acE0
7,``E
/_#bem'D A8P[Щ1l{BNd_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ,_X(@!kb6Z71>-yJH`AۏF%ۖYkՓ!AaJI*%	j>GY&$N{A -G,g3K-
PǽƢ"*Kn=/],'mB%W{zTJ0t|MI/m
)b02~@eq[Щ1l{rtbD_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ(h8Qqŋ,g	Zإ͖M}Acޕȋ`AۏF%ۖYkՓ!AaJI*%	j>GY&$N{_P1E1[Щ1l{ҰSEOn=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ۂ
1j?n Xa&!kb6Z71>-zWpZL`AۏF%ۖYkՓ!AaJI*%	j>GY&$N{ /AZۈ
,X'V!kb6Z71>-|NH`AۏF%ۖYkՓ!AaJI*%	j>GY&$N{偁1j?n P@<B.l*nc}@#ce@L*Kn=/],'mB%W{zTJ0t|MI/mŃ1j?n P0 B.l*nc}@#r@F	(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvXU8Qqb)seSsPc0`'TQIAÓ$uCǣۏFm,jpUޥ$]5D#bem'KD=qcEAZۈ41B.l*nc}@#	
A (:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܲ
_(`<B.l*nc}@#	
AI%On=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ۂ0b02~@`)seSsPc0`M'@R
N2%On=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ی(X"`eE++K-
P)	2	 Q(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIv`0E8QqZإ͖M}Au92J_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷXѫq
,P3K-
P)
$2Iv*rdt=`>qR[qeMY;n1*ԤQkfulLMhnXAkq-lRB>
a:mÑMuP$uCǣۏFm,jpUޥ$]5D#bem'KD=qe_P`_f!kb6Z71>S8qp"i%On=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ی(8Qqb)seSsPc0cFn`<rdt=`>qR[qeMY;n1*ԤQkfulLMhn, -GYB1[Щ1|$uCǣۏFm,jpUޥ$]5D#bem'KD=qA -GPѿX\hTL>GXrtsJ_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷXU8Qq(PB.l*nc}@#qQ:M9%On=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ی,YKq /+K-
P)t]¤@ԜI@AۏF%ۖYkՓ!AaJI*%	j>GY&$N{
1j?n +Zإ͖M}Au:!TIÓ$uCǣۏFm,jpUޥ$]5D#bem'KD=qeAZۈ_V!kb6Z71>S8T"h(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvX -G4@?f!kb6Z71>S8N$uCǣۏFm,jpUޥ$]5D#bem'KD=q  -G
4gV!kb6Z71>S8&8=Î^8T(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvX(02`e`1[Щ1|ǹ1]¤@Ԝ8zII(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvXѥ/AZۈ(`B.l*nc}@#qLqp!P5'SI@AۏF%ۖYkՓ!AaJI*%	j>GY&$N{
-b02~@P@1_X\hTL>GXܘRBjN=QҤ$uCǣۏFm,jpUޥ$]5D#bem'KD=q1j?n `(0(B.l*nc}@#qLqp"h:$*J_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ,``eFZإ͖M}Au=Ɏ&)t]¤@ԜI@AۏF%ۖYkՓ!AaJI*%	j>GY&$N{_W3K-
P){pOq%On=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ی4R`e``X\hTL>GXܘJ:qxRBjN$uCǣۏFm,jpUޥ$]5D#bem'KD=qJ_Y`cEb)seSsPc0crtbDt=`>qR[qeMY;n1*ԤQkfulLMhn0XKq/+K-
P)&&I@AۏF%ۖYkՓ!AaJI*%	j>GY&$N{)b02~AcE)seSsPc0c1(ۉ(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvX2`e1E
b)seSsPc0c992J_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ)b02~@1x\hTL>GXOrtsJ_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ0b02~@-lRB>
a:#:t=`>qR[qeMY;n1*ԤQkfulLMhn0h -G(h)X\hTL>GXf4*Kn=/],'mB%W{zTJ0t|MI/m
-b02~@ b[Щ1|i̒*Kn=/],'mB%W{zTJ0t|MI/m(Z`ec/+K-
P)F	(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIv` -GP(B.l*nc}@#r3u(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܱAZۈ(P<B.l*nc}@#r6e@rdt=`>qR[qeMY;n1*ԤQkfulLMhn
4b`ecAAx\hTL>GX89DP:!tтImǣkZ5dHPXBĪoRJFqOI26@K_X`)seSsPc0c*8u(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvX((8Qq1[Щ1|ѓ9DYJ_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ,j`eW	Zإ͖M}AuI%	(:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܱBƯAZۈ,`OB.l*nc}@#tNH*Kn=/],'mB%W{zTJ0t|MI/mŃ1j?n c1[Щ1|F5Ĕt=`>qR[qeMY;n1*ԤQkfulLMhn0XKq/+K-
P)idt=`>qR[qeMY;n1*ԤQkfulLMhn0Xe8Qq -lRB> ٳF)"@*Kn=/],'mB%W{zTJ0t|MI/mƌ1j?n Y@B.l*nc}@Mpj&:_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ0e8Qq `@񘅭\hT-Xu䁃On=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ۃ
)b02~@ @x\hT=ia_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ(h*`e
WX\hT=i$h:!tтImǣkZ5dHPXBĪoRJFqOI26`/AZۈ(1@!kb6Z71>5}(_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ-b02~@`B.l*nc}@k`b$h:!tтImǣkZ5dHPXBĪoRJFqOI26  E8Qqq[Щ1{@Ĕ2FOn=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ۂ1j?n P`\B.l*nc}@k209t=`>qR[qeMY;n1*ԤQkfulLMhn,akq/X.!kb6Z71>5y4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvX0Aq AK,f!kb6Z71>5y8SFOn=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!ۖ
Z`ecE[Щ1{@$h:!tтImǣkZ5dHPXBĪoRJFqOI260(8Qq e,V!kb6Z71>5y<4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIv`(E8Qq4+K-
PǼ!f4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIv`08Qqb)seSsPcX3Rtt=`>qR[qeMY;n1*ԤQkfulLMhnQeAZۈ(Y@b)seSsPcX3AۏF%ۖYkՓ!AaJI*%	j>GY&$N{5b02~@@ł)seSsPcX8Y_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ(E/AZۈ,/K-
PǼ*VS$h:!tтImǣkZ5dHPXBĪoRJFqOI26`cW -G	b)seSsPcX04AۏF%ۖYkՓ!AaJI*%	j>GY&$N{ŋR`e(_x\hT=tHuCǣۏFm,jpUޥ$]5D#bem'KD=r_0OK-
PǼu$h:!tтImǣkZ5dHPXBĪoRJFqOI26BAZۈQb[Щ1{q9HQ#A*Kn=/],'mB%W{zTJ0t|MI/mX8QqWZإ͖M}Acku4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvhU8Qq 1[Щ1{rFa_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ1j?n ` 񸅭\hT=#AۏF%ۖYkՓ!AaJI*%	j>GY&$N{_,P[Щ1{rP$h:!tтImǣkZ5dHPXBĪoRJFqOI26AKq[Щ1{rPIt=`>qR[qeMY;n1*ԤQkfulLMhnX_,B<B.l*nc}@k\¥hE2FOn=/ϜTz0_&nYeVNۄ/J)$`'$d,i:_!۔P 8Qq(XB.l*nc}@kN$h:!tтImǣkZ5dHPXBĪoRJFqOI26@W -G(o	Zإ͖M}Ac@rNt=`>qR[qeMY;n1*ԤQkfulLMhnZ`e1cAK1[Щ1{u4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܢŁAZۈ,)seSsPcXf4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvX2_(`@oc1[Щ1{γHAۏF%ۖYkՓ!AaJI*%	j>GY&$N{1j?n AZإ͖M}Ac{74:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܰbƯAZۈ(`K-
Pǽ3L$h:!tтImǣkZ5dHPXBĪoRJFqOI26!b/AZۈ(POK-
Pǽ+1%-7LAۏF%ۖYkՓ!AaJI*%	j>GY&$N{
,8Qq,gV!kb6Z71>5zVd`r'4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIv`akq
-lRB>JF	t=`>qR[qeMY;n1*ԤQkfulLMhn1j?n Qe	b)seSsPcXgY#A*Kn=/],'mB%W{zTJ0t|MI/m_(a_&!kb6Z71>5zVHuCǣۏFm,jpUޥ$]5D#bem'KD=r/AZۈ4Qb[Щ1{ҸY_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷQkq Eb)seSsPcX#A*Kn=/],'mB%W{zTJ0t|MI/m_4Ŋ-lRB>E:4:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܡb -G(/K-
PǼ)0$h:!tтImǣkZ5dHPXBĪoRJFqOI26E1j?n X((/^!kb6Z71>5|Rtt=`>qR[qeMY;n1*ԤQkfulLMhnP8Qq `e)seSsPcXϊn:t=`>qR[qeMY;n1*ԤQkfulLMhnX -G,K-
PǾnR)#A*Kn=/],'mB%W{zTJ0t|MI/m_,X`_K-
PǾn7d_:z0_8-`|Mvܲ_	
_]RIQ(5N3TI:6&Y&tCݷ1j?n _Zإ͖M}Ac7ZsFN:t=`>qR[qeMY;n1*ԤQkfulLMhn
0j`eZإ͖M}Ac9$j6HuCǣۏFm,jpUޥ$]5D#bem'KD=paAW -GXOb[Щ1{7JHuCǣۏFm,jpUޥ$]5D#bem'KD=r1j?n `1@b)seSsPcX
&lCHuCǣۏFm,jpUޥ$]5D#bem'KD=p0aW -GPK-
PǾR54:{q|⤶т5r-v$(,!bUwI%D A8Q'$ؙdIvܰ0`58Qq[Щ1{:jHAۏF%ۖYkՓ!AaJI*%	j>GY&$N{_,X^!k3
0X
7X+(P/,X7#bem'D! )seSsPcb[ЩJL0  P #P.l*nbHdG7J7)lRB>
)lRB> 񅭊\hTh"(oB00PAyB
_ulLMH :b6Z71>-yRȋk%\hT=*1"E)seSsPcS$Zb6Z71>-yNHk%\hT=]&X͖M}Acޕ"-dK-
Pǽ+TH.l*nc}@[("KЩ1|a92JY,RB>
a:=@$ `LKЩ1|a(F ɒRb6Z71>St EHrdX͖M}Au4H# ):c$.l*nc}@#	
AIC$.l*nc}@#$&Fc$.l*nc}@#&npɒRb6Z71>SI4d1:Tp)dK-
P)bQF5e@KЩ1|>CII)dK-
P)j3t$.l*nc}@#qA4)dK-
P)qNNvIK%\hTL>GX㤢tsJY,RB>
a:78qp!P5'Rb6Z71>S8T!P5'LKЩ1|]¤9G&IK%\hTL>GX:LKЩ1|ǹ1\)t]¤@ԜIK%\hTL>GXܘRBjN=Q]¤X͖M}Au=Ɏ:$*
{)dK-
P){u*HTIÇ:TX͖M}Au=Ɏ:$Mt]¤@ԜIK%\hTL>GXܘRhBN/u*HTIĔX͖M}Au=ɎTFpKЩ1|ǹ1
)t]¤@ԜIK%\hTL>GXܝ%,)seSsPc0cӘ)dK-
P)FIK%\hTL>GXNE'LKЩ1|Ȟ4dX͖M}AuF2t	)dK-
P)FRb6Z71>S95Rb6Z71>S9IRb6Z71>S9䔲X͖M}AuF̨LKЩ1|pLr0$.l*nc}@#te@9G%,)seSsPc0c'@r0*X͖M}AuI%	)dK-
P)%$.l*nc}@#uMq%,)seSsPc0c:LKЩ1 6=lсH5.l*nc}@Mpj&:Y,RB> jìO$dK-
PǾ4l0%\hT=i$jb6Z71>5}(Y,RB>F)"F)seSsPcX)idY,RB>1#<%\hT= j6HՒ.l*nc}@kq9HDY,RB>1%\hT= HՒ.l*nc}@kHՒ.l*nc}@k&bJZn#VKЩ1{̙HՒ.l*nc}@k'4%\hT=NR"#VKЩ1{̘mHՒ.l*nc}@k\sE:$jb6Z71>5y8)'HՒ.l*nc}@k\pR)`HՒ.l*nc}@k\t󬑫%\hT=#QF)seSsPcX䎲F)seSsPcX\HՒ.l*nc}@k\,F)seSsPcX+B)5dK-
PǼӉX͖M}Ac@rNX͖M}Ac(3%\hT=YY,RB>:M"F)seSsPcX:MčY,RB>E:X͖M}AcޕHՒ.l*nc}@kO$jb6Z71>5zVj6HՒ.l*nc}@k$jb6Z71>5zVHՒ.l*nc}@ki#VKЩ1{%\hT=9HQ8F)seSsPcX"FY,RB>bJZn#VKЩ1{7dY,RB>tHՒ.l*nc}@kӌu5dK-
PǾn挜u5dK-
PǾrHl0%\hT=PH$jb6Z71>5}f:Y,RB>TFdY,RB>i"FX`OQ,XB,(P/,`>GY&$N9m'@!Ov(
XC9щOp 8 &%%- E#]=i#`q;{i:$MY;nrIn dp8J$Q"k>GY&$NQ"Y:}K8 P8J$Q"k"|-g"BP-`EF'\=CFz&gFy`ŋXv"A:VqHFp|MI$E0-W8n Br <`D)ۅ 0("sH
 	 $`N$(  	np  X,, @0	p	'mXr):ܝ#nID$. ZϑI26qHJBUN/ۈX
{@
Q@`(}:1=@`( l  H@# 4 b 	   ^ `	M!5dE'[u>	(DY:6&Y&t0I`\ #$EP -bZq~@@x:Sݷ
 `Q,
 `@0PX@`$ p ( P `/kqBp HHPXA N!5dE'[u>	(DY:6&Y&t0I`\ #$EP -bZq~@@x:Sݷ
 `RŌXC9щ+|Nj:d#FkVNۜ Ru;ۈ>GY#0I`\ #bem'@	(DY:}K8 P8J$Q%p!`E'ZD=p(YK(}:1=rNwP7`4'tRFn	'mXr):ܝ#nID$. ZϑI26qHKd% (a%(k"|-c
P`W``
7` 1c
0X0N&bhI26L'>GY&$Nm1 CulLM`'I26m CulLMH#bem'D6 %k vI4@$          AAA                   ./usr/share/man/man1/mmchattr.1          $          $          .TH mmchattr 11/01/04
mmchattr Command
.SH "Name"
.PP
\fBmmchattr\fR - Changes the replication attributes of and the I/O
caching policy for one or more GPFS files.
.SH "Synopsis"
.PP
\fBmmchattr\fR [\fB-D {\fR\fByes\fR\fB | no}\fR
] [\fB-m\fR \fIMetadataReplicas\fR] [\fB-M\fR
\fIMaxMetadataReplicas\fR] [\fB-r\fR
\fIDataReplicas\fR] [\fB-R\fR \fIMaxDataReplicas\fR]
\fIFilename\fR [\fIFilename\fR ...]
.SH "Description"
.PP
Use the \fBmmchattr\fR command to change the replication attributes of
and the I/O caching policy for files in the GPFS file system.
.PP
The replication factor must be less than or equal to the maximum
replication factor for the file. If insufficient space is available in
the file system to increase the number of replicas to the value requested, the
\fBmmchattr\fR command ends. However, some blocks of the file may
have their replication factor increased after the \fBmmchattr\fR command
ends. If additional free space becomes available in the file system at
a later time (when, for example, you add another disk to the file system), you
can then issue the \fBmmrestripefs\fR command with
the \fB-r\fR or \fB-b\fR option to complete the replication of the
file. You can use the \fBmmlsattr\fR command
to display the replication values.
.PP
The Direct I/O caching policy bypasses file cache and transfers data
directly from disk into the user space buffer, as opposed to using the normal
cache policy of placing pages in kernel memory. Applications with poor
cache hit rates or very large I/Os may benefit from the use of Direct
I/O.
.PP
The \fBmmchattr\fR command can be run against a file in use.
.PP
You must have write permission for the files whose attributes you are
changing.
.SH "Parameters"
.PP
.RS +3
\fB\fIFilename\fR [\fIFilename\fR ...]
\fR
.RE
.RS +9
The name of one or more files to be changed. Delimit each file name
by a space. Wildcard characters are supported in file names, for
example, \fBproject*.sched\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-D {yes | no}
\fR
.RE
.RS +9
Enable or disable the Direct I/O caching policy for files.
.RE
.PP
.RS +3
\fB-m \fIMetadataReplicas\fR
\fR
.RE
.RS +9
Specifies how many copies of the file system's metadata to
create. Enter a value of 1 or 2, but not greater than the value of the
\fIMaxMetadataReplicas\fR attribute of the file.
.RE
.PP
.RS +3
\fB-M \fIMaxMetadataReplicas\fR
\fR
.RE
.RS +9
The maximum number of copies of indirect blocks for a file. Space
is reserved in the inode for all possible copies of pointers to indirect
blocks. Valid values are 1 and 2, but cannot be less than \fIDefaultMetadataReplicas\fR. The default is
1.
.RE
.PP
.RS +3
\fB-r \fIDataReplicas\fR
\fR
.RE
.RS +9
Specifies how many copies of the file data to create. Enter a value
of 1 or 2, but not greater than the value of the \fIMaxDataReplicas\fR
attribute of the file.
.RE
.PP
.RS +3
\fB-R \fIMaxDataReplicas\fR
\fR
.RE
.RS +9
The maximum number of copies of data blocks for a file. Space is
reserved in the inode and indirect blocks for all possible copies of pointers
to data blocks. Valid values are 1 and 2 but cannot be less than \fIDefaultDataReplicas\fR. The default is
1.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have write access to the file to run the \fBmmchattr\fR
command.
.PP
You may issue the \fBmmchattr\fR command only from a node in the
GPFS cluster where the file system is mounted.
.PP
When considering data replication for files accessible to SANergy, 
see \fIAppendix B,  SANergy export: restrictions and considerations\fR
in \fIGPFS: Administration and Programming Reference\fR.
.SH "Examples"
.PP
To change the metadata replication factor to 2 and the data replication
factor to 2 for the \fBproject7.resource\fR file in file system
\fBfs1\fR, enter:
.sp
.nf
mmchattr -m 2 -r 2 /fs1/project7.resource
.fi
.sp
.PP
To confirm the change, enter:
.sp
.nf
mmlsattr project7.resource
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
  replication factors
metadata(max) data(max) file    [flags]
------------- --------- ---------------
      2 (  2)   2 (  2) /fs1/project7.resource
.fi
.sp
.SH "See also"
.PP
mmcrfs Command
.PP
mmlsattr Command
.PP
mmlsfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
kg xI4@$          AAA                   ./usr/share/man/man1/mmdelacl.1          $          $          .TH mmdelacl 11/01/04
mmdelacl Command
.SH "Name"
.PP
\fBmmdelacl\fR - Deletes a GPFS access control list.
.SH "Synopsis"
.PP
\fBmmdelacl\fR [\fB-d\fR] \fIFilename\fR
.SH "Description"
.PP
Use the \fBmmdelacl\fR command to delete the extended entries of an
access ACL of a file or directory, or to delete the default ACL of a
directory.
.SH "Parameters"
.PP
.RS +3
\fB\fIFilename\fR
\fR
.RE
.RS +9
The path name of the file or directory for which the ACL is to be
deleted. If the \fB-d\fR option is specified, \fIFilename\fR must
contain the name of a directory.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Specifies that the default ACL of a directory is to be deleted. 
.PP
Since there can be only one NFS V4 ACL (no separate default),
specifying the \fB-d\fR flag for a file with an NFS V4 ACL is an
error. Deleting an NFS V4 ACL necessarily removes both the ACL and any
inheritable entries contained in it.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
The \fBmmdelacl\fR command may be issued only by the file or directory
owner, the root user, or by someone with control (c) authority in the ACL for
the file.
.PP
You may issue the \fBmmdelacl\fR command only from a node in the GPFS
cluster where the file system is mounted.
.SH "Examples"
.PP
To delete the default ACL for a directory named \fBproject2\fR,
enter:
.sp
.nf
mmdelacl -d project2
.fi
.sp
.PP
To confirm the deletion, enter 
.sp
.nf
mmgetacl -d project2
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
#owner:uno
#group:system
.fi
.sp
.SH "See also"
.PP
mmeditacl Command
.PP
mmgetacl Command
.PP
mmputacl Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
 c...]k zI4@$        ;  AAA             ;      ./usr/share/man/man1/mmeditacl.1                 $          $          .TH mmeditacl 11/01/04
mmeditacl Command
.SH "Name"
.PP
\fBmmeditacl\fR - Creates or changes a GPFS access control
list.
.SH "Synopsis"
.PP
\fBmmeditacl\fR [\fB-d\fR] [\fB-k {nfs4 |\fR \fBposix\fR \fB|
native}\fR] \fIFilename\fR
.SH "Description"
.PP
Use the \fBmmeditacl\fR command for interactive editing of the ACL of a
file or directory. This command uses the default editor, specified in
the EDITOR environment variable, to display the current access control
information, and allows the file owner to change it. The command
verifies the change request with the user before making permanent
changes.
.PP
The EDITOR environment variable must contain a complete path name, for
example: 
.sp
.nf
export EDITOR=/bin/vi
.fi
.sp
.PP
For information about NFS V4 ACLs, see \fIManaging GPFS access control lists and NFS export and NFS and GPFS\fR
in \fIGPFS: Administration and Programming Reference\fR.
.PP
Users may need to see ACLs in their true form as well as how they
are translated for access evaluations. There are four cases:
.RS +3
.HP 3
1. By default, \fBmmeditacl\fR returns the ACL in a format consistent with
the file system setting, specified using the \fB-k\fR flag on the \fBmmcrfs\fR or \fBmmchfs\fR commands. 
.RS +3
.HP 3
\(bu If the setting is \fBposix\fR, the ACL is shown as a traditional
ACL.
.HP 3
\(bu If the setting is \fBnfs4\fR, the ACL is shown as an NFS V4 ACL.
.HP 3
\(bu If the setting is \fBall\fR, the ACL is returned in its true
form.
.RE
.HP 3
2. The command \fBmmeditacl -k nfs4\fR always produces an NFS V4
ACL.
.HP 3
3. The command \fBmmeditacl -k posix\fR always produces a traditional
ACL.
.HP 3
4. The command \fBmmeditacl -k native\fR always shows the ACL in its true
form regardless of the file system setting.
.RE
.PP
This table describes how \fBmmeditacl\fR works.
.br
.PP
\fBTable 17. The mmeditacl command for POSIX and NFS V4 ACLs\fR
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l l l l.
Command~ACL~mmcrfs -k~Display~-d (default)
\fBmmeditacl\fR~posix~posix~Access ACL~Default ACL
\fBmmeditacl\fR~posix~nfs4~NFS V4 ACL~Error[1]
\fBmmeditacl\fR~posix~all~Access ACL~Default ACL
\fBmmeditacl\fR~nfs4~posix~Access ACL[2]~Default ACL[2]
\fBmmeditacl\fR~nfs4~nfs4~NFS V4 ACL~Error[1]
\fBmmeditacl\fR~nfs4~all~NFS V4 ACL~Error[1]
\fBmmeditacl -k native\fR~posix~any~Access ACL~Default ACL
\fBmmeditacl -k native\fR~nfs4~any~NFS V4 ACL~Error[1]
\fBmmeditacl -k posix\fR~posix~any~Access ACL~Default ACL
\fBmmeditacl -k posix\fR~nfs4~any~Access ACL[2]~Default ACL[2]
\fBmmeditacl -k nfs4\fR~any~any~NFS V4 ACL~Error[1]
.TE
.sp
.fi
.PP
[1] NFS V4 ACLs include inherited entries. Consequently, there cannot
be a separate default ACL.
.PP
[2] Only the mode entries (owner, group, everyone) are translated. 
The \fBrwx\fR values are derived from the NFS V4 file mode attribute.
Since the NFS V4 ACL is more granular in nature, some information is
lost in this translation.
.RE
.PP
In the case of NFS V4 ACLs, there is no concept of a default
ACL. Instead, there is a single ACL and the individual access control
entries can be flagged as being inherited (either by files, directories, both,
or neither). Consequently, specifying the \fB-d\fR flag for an NFS
V4 ACL is an error. By its nature, storing an NFS V4 ACL implies
changing the inheritable entries (the GPFS default ACL) as well.
.PP
Depending on the file system's \fB-k\fR setting
(\fBposix\fR, \fBnfs4\fR, or \fBall\fR), \fBmmeditacl\fR may be
restricted. The \fBmmeditacl\fR command is not allowed to store an
NFS V4 ACL if \fB-k posix\fR is in effect, and is not allowed to store a
POSIX ACL if \fB-k nfs4\fR is in effect. For more information, see
the description of the \fB-k\fR flag for the \fBmmdeldisk\fR, \fBmmcrfs\fR,
and \fBmmlsfs\fR commands.
.SH "Parameters"
.PP
.RS +3
\fB\fIFilename\fR
\fR
.RE
.RS +9
The path name of the file or directory for which the ACL is to be
edited. If the \fB-d\fR option is specified, \fIFilename\fR must
contain the name of a directory.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Specifies that the default ACL of a directory is to be edited.
.RE
.PP
.RS +3
\fB-k {nfs4 | \fBposix\fR | native}
\fR
.RE
.RS +9
.PP
.RS +3
\fBnfs4
\fR
.RE
.RS +9
Always produces an NFS V4 ACL.
.RE
.PP
.RS +3
\fBposix
\fR
.RE
.RS +9
Always produces a traditional ACL.
.RE
.PP
.RS +3
\fBnative
\fR
.RE
.RS +9
Always shows the ACL in its true form regardless of the file system
setting.
.RE
.PP
This option should not be used for routine ACL manipulation. It is
intended to provide a way to show the translations that are done. For
example, if a \fBposix\fR ACL is translated by NFS V4. Beware that
if the \fB-k nfs4\fR flag is used, but the file system does not allow NFS
V4 ACLs, you will not be able to store the ACL that is returned. If the
file system does support NFS V4 ACLs, the \fB-k nfs4\fR flag is an easy way
to convert an existing \fBposix\fR ACL to \fBnfs4\fR format.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You may issue the \fBmmeditacl\fR command only from a node in the GPFS
cluster where the file system is mounted.
.PP
The \fBmmeditacl\fR command may be used to display an ACL. POSIX
ACLs may be displayed by any user with access to the file or directory.
NFSv4 ACLs have a READ_ACL permission that is required for non-privileged
users to be able to see an ACL. To change an existing ACL, the user
must either be the owner, the root user, or someone with control permission
(WRITE_ACL is required where the existing ACL is of type NFS
V4).
.SH "Examples"
.PP
To edit the ACL for a file named \fBproject2.history\fR,
enter:
.sp
.nf
mmeditacl project2.history
.fi
.sp
.PP
The current ACL entries are displayed using the default editor, provided
that the EDITOR environment variable specifies a complete path name.
When the file is saved, the system displays information similar to:
.sp
.nf
mmeditacl: 6027-967 Should the modified ACL be applied? (yes) or (no)\ 
.fi
.sp
.PP
After responding \fByes\fR, the ACLs are applied.
.SH "See also"
.PP
mmdelacl Command
.PP
mmgetacl Command
.PP
mmputacl Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ault kꮟ |I4@$          AAA                   ./usr/share/man/man1/mmgetacl.1          $          $          .TH mmgetacl 11/01/04
mmgetacl Command
.SH "Name"
.PP
\fBmmgetacl\fR - Displays the GPFS access control list of a file
or directory.
.SH "Synopsis"
.PP
\fBmmgetacl\fR [\fB-d\fR] [\fB-o\fR
\fIOutFilename\fR] [\fB-k {nfs4 |\fR\fBposix\fR\fB |
native}\fR] \fIFilename\fR
.SH "Description"
.PP
Use the \fBmmgetacl\fR command to display the ACL of a file or
directory.
.PP
For information about NFS V4 ACLs, see \fIManaging GPFS access control lists and NFS export and NFS and GPFS\fR
in \fIGPFS: Administration and Programming Reference\fR.
.PP
Users may need to see ACLs in their true form as well as how they
are translated for access evaluations. There are four cases:
.RS +3
.HP 3
1. By default, \fBmmgetacl\fR returns the ACL in a format consistent with
the file system setting, specified using the \fB-k\fR flag on the \fBmmcrfs\fR or \fBmmchfs\fR commands. 
.sp
If the setting is \fBposix\fR, the ACL is shown as a traditional
ACL.
.sp
If the setting is \fBnfs4\fR, the ACL is shown as an NFS V4 ACL.
.sp
If the setting is \fBall\fR, the ACL is returned in its true
form.
.HP 3
2. The command \fBmmgetacl -k nfs4\fR always produces an NFS V4
ACL.
.HP 3
3. The command \fBmmgetacl -k posix\fR always produces a traditional
ACL.
.HP 3
4. The command \fBmmgetacl -k native\fR always shows the ACL in its true
form regardless of the file system setting.
.RE
.PP
This table describes how \fBmmgetacl\fR works.
.br
.PP
\fBTable 18. The mmgetacl command for POSIX and NFS V4 ACLs\fR
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l l l l.
Command~ACL~mmcrfs -k~Display~-d (default)
\fBmmgetacl\fR~posix~posix~Access ACL~Default ACL
\fBmmgetacl\fR~posix~nfs4~NFS V4 ACL~Error[1]
\fBmmgetacl\fR~posix~all~Access ACL~Default ACL
\fBmmgetacl\fR~nfs4~posix~Access ACL[2]~Default ACL[2]
\fBmmgetacl\fR~nfs4~nfs4~NFS V4 ACL~Error[1]
\fBmmgetacl\fR~nfs4~all~NFS V4 ACL~Error[1]
\fBmmgetacl -k native\fR~posix~any~Access ACL~Default ACL
\fBmmgetacl -k native\fR~nfs4~any~NFS V4 ACL~Error[1]
\fBmmgetacl -k posix\fR~posix~any~Access ACL~Default ACL
\fBmmgetacl -k posix\fR~nfs4~any~Access ACL[2]~Default ACL[2]
\fBmmgetacl -k nfs4\fR~any~any~NFS V4 ACL~Error[1]
.TE
.sp
.fi
.PP
[1] NFS V4 ACLs include inherited entries. Consequently, there cannot
be a separate default ACL.
.PP
[2] Only the mode entries (owner, group, everyone) are translated.
The fBrwx\fR values are derived from the NFS V4 file mode attribute.
Since the NFS V4 ACL is more granular in nature, some information is
lost in this translation.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIFilename\fR
\fR
.RE
.RS +9
The path name of the file or directory for which the ACL is to be
displayed. If the \fB-d\fR option is specified, \fIFilename\fR
must contain the name of a directory.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Specifies that the default ACL of a directory is to be displayed.
.RE
.PP
.RS +3
\fB-k {nfs4 | \fBposix\fR | native}
\fR
.RE
.RS +9
.PP
.RS +3
\fBnfs4
\fR
.RE
.RS +9
Always produces an NFS V4 ACL.
.RE
.PP
.RS +3
\fBposix
\fR
.RE
.RS +9
Always produces a traditional ACL.
.RE
.PP
.RS +3
\fBnative
\fR
.RE
.RS +9
Always shows the ACL in its true form regardless of the file system
setting.
.RE
.RE
.PP
.RS +3
\fB-o \fIOutFilename\fR
\fR
.RE
.RS +9
The path name of a file to which the ACL is to be written.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have read access to the directory where the file exists to run the
\fBmmgetacl\fR command.
.PP
You may issue the \fBmmgetacl\fR command only from a node in the
GPFS cluster where the file system is mounted.
.SH "Examples"
.RS +3
.HP 3
1. To display the ACL for a file named \fBproject2.history\fR,
enter:
.sp
.nf
mmgetacl project2.history
.fi
.sp
The system displays information similar to:
.sp
.nf
#owner:paul
#group:design
user::rwxc
group::r-x-
other::r-x-
.fi
.sp
.HP 3
2. This is an example of an NFS V4 ACL displayed using
\fBmmgetacl\fR. Each entry consists of three lines reflecting the
greater number of permissions in a text format. An entry is either an
\fBallow\fR entry or a \fBdeny\fR entry. An \fBX\fR indicates
that the particular permission is selected, a minus sign (\fB-\fR)
indicates that is it not selected. The following access control entry
explicitly allows \fBREAD\fR, \fBEXECUTE\fR and \fBREAD_ATTR\fR to
the \fBstaff\fR group on a file:
.sp
.nf
group:staff:r-x-:allow
 (X)READ/LIST (-)WRITE/CREATE (-)MKDIR (-)SYNCHRONIZE (-)READ_ACL  (X)READ_ATTR  (-)READ_NAMED
 (-)DELETE    (-)DELETE_CHILD (-)CHOWN (X)EXEC/SEARCH (-)WRITE_ACL (-)WRITE_ATTR (-)WRITE_NAMED
.fi
.sp
.HP 3
3. This is an example of a directory ACLs, which may include
\fIinherit\fR entries (the equivalent of a default ACL). These do
not apply to the directory itself, but instead become the initial ACL for any
objects created within the directory. The following access control
entry explicitly denies \fBREAD/LIST\fR, \fBREAD_ATTR\fR, and
\fBEXEC/SEARCH\fR to the \fBsys\fR group.
.sp
.nf
group:sys:----:deny:DirInherit
 (X)READ/LIST (-)WRITE/CREATE (-)MKDIR (-)SYNCHRONIZE (-)READ_ACL  (X)READ_ATTR  (-)READ_NAMED
 (-)DELETE    (-)DELETE_CHILD (-)CHOWN (X)EXEC/SEARCH (-)WRITE_ACL (-)WRITE_ATTR (-)WRITE_NAMED
.fi
.sp
.RE
.SH "See also"
.PP
mmeditacl Command
.PP
mmdelacl Command
.PP
mmputacl Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
 ACLk ~I4@$          AAA                   ./usr/share/man/man1/mmlsattr.1          $          $          .TH mmlsattr 11/01/04
mmlsattr Command
.SH "Name"
.PP
\fBmmlsattr\fR - Queries file attributes.
.SH "Synopsis"
.PP
\fBmmlsattr\fR \fIFileName\fR[ \fIFileName\fR...]
.SH "Description"
.PP
Use the \fBmmlsattr\fR command to display replication attributes of a
file.
.PP
\fBResults\fR
.PP
For the specified file, the \fBmmlsattr\fR command lists:
.RS +3
.HP 3
\(bu Current number of copies of data for a file and the maximum value
.HP 3
\(bu Number of copies of the metadata for a file and the maximum value
.HP 3
\(bu Whether the Direct I/O caching policy is in effect for a file
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIFileName\fR
\fR
.RE
.RS +9
The name of the file to be queried. You must enter at least one
file name. Wildcard characters are supported for file names, for
example, \fBproject*.sched\fR.
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have read access to run the \fBmmlsattr\fR command.
.PP
You may issue the \fBmmlsattr\fR command only from a node in the
GPFS cluster where the file system is mounted.
.SH "Examples"
.RS +3
.HP 3
1. To list the attributes of a file named \fBproject4.sched\fR,
enter: 
.sp
.nf
mmlsattr /fs0/project4.sched
.fi
.sp
The system displays information similar to:
.sp
.nf
  replication factors
metadata(max) data(max) file    [flags]
------------- --------- ---------------
      1 (  2)   1 (  2)  /fs0/project4.sched
.fi
.sp
.HP 3
2. To show the attributes for all files in the root directory of file system
\fBfs0\fR, enter:
.sp
.nf
mmlsattr /fs0/*
.fi
.sp
The system displays information similar to:
.sp
.nf
  replication factors
metadata(max) data(max) file    [flags]
------------- --------- ---------------
      1 (  1)   1 (  1) /fs0/project4.sched
      1 (  1)   1 (  1) /fs0/project4.hist
      1 (  1)   1 (  1)  /fs0/project5.plan
.fi
.sp
.RE
.SH "See also"
.PP
mmchattr Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
mgek I4@$          AAA                   ./usr/share/man/man1/mmlsquota.1                 $          $          .TH mmlsquota 11/01/04
mmlsquota Command
.SH "Name"
.PP
\fBmmlsquota\fR - Displays quota information for a user or
group.
.SH "Synopsis"
.PP
\fBmmlsquota\fR [\fB-u\fR \fIUser\fR | \fB-g\fR
\fIGroup\fR] [\fB-v\fR | \fB-q\fR]
[\fB-e\fR]
.PP
Or,
.PP
\fBmmlsquota\fR \fB-d\fR {\fB-u\fR | \fB-g\fR }
.SH "Description"
.PP
For the specified \fIUser\fR or \fIGroup\fR, the
\fBmmlsquota\fR command displays information about quota limits and current
usage on each file system in the cluster. This information is displayed
only if quota limits have been established and the user has consumed some amount of storage.
If you want quota information for a \fIUser\fR or \fIGroup\fR that has no file system storage 
allocated at the present time, you must specify the \fB-v\fR option.
.PP
If neither \fB-g\fR nor \fB-u\fR is specified, the default is to
display only user quotas for the user who issues the command.
.PP
For each file system in the cluster, the \fBmmlsquota\fR command
displays:
.RS +3
.HP 3
1. Block limits:
.RS +3
.HP 3
\(bu quota type (USR or GROUP)
.HP 3
\(bu current usage in KB
.HP 3
\(bu soft limit in KB
.HP 3
\(bu hard limit in KB
.HP 3
\(bu space in doubt
.HP 3
\(bu grace period
.RE
.HP 3
2. File limits:
.RS +3
.HP 3
\(bu current number of files
.HP 3
\(bu soft limit
.HP 3
\(bu hard limit
.HP 3
\(bu files in doubt
.HP 3
\(bu grace period
.RE
.RE
.PP
Because the sum of the \fIin doubt\fR value and the current usage may
not exceed the hard limit, the actual block space and number of files
available to the user or the group may be constrained by the in doubt
value. If the in doubt value approaches a significant percentage of the
quota, run the \fBmmcheckquota\fR command to account
for the lost space and files.
.PP
GPFS quota management takes replication into account when reporting on and
determining if quota limits have been exceeded for both block and file
usage. In a file system that has either type of replication set to a
value of two, the values reported on by both the \fBmmlsquota\fR command
and the \fBmmrepquota\fR command are double the value
reported by the \fBls\fR command.
.PP
When issuing the \fBmmlsquota\fR command on a mounted file system,
negative indoubt values may be reported if the quota server processes a
combination of up-to-date and back-level information. This is a
transient situation and may be ignored.
.PP
When a quota management enabled file system is SANergy exported, the block
usage accounting of a file accessed through SANergy includes the blocks
actually used by the file and the extra blocks temporarily allocated (hyper
allocation) by SANergy. Hyper allocation is a SANergy performance
feature and can be tuned using SANergy configuration tools. For more
information, see \fITivoli SANergy: Administrator's Guide\fR at
publib.boulder.ibm.com/tividd/td/SANergy2.2.4.html.
.SH "Parameters"
.PP
NONE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Display the default quota limits for either user or group quotas.
.RE
.PP
.RS +3
\fB-e
\fR
.RE
.RS +9
Specifies that \fBmmlsquota\fR is to collect updated quota usage data
from all nodes before displaying results. If this option is not
specified, there is the potential to display negative usage values as the
quota server may process a combination of up-to-date and back-level
information.
.RE
.PP
.RS +3
\fB-g \fIGroup\fR
\fR
.RE
.RS +9
Display quota information for the user group or group id specified in the
\fIGroup\fR parameter.
.RE
.PP
.RS +3
\fB-q
\fR
.RE
.RS +9
Prints a terse message containing information only about file systems with
usage over quota.
.RE
.PP
.RS +3
\fB-u \fIUser\fR
\fR
.RE
.RS +9
Display quota information for the user name or user id specified in the
\fIUser\fR parameter.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Display quota information on file systems where the \fIUser\fR or
\fIGroup\fR limit has been set, but the storage has not been
allocated.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
If you are a root user, you may view quota information for all users and
groups.
.PP
If you are a non-root user, you may view only your own quota information
and quota information for any groups to which you belong.
.PP
You must be a root user to use the \fB-d\fR option.
.PP
GPFS must be running on the node from which the \fBmmlsquota\fR command
is issued.
.SH "Examples"
.PP
Userid \fBpaul\fR enters:
.sp
.nf
mmlsquota
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
                        Block Limits          |     File Limits
Filesystem type KB quota limit in_doubt grace |files quota limit in_doubt grace
fsn       USR 728 100096 200192  4880    none |   35   30   50       10   6days\ 
.fi
.sp
.PP
This output shows the quotas for user \fBpaul\fR in file system
\fBfsn\fR set to a soft limit of 100096 KB, and a hard limit of
200192 KB. 728 KB is currently allocated to him. 4880 KB is also
in doubt, meaning that the quota system has not yet been updated as to whether
this space has been used by the nodes, or whether it is still
available. No grace period appears because the user has not exceeded
his quota. If the user had exceeded the soft limit, the grace period
would be set and the user would have that amount of time to bring his usage
below the quota values. If he failed to do so, he would not be
allocated any more space.
.PP
The soft limit for files (inodes) is set at 30 and the hard limit is
50. 35 files are currently allocated to this user, and the quota system
does not yet know whether the 10 in doubt have been used or are still
available. A grace period of six days appears because the user has
exceeded his quota. The user would have this amount of time to bring
his usage below the quota values. If he fails to do so, he is not
allocated any more space.
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefedquota Command
.PP
mmdefquotaoff Command
.PP
mmdefquotaon Command
.PP
mmedquota Command
.PP
mmrepquota Command
.PP
mmquotaon Command
.PP
mmquotaoff Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
\fR ck I4@$        	  AAA             	      ./usr/share/man/man1/mmlssnapshot.1              $          $          .TH mmlssnapshot 11/01/04
mmlssnapshot Command
.SH "Name"
.PP
\fBmmlssnapshot\fR - Displays GPFS snapshot information for the
specified file system.
.SH "Synopsis"
.PP
\fBmmlssnapshot\fR \fIDevice\fR [\fB-d\fR]
[\fB-Q\fR]
.SH "Description"
.PP
Use the \fBmmlssnapshot\fR command to display GPFS snapshot information
for the specified file system. You may optionally display the amount of
storage used by the snapshot and if quotas were set for automatic activation
upon mounting of the file system at the time the snapshot was taken.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system for which snapshot information is to be
displayed. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Display the amount of storage used by the snapshot.
.RE
.PP
.RS +3
\fB-Q
\fR
.RE
.RS +9
Display whether quotas were set to be automatically activated upon
mounting of the file system at the time the snapshot was taken.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmlssnapshot\fR
command.
.PP
You may issue the \fBmmlssnapshot\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To display the snapshot information for the file system \fBfs1\fR
additionally requesting storage information, enter:
.sp
.nf
mmlssnapshot fs1 -d
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
Snapshots in file system fs1: [data and metadata in KB]
Directory  SnapId    Status     Created               Data  Metadata
snap1       1        Valid   Fri Oct 17 10:56:22 2003   0       512
.fi
.sp
.SH "See also"
.PP
mmcrsnapshot Command
.PP
mmdelsnapshot Command
.PP
mmrestorefs Command
.PP
mmsnapdir Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
k  I4@$          AAA                   ./usr/share/man/man1/mmputacl.1          $          $          .TH mmputacl 11/01/04
mmputacl Command
.SH "Name"
.PP
\fBmmputacl\fR - Sets the GPFS access control list for the
specified file or directory.
.SH "Synopsis"
.PP
\fBmmputacl\fR [\fB-d\fR] [\fB-i\fR
\fIInFilename\fR] \fIFilename\fR
.SH "Description"
.PP
Use the \fBmmputacl\fR command to set the ACL of a file or
directory.
.PP
If the \fB-i\fR option is not used, the command expects the input to be
supplied through standard input, and waits for your response to the
prompt.
.PP
For information about NFS V4 ACLs, see \fIManaging GPFS access control lists and NFS export\fR
in \fIGPFS: Administration and Programming Reference\fR.
.PP
Any output from the \fBmmgetacl\fR command can be used as input
to \fBmmputacl\fR. The command is extended to support NFS V4
ACLs. In the case of NFS V4 ACLs, there is no concept of a default
ACL. Instead, there is a single ACL and the individual access control
entries can be flagged as being inherited (either by files, directories, both,
or neither). Consequently, specifying the \fB-d\fR flag for an NFS
V4 ACL is an error. By its nature, storing an NFS V4 ACL implies
changing the inheritable entries (the GPFS default ACL) as well.
.PP
.br
.PP
\fBTable 20. The mmputacl command for POSIX and NFS V4 ACLs\fR
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l l.
Command~POSIX ACL~NFS V4 ACL
\fBmmputacl\fR~Access ACL (Error if default~Stores the ACL (implies
~ACL is NFS V4 [1])~default as well)
\fBmmputacl -d\fR~Default ACL (Error if access~Error: NFS V4 ACL (has
~ACL is NFS V4 [1])~no default ACL)
.TE
.sp
.fi
[1] The default and access ACLs are not permitted to be mixed types
because NFS V4 ACLs include inherited entries, which are the
equivalent of a default ACL. An \fBmmdelacl\fR of the NFS V4 ACL is
required before an ACL is converted back to POSIX.
.RE
.PP
Depending on the file system's \fB-k\fR setting \fB(posix\fR,
\fBnfs4\fR, or \fBall\fR), \fBmmputacl\fR may be restricted.
The \fBmmputacl\fR command is not allowed to store an NFS V4 ACL if \fB-k
posix\fR is in effect. The \fBmmputacl\fR command is not allowed
to store a POSIX ACL if \fB-k nfs4\fR is in effect. For more
information, see the description of the \fB-k\fR flag for the \fBmmchfs\fR, \fBmmcrfs\fR, and
\fBmmlsfs\fR commands.
.PP
Note that the test to see if the given ACL is acceptable based on the file
system's \fB-k\fR setting cannot be done until after the ACL is
provided. For example, if \fBmmputacl file1\fR is issued (no
\fB-i\fR flag specified) the user then has to input the ACL before the
command can verify that it is an appropriate ACL given the file system
settings. Likewise, the command \fBmmputacl -d dir1\fR (again the
ACL was not given with the \fB-i\fR flag ) requires that the ACL be entered
before file system ACL settings can be tested. In this situation, the
\fB-i\fR flag may be preferable to manually entering a long ACL, only to
find out it is not allowed by the file system.
.SH "Parameters"
.PP
.RS +3
\fB\fIFilename\fR
\fR
.RE
.RS +9
The path name of the file or directory for which the ACL is to be
set. If the \fB-d\fR option is specified, \fIFilename\fR must
be the name of a directory.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Specifies that the default ACL of a directory is to be set. This
flag cannot be used on an NFS V4 ACL.
.RE
.PP
.RS +3
\fB-i \fIInFilename\fR
\fR
.RE
.RS +9
The path name of a source file from which the ACL is to be read.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You may issue the \fBmmputacl\fR command only from a node in the
GPFS cluster where the file system is mounted.
.PP
You must be the file or directory owner, the root user, or someone with
control permission in the ACL, to run the \fBmmputacl\fR command.
.SH "Examples"
.PP
To use the entries in a file named \fBstandard.acl\fR to set the
ACL for a file named \fBproject2.history\fR, enter:
.sp
.nf
mmputacl -i standard.acl project2.history
.fi
.sp
.PP
where \fBstandard.acl\fR contains:
.sp
.nf
user::rwxc
group::rwx-
other::--x-
mask::rw-c
user:alpha:rwxc
group:audit:rwx-
group:system:-w--
.fi
.sp
.PP
To confirm the change, enter:
.sp
.nf
mmgetacl project.history
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
#owner:paul
#group:design
user::rwxc
group::rwx-
other::--x-
mask::rw-c
user:alpha:rwxc
group:audit:rwx-
group:system:-w--
.fi
.sp
.SH "See also"
.PP
mmeditacl Command
.PP
mmdelacl Command
.PP
mmgetacl Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
e to thkS I4@$          wAwAwA                   ./usr/share/man/man3/gpfsAccessRange_t.3                 $          $          .TH gpfsAccessRange_t 10/22/04
gpfsAccessRange_t Structure
.SH "Name"
.PP
\fBgpfsAccessRange_t\fR - Declares an access range within a file
for an application.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int          structLen;
  int          structType;
  offset_t     start;
  offset_t     length;
  int          isWrite;
  char         padding[4];
} gpfsAccessRange_t;\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfsAccessRange_t\fR structure declares an access range within a
file for an application.
.PP
The application accesses file offsets within the given range, and does not
access offsets outside the range. Violating this hint may produce worse
performance than if no hint was specified.
.PP
This hint is useful in situations where a file is partitioned coarsely
among several nodes. If the ranges do not overlap, each node can
specify which range of the file it accesses. This provides a
performance improvement in some cases, such as for sequential writing within a
range.
.PP
Subsequent \fBGPFS_ACCESS_RANGE\fR hints replace a hint passed
earlier.
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsAccessRange_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The hint identifier \fBGPFS_ACCESS_RANGE\fR.
.RE
.PP
.RS +3
\fBstart
\fR
.RE
.RS +9
The start of the access range offset, in bytes, from beginning of
file.
.RE
.PP
.RS +3
\fBlength
\fR
.RE
.RS +9
Length of the access range.
.PP
\fB0\fR indicates to end of file.
.RE
.PP
.RS +3
\fBisWrite
\fR
.RE
.RS +9
\fB0\fR indicates \fBread\fR access. 
.PP
\fB1\fR indicates \fBwrite\fR access.
.RE
.PP
.RS +3
\fBpadding[4]
\fR
.RE
.RS +9
Provided to make the length of the \fBgpfsAccessRange_t\fR structure a
multiple of 8 bytes in length. There is no need to initialize this
field.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
NFk I4@$          wAwAwA                   ./usr/share/man/man3/gpfsCancelHints_t.3                 $          $          .TH gpfsCancelHints_t 10/22/04
gpfsCancelHints_t Structure
.SH "Name"
.PP
\fBgpfsCancelHints_t\fR - Indicates to remove any hints against
the open file handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int structLen;
  int structType;
} gpfsCancelHints_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsCancelHints_t\fR structure indicates to remove any hints
against the open file handle.
.PP
GPFS removes any hints that may have been issued against this open file
handle: 
.RS +3
.HP 3
\(bu The hint status of the file is restored to what it would have been
immediately after being opened, but does not affect the contents of the GPFS
file cache. Cancelling an earlier hint that resulted in data being
removed from the GPFS file cache does not bring that data back into the
cache; data reenters the cache only upon access by the application or by
user-driven or automatic prefetching.
.HP 3
\(bu Only the \fBGPFS_MULTIPLE_ACCESS_RANGE\fR hint has a state that might
be removed by the \fBGPFS_CANCEL_HINTS\fR directive.
.RE
.RS +3
\fBNote:\fR
.RE
.RS +9
This directive cancels only the effect of other hints, not other
directives.
.RE
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsCancelHints_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The directive identifier \fBGPFS_CANCEL_HINTS\fR.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
ck꾿  J4@$          wAwAwA                   ./usr/share/man/man3/gpfsClearFileCache_t.3              $          $          .TH gpfsClearFileCache_t 10/22/04
gpfsClearFileCache_t Structure
.SH "Name"
.PP
\fBgpfsClearFileCache_t\fR - Indicates file access in the near
future is not expected.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int    structLen;
  int    structType;
} gpfsClearFileCache_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsClearFileCache_t\fR structure indicates file access in the
near future is not expected.
.PP
The application does not expect to make any further accesses to the file in
the near future, so GPFS removes any data or metadata pertaining to the file
from its cache.
.PP
Multiple node applications that have finished one phase of their
computation may wish to use this hint before the file is accessed in a
conflicting mode from another node in a later phase. The potential
performance benefit is that GPFS can avoid later synchronous cache consistency
operations.
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsClearFileCache_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The hint identifier \fBGPFS_CLEAR_FILE_CACHE\fR.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
nkꨢ J4@$          wAwAwA                   ./usr/share/man/man3/gpfsDataShipMap_t.3 .3              $          $          .TH gpfsDataShipMap_t 10/22/04
gpfsDataShipMap_t Structure
.SH "Name"
.PP
\fBgpfsDataShipMap_t\fR - Indicates which agent nodes are to be
used for data shipping.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
#define GPFS_MAX_DS_AGENT_NODES 2048
typedef struct
{
  int  structLen;
  int  structType;
  int  partitionSize;
  int  agentCount;\ 
  int  agentNodeNumber[GPFS_MAX_DS_AGENT_NODES]\ 
 } gpfsDataShipMap_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsDataShipMap_t\fR structure indicates which agent nodes are to
be used for data shipping.
.PP
GPFS recognizes which agent nodes to use for data shipping:
.RS +3
.HP 3
\(bu This directive can only appear in a \fBgpfs_fcntl()\fR subroutine that also gives the \fBGPFS_DATA_SHIP_START\fR directive.
.HP 3
\(bu If any of the participating threads include an explicit agent mapping with
this directive, all threads must provide the same agent mapping, or else GPFS
returns \fBEINVAL\fR in \fBerrno\fR. If this directive is not
used, the agents are exactly the nodes on which the \fBGPFS_DATA_SHIP_START\fR directive was given.
The order of these nodes in the mapping is random. Once the order is
set, when all instances have issued the \fBGPFS_DATA_SHIP_START\fR
directive, the partitioning of the blocks is round robin among the agent
nodes.
.HP 3
\(bu All of the nodes named in the data shipping mapping must also be data
shipping clients that have issued the \fBGPFS_DATA_SHIP_START\fR directive. The reason
for this is that GPFS, like most file systems, does not guarantee that data is
written through to disk immediately after a \fBwrite\fR call from an
application, or even after a \fBclose\fR returns. Thus, cached data
can be lost if a node crashes. Data loss can only occur, however, if
the node that crashes is the node that wrote the data.
.sp
With data shipping, this property is no longer true. Any node crash
in the collective of nodes can cause loss of data. An application
running with a file in data shipping mode writes data by shipping it to the
GPFS cache on an agent node. That agent node may later crash before
writing the data to disk. The originating node may not receive, pay
attention to, or realize the severity of an error message. Presumably,
a distributed application would notice a crash of one of the nodes on which it
was running and would take corrective action, such as rolling back to an
earlier stable checkpoint or deleting a corrupt output file. By
requiring that all agent nodes also have at least one data shipping client,
GPFS makes it such that at least one of the nodes of a distributed application
will crash if there is the potential for data loss because of an agent node
crash. If any of the data shipping client nodes suffers a node or GPFS
crash, the file will be taken out of data shipping mode.
.RE
.PP
The value for \fBpartitionSize\fR must be a multiple of the number of
bytes in a single file system block.
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsDataShipMap_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The directive identifier \fBGPFS_DATA_SHIP_MAP\fR.
.RE
.PP
.RS +3
\fBpartitionSize
\fR
.RE
.RS +9
The number of contiguous bytes per server. 
.PP
This value must be a multiple of the number of bytes in a single file
system block.
.RE
.PP
.RS +3
\fBagentCount
\fR
.RE
.RS +9
The number of entries in the \fBagentNodeNumber\fR array.
.RE
.PP
.RS +3
\fBagentNodeNumber array
\fR
.RE
.RS +9
The data ship agent node numbers assigned by GPFS and displayed with the
\fBmmlscluster\fR command.
.RE
.SH "Error status"
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
Not all participating threads have provided the same agent mapping.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
The available data space in memory is not large enough to allocate the
data structures necessary to run in data shipping mode.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
An attempt to \fBopen\fR a file in data shipping mode that is already
open in \fBwrite\fR mode by some thread that did not issue the \fBGPFS_DATA_SHIP_START\fR directive.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
A node in the data shipping collective has gone down.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k U J4@$          AAA                   ./usr/share/man/man3/gpfsDataShipStart_t.3               $          $          .TH gpfsDataShipStart_t 10/22/04
gpfsDataShipStart_t Structure
.SH "Name"
.PP
\fBgpfsDataShipStart_t\fR - Initiates data shipping mode.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int  structLen;
  int  structType;
  int  numInstances;
  int  reserved;\ 
} gpfsDataShipStart_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsDataShipStart_t\fR structure initiates data shipping
mode.
.PP
Once all participating threads have issued this directive for a file, GPFS
enters a mode where it logically partitions the blocks of the file among a
group of agent nodes. The agents are those nodes on which one or more
threads have issued the \fBGPFS_DATA_SHIP_START\fR directive. Each
thread that has issued a \fBGPFS_DATA_SHIP_START\fR directive and the
associated agent nodes are referred to as the data shipping collective.
.PP
In data shipping mode:
.RS +3
.HP 3
\(bu All file accesses result in GPFS messages to the appropriate agents to
\fBread\fR or \fBwrite\fR the requested data.
.HP 3
\(bu The \fBGPFS_DATA_SHIP_START\fR directive is a blocking collective
operation. That is, every thread that intends to access the file
through data shipping must issue the \fBGPFS_DATA_SHIP_START\fR directive
with the same value of \fBnumInstances\fR. These threads all block
within their \fBgpfs_fcntl()\fR subroutines until all
\fBnumInstances\fR threads have issued the \fBGPFS_DATA_SHIP_START\fR
directive.
.HP 3
\(bu The number of threads that issue the \fBGPFS_DATA_SHIP_START\fR
directive does not have to be the same on all nodes. However, each
thread must use a different file handle. The default agent mapping can
be overridden using the \fBGPFS_DATA_SHIP_MAP\fR
directive.
.HP 3
\(bu Applications that perform a fine-grained write, sharing across several
nodes, should benefit most from data shipping. The reason for this is
that the granularity of GPFS cache consistency is an entire file block, which
rarely matches the record size of applications. Without using data
shipping, when several nodes simultaneously write into the same block of a
file, even non-overlapping parts of the block, GPFS serially grants, and then
releases, permission to write into the block to each node in turn. Each
permission change requires dirty cached data on the relinquishing node to be
flushed to disk, yielding poor performance. Data shipping avoids this
overhead by moving data to the node that already has permission to write into
the block rather than migrating access permission to the node trying to write
the data. 
.sp
However, since most data accesses are remote in data shipping mode, clients
do not benefit from caching as much in data shipping mode as they would if
data shipping mode were not in effect. The cost to send a message to
another instance of GPFS to fetch or write data is much higher than the cost
of accessing that data through the local GPFS buffer cache. Thus,
whether or not a particular application benefits from data shipping is highly
dependent on its access pattern and its degree of block sharing.
.HP 3
\(bu Another case where data shipping can help performance is when multiple
nodes must all append data to the current end of the file. If all of
the participating threads open their instances with the \fBO_APPEND\fR flag
before initiating data shipping, one of the participating nodes is chosen as
the agent to which all appends are shipped. The aggregate performance
of all the appending nodes is limited to the throughput of a single node in
this case, but should still exceed what the performance would have been for
appending small records without using data shipping.
.RE
.PP
Data shipping mode imposes several restrictions on file usage: 
.RS +3
.HP 3
\(bu Because an application level \fBread\fR or \fBwrite\fR may be split
across several agents, POSIX \fBread\fR and \fBwrite\fR file atomicity
is not enforced while in data shipping mode.
.HP 3
\(bu A file in data shipping mode cannot be written through any file handle
that was not associated with the data shipping collective through a
\fBGPFS_DATA_SHIP_START\fR directive.
.HP 3
\(bu Calls that are not allowed on a file that has data shipping enabled:
.RS +3
.HP 3
\(bu \fBchmod\fR
.HP 3
\(bu \fBfchmod\fR
.HP 3
\(bu \fBchown\fR
.HP 3
\(bu \fBfchown\fR
.HP 3
\(bu \fBlink\fR
.RE
.RE
.PP
The \fBGPFS_DATA_SHIP_START\fR directive exits cleanly only when
cancelled by a \fBGPFS_DATA_SHIP_STOP\fR
directive. If all threads issue a \fBclose\fR for the file, it is
taken out of data shipping mode but errors are also returned.
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsDataShipStart_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The directive identifier \fBGPFS_DATA_SHIP_START\fR
.RE
.PP
.RS +3
\fBnumInstances
\fR
.RE
.RS +9
The number of open file instances, on all nodes, collaborating to operate
on the file.
.RE
.PP
.RS +3
\fBreserved
\fR
.RE
.RS +9
This field is currently not used. 
.PP
For compatibility with future versions of GPFS, set this field to
zero.
.RE
.SH "Recovery"
.PP
Since \fBGPFS_DATA_SHIP_START\fR directives block their invoking threads
until all participants respond accordingly, there needs to be a way to recover
if the application program uses the wrong value for \fBnumInstances\fR or
one of the participating nodes crashes before issuing its
\fBGPFS_DATA_SHIP_START\fR directive. While a \fBgpfs_fcntl()\fR subroutine is blocked waiting for other
threads, the subroutine can be interrupted by any signal. If a signal
is delivered to any of the waiting subroutines, all waiting subroutine on
every node are interrupted and return \fBEINTR\fR. GPFS does not
establish data shipping if such a signal occurs.
.PP
It is the responsibility of the application to mask off any signals that
might normally occur while waiting for another node in the data shipping
collective. Several libraries use \fBSIGALRM\fR; the thread
that makes the \fBgpfs_fcntl()\fR invocation should use
\fBsigthreadmask\fR to mask off delivery of this signal while inside the
subroutine.
.SH "Error status"
.PP
.RS +3
\fBEINTR
\fR
.RE
.RS +9
A signal was delivered to a blocked \fBgpfs_fcntl()\fR subroutine. All waiting
subroutines, on every node, are interrupted.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The file mode has been changed since the file was opened to include or
exclude \fBO_APPEND\fR. 
.PP
The value of \fBnumInstances\fR is inconsistent with the value issued by
other threads intending to access the file.
.PP
An attempt has been made to issue a \fBGPFS_DATA_SHIP_START\fR directive
on a file that is already in use in data shipping mode by other
clients.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
The available data space in memory is not large enough to allocate the
data structures necessary to establish and/or run in data shipping
mode.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
An attempt has been made to open a file in data shipping mode that is
already open in \fBwrite\fR mode by some thread that did not issue the
\fBGPFS_DATA_SHIP_START\fR directive. GPFS does not initiate data
shipping.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
A node in the data shipping collective has gone down.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
filk J4@$        	  wAwAwA             	      ./usr/share/man/man3/gpfsDataShipStop_t.3                $          $          .TH gpfsDataShipStop_t 10/22/04
gpfsDataShipStop_t Structure
.SH "Name"
.PP
\fBgpfsDataShipStop_t\fR - Takes a file out of data shipping
mode.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int  structLen;
  int  structType;
} gpfsDataShipStop_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsDataShipStop_t\fR structure takes a file out of data shipping
mode.
.PP
GPFS takes the file out of data shipping mode: 
.RS +3
.HP 3
\(bu GPFS waits for all threads that issued the \fBGPFS_DATA_SHIP_START\fR directive to issue this
directive, then flushes all dirty file data to disk.
.HP 3
\(bu While a \fBgpfs_fcntl()\fR invocation is blocked
waiting for other threads, the subroutine can be interrupted by any
signal. If a signal is delivered to any of the waiting invocations, all
waiting subroutines on every node are interrupted and return
\fBEINTR\fR. GPFS does not cancel data shipping mode if such a
signal occurs. It is the responsibility of the application to mask off
any signals that might normally occur while waiting for another node in the
data shipping collective. Several libraries use \fBSIGALRM\fR;
the thread that issues the \fBgpfs_fcntl()\fR should
use \fBsigthreadmask\fR to mask off delivery of this signal while inside
the subroutine.
.RE
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsDataShipStop_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The directive identifier \fBGPFS_DATA_SHIP_STOP\fR.
.RE
.SH "Error status"
.PP
.RS +3
\fBEIO
\fR
.RE
.RS +9
An error occurred while flushing dirty data.
.RE
.PP
.RS +3
\fBEINTR
\fR
.RE
.RS +9
A signal was delivered to a blocked \fBgpfs_fcntl()\fR subroutine. All waiting
subroutines, on every node, are interrupted.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
An attempt has been made to issue the \fBGPFS_DATA_SHIP_STOP\fR
directive from a node or thread that is not part of this data shipping
collective.
.PP
An attempt has been made to issue the \fBGPFS_DATA_SHIP_STOP\fR
directive on a file that is not in data shipping mode.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
A node in the data shipping collective has gone down.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
 valk( 
J4@$          wAwAwA                   ./usr/share/man/man3/gpfsFcntlHeader_t.3                 $          $          .TH gpfsFcntlHeader_t 10/22/04
gpfsFcntlHeader_t Structure
.SH "Name"
.PP
\fBgpfsFcntlHeader_t\fR - Contains declaration information for the
\fBgpfs_fcntl()\fR subroutine.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int     totalLength;
  int     fcntlVersion;
  int     errorOffset;
  int     fcntlReserved;
} gpfsFcntlHeader_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsFcntlHeader_t\fR structure contains size, version, and error
information for the \fBgpfs_fcntl()\fR
subroutine.
.SH "Members"
.PP
.RS +3
\fBtotalLength
\fR
.RE
.RS +9
This field must be set to the total length, in bytes, of the data
structure being passed in this subroutine. This includes the length of
the header and all hints and directives that follow the header. 
.PP
The total size of the data structure \fIcannot\fR exceed the value of
\fBGPFS_MAX_FCNTL_LENGTH\fR, as defined in the header file
\fBgpfs_fcntl.h\fR. The current value of
\fBGPFS_MAX_FCNTL_LENGTH\fR is 64K bytes.
.RE
.PP
.RS +3
\fBfcntlVersion
\fR
.RE
.RS +9
This field must be set to the current version number of the \fBgpfs_fcntl()\fR subroutine, as defined by
\fBGPFS_FCNTL_CURRENT_VERSION\fR in the header file
\fBgpfs_fcntl.h\fR. The current version number is one.
.RE
.PP
.RS +3
\fBerrorOffset
\fR
.RE
.RS +9
If an error occurs processing a hint or directive, GPFS sets this field to
the offset within the parameter area where the error was detected. 
.PP
For example, 
.RS +3
.HP 3
1. An incorrect version number in the header would cause
\fBerrorOffset\fR to be set to zero.
.HP 3
2. An error in the first hint following the header would set
\fBerrorOffset\fR to \fBsizeof(header)\fR.
.RE
.PP
If no errors are found, GPFS does not alter this field.
.RE
.PP
.RS +3
\fBfcntlReserved
\fR
.RE
.RS +9
This field is currently unused.
.PP
For compatibility with future versions of GPFS, set this field to
zero.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
kZ J4@$          wAwAwA                   ./usr/share/man/man3/gpfsFreeRange_t.3 3         $          $          .TH gpfsFreeRange_t 10/22/04
gpfsFreeRange_t Structure
.SH "Name"
.PP
\fBgpfsFreeRange_t\fR - Undeclares an access range within a file
for an application.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int          structLen;
  int          structType;
  offset_t     start;
  offset_t     length;
} gpfsFreeRange_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsFreeRange_t\fR structure undeclares an access range within a
file for an application.
.PP
The application no longer accesses file offsets within the given
range. GPFS flushes the data at the file offsets and removes it from
the cache.
.PP
Multiple node applications that have finished one phase of their
computation may wish to use this hint before the file is accessed in a
conflicting mode from another node in a later phase. The potential
performance benefit is that GPFS can avoid later synchronous cache consistency
operations.
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsFreeRange_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The hint identifier \fBGPFS_FREE_RANGE\fR.
.RE
.PP
.RS +3
\fBstart
\fR
.RE
.RS +9
The start of the access range offset, in bytes, from beginning of
file.
.RE
.PP
.RS +3
\fBlength
\fR
.RE
.RS +9
Length of the access range.
.PP
Zero indicates to end of file.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k4 J4@$          wAwAwA                   ./usr/share/man/man3/gpfsMultipleAccessRange_t.3                 $          $          .TH gpfsMultipleAccessRange_t 10/22/04
gpfsMultipleAccessRange_t Structure
.SH "Name"
.PP
\fBgpfsMultipleAccessRange_t\fR - Defines \fBprefetching\fR
and \fBwrite-behind\fR file access for an application.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  offset_t     blockNumber;  /* data block number to access */
  int          start;   /*start of range (from beginning of block)*/\ 
  int          length;       /* number of bytes in range */
  int          isWrite;      /* 0 - READ access 1 - WRITE access */
  char         padding[4];
} gpfsRangeArray_t;
\ 
typedef struct
{
  int                  structLen;
  int                  structType;
  int                  accRangeCnt;
  int                  relRangeCnt;
  gpfsRangeArray_t     accRangeArray[GPFS_MAX_RANGE_COUNT];
  gpfsRangeArray_t     relRangeArray[GPFS_MAX_RANGE_COUNT];
} gpfsMultipleAccessRange_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfsMultipleAccessRange_t\fR structure defines
\fBprefetching\fR and \fBwrite-behind\fR access where the application
will soon access the portions of the blocks specified in
\fBaccRangeArray\fR and has finished accessing the ranges listed in
\fBrelRangeArray\fR. The size of a block is returned in the
\fBst_blksize\fR field returned by the \fBstat\fR subroutine, so a file
offset \fBOFF\fR falls within block \fBOFF/st_blksize\fR.
.RS +3
.HP 3
\(bu Up to \fBGPFS_MAX_RANGE_COUNT\fR, as defined in the header file
\fBgpfs_fcntl.h\fR, blocks may be given in one multiple access range
hint. The current value of \fBGPFS_MAX_RANGE_COUNT\fR is
eight. Depending on the current load, GPFS may initiate prefetching of
some or all of the blocks.
.HP 3
\(bu Each range named in \fBaccRangeArray\fR that is accepted for
prefetching, should eventually be released with an identical entry in
\fBrelRangeArray\fR, or else GPFS will stop prefetching blocks for this
file.
.RS +3
\fBNote:\fR
.RE
.RS +9
Naming a subrange of a block in \fBrelRangeArray\fR that does not exactly
match a past entry in \fBaccRangeArray\fR has \fBno\fR effect, and does
not produce an error.
.RE
.HP 3
\(bu Applications that make random accesses or regular patterns not recognized
by GPFS may benefit from using this hint.
.sp
GPFS already recognizes sequential and strided file access patterns.
Applications that use such patterns should not need to use this hint, as GPFS
automatically recognizes the pattern and performs \fBprefetching\fR and
\fBwrite-behind\fR accordingly. In fact, using the multiple access
range hint in programs having a sequential or strided access pattern may
degrade performance due to the extra overhead to process the hints.
.RE
.PP
Notice that the units of prefetch and release are file blocks, not file
offsets. If the application intends to make several accesses to the
same block, it will generally get better performance by including the entire
range to be accessed in the \fBGPFS_MULTIPLE_ACCESS_RANGE\fR hint before
actually doing a \fBread\fR or \fBwrite\fR. A sample program
\fBgpfsperf\fR, which demonstrates the use of the
\fBGPFS_MULTIPLE_ACCESS_RANGE\fR hint, is included in the GPFS product and
installed in the \fB/usr/lpp/mmfs/samples/perf\fR directory.
.SH "Members"
.PP
.RS +3
\fBstructLen
\fR
.RE
.RS +9
Length of the \fBgpfsMultipleAccessRange_t\fR structure.
.RE
.PP
.RS +3
\fBstructType
\fR
.RE
.RS +9
The hint identifier \fBGPFS_MULTIPLE_ACCESS_RANGE\fR.
.RE
.PP
.RS +3
\fBaccRangeCnt
\fR
.RE
.RS +9
On input, the number of ranges in \fBaccRangeArray\fR.
.PP
On output, the number of ranges processed; these are the first \fIn\fR of
the given ranges.
.RE
.PP
.RS +3
\fBrelRangeCnt
\fR
.RE
.RS +9
The number of ranges in \fBrelRangeArray\fR.
.RE
.PP
.RS +3
\fBaccRangeArray
\fR
.RE
.RS +9
The ranges of blocks that the application will soon access.
.RE
.PP
.RS +3
\fBrelRangeArray
\fR
.RE
.RS +9
The ranges of blocks that the application has finished accessing.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
 dak~ I4@$        %  0xA0xA0xA             %      ./usr/share/man/man3/gpfs_close_inodescan.3 _t.3         $          $          .TH gpfs_close_inodescan 10/22/04
gpfs_close_inodescan() Subroutine
.SH "Name"
.PP
\fBgpfs_close_inodescan()\fR - Closes an inode scan.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
void gpfs_close_inodescan(gpfs_iscan_t *iscan);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_close_inodescan()\fR subroutine closes the scan of the
inodes in a file system or snapshot that was opened with the
\fBgpfs_open_inodescan()\fR subroutine. The
\fBgpfs_close_inodescan()\fR subroutine frees all storage used for the
inode scan and invalidates the \fBiscan\fR handle.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBiscan
\fR
.RE
.RS +9
Pointer to the inode scan handle.
.RE
.SH "Exit status"
.PP
The \fBgpfs_close_inodescan()\fR subroutine returns void.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
None.
.SH "Examples"
.PP
For an example using \fBgpfs_close_inodescan()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
e \k I4@$        E	  0xA0xA0xA             E	      ./usr/share/man/man3/gpfs_cmp_fssnapid.3 .3 _t.3         $          $          .TH gpfs_cmp_fssnapid 10/22/04
gpfs_cmp_fssnapid() Subroutine
.SH "Name"
.PP
\fBgpfs_cmp_fssnapid()\fR - Compares two snapshot IDs for the same file
system to determine the order in which the two snapshots were taken.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_cmp_fssnapid (const gpfs_fssnap_id_t *fssnapId1,
                       const gpfs_fssnap_id_t *fssnapId2,
                       int *result);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_cmp_fssnapid()\fR subroutine compares two snapshot IDs for
the same file system to determine the order in which the two snapshots were
taken. The \fBresult\fR parameter is set as follows:
.RS +3
.HP 3
\(bu \fBresult\fR less than zero indicates that snapshot 1 was taken before
snapshot 2.
.HP 3
\(bu \fBresult\fR equal to zero indicates that snapshot 1 and 2 are the
same.
.HP 3
\(bu \fBresult\fR greater than zero indicates that snapshot 1 was taken
after snapshot 2.
.RE
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapId1
\fR
.RE
.RS +9
File system snapshot ID of the first snapshot.
.RE
.PP
.RS +3
\fBfssnapId2
\fR
.RE
.RS +9
File system snapshot ID of the second snapshot.
.RE
.PP
.RS +3
\fBresult
\fR
.RE
.RS +9
Pointer to an integer indicating the outcome of the comparison.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_cmp_fssnapid()\fR subroutine is successful, it returns a
value of 0 and the \fBresult\fR parameter is set as described above.
.PP
If the \fBgpfs_cmp_fssnapid()\fR subroutine is unsuccessful, it returns
a value of -1 and the global error variable \fBerrno\fR is set to indicate
the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEDOM
\fR
.RE
.RS +9
The two snapshots cannot be compared because they were taken from two
different file systems.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_cmp_fssnapid()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
ernkZp I4@$          1xA1xA1xA                   ./usr/share/man/man3/gpfs_direntx_t.3 .3         $          $          .TH gpfs_direntx_t 10/22/04
gpfs_direntx_t Structure
.SH "Name"
.PP
\fBgpfs_direntx_t\fR - Contains attributes of a GPFS directory
entry.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct gpfs_direntx
{
  int            d_version;  /*this struct's version*/
  unsigned short d_reclen;  /*actual size of this struct including
                          null-terminated variable-length d_name*/
  unsigned short d_type;      /*types are defined below*/
  gpfs_ino_t     d_ino;       /*file inode number*/
  gpfs_gen_t     d_gen;       /*generation number for the inode*/
  char      d_name[256];  /*null-terminated variable-length name*/
} gpfs_direntx_t;
/* File types for d_type field in gpfs_direntx_t */
#define GPFS_DE_OTHER    0
#define GPFS_DE_DIR      4
#define GPFS_DE_REG      8
#define GPFS_DE_LNK     10
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_direntx_t\fR structure contains the attributes of a GPFS
directory entry.
.SH "Members"
.PP
.RS +3
\fBd_version
\fR
.RE
.RS +9
The version number of this structure.
.RE
.PP
.RS +3
\fBd_reclen
\fR
.RE
.RS +9
The actual size of this structure including the null-terminated
variable-length \fBd_name\fR field. 
.PP
To allow some degree of forward compatibility, careful callers should use
the \fBd_reclen\fR field for the size of the structure rather than the
\fBsizeof()\fR function.
.RE
.PP
.RS +3
\fBd_type
\fR
.RE
.RS +9
The type of directory.
.RE
.PP
.RS +3
\fBd_ino
\fR
.RE
.RS +9
The directory inode number.
.RE
.PP
.RS +3
\fBd_gen
\fR
.RE
.RS +9
The directory generation number.
.RE
.PP
.RS +3
\fBd_name
\fR
.RE
.RS +9
Null-terminated variable-length name of the directory.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_direntx_t\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
+3kA I4@$          AAA                   ./usr/share/man/man3/gpfs_fcntl.3 t.3 .3         $          $          .TH gpfs_fcntl 10/22/04
gpfs_fcntl() Subroutine
.SH "Name"
.PP
\fBgpfs_fcntl()\fR - Passes hints and directives to GPFS on behalf
of an open file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_fcntl(int fileDesc, void* fcntlArgP)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_fcntl()\fR subroutine passes hints and directives to GPFS on
behalf of an open file, while obtaining the necessary size, version, and error
information from the \fBgpfsFcntlHeader_t\fR
structure.
.RS +3
.HP 3
\(bu hints:
.RS +3
.HP 3
1. \fBgpfsAccessRange_t\fR
.HP 3
2. \fBgpfsFreeRange_t\fR
.HP 3
3. \fBgpfsMultipleAccessRange_t\fR
.HP 3
4. \fBgpfsClearFileCache_t\fR
.RE
.HP 3
\(bu directives:
.RS +3
.HP 3
1. \fBgpfsCancelHints_t\fR
.HP 3
2. \fBgpfsDataShipStart_t\fR
.HP 3
3. \fBgpfsDataShipMap_t\fR
.HP 3
4. \fBgpfsDataShipStop_t\fR
.RE
.RE
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfileDesc
\fR
.RE
.RS +9
The file descriptor identifying the file to which GPFS applies the hints
and directives.
.RE
.PP
.RS +3
\fBfcntlArgP
\fR
.RE
.RS +9
A pointer to the list of hints and directives to be passed to GPFS.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_fcntl()\fR subroutine is successful, it returns a value
of 0.
.PP
If the \fBgpfs_fcntl()\fR subroutine is unsuccessful, it returns a value
of -1 and sets the global error variable \fBerrno\fR to indicate the nature
of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEBADF
\fR
.RE
.RS +9
The file descriptor is not valid.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The file descriptor does not refer to a GPFS file or a regular
file.
.PP
The hint or directive is not valid.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_fcntl()\fR subroutine is not supported under the current
file system format.
.RE
.PP
.RS +3
\fBE2BIG
\fR
.RE
.RS +9
An argument is longer than \fBGPFS_MAX_FCNTL_LENGTH\fR.
.RE
.SH "Examples"
.PP
This programming segment releases all cache data held by the file
\fIhandle\fR and tells GPFS that the subroutine will write the portion of
the file with file offsets between 2G and 3G-1:
.sp
.nf
struct
{
  gpfsFcntlHeader_t hdr;
  gpfsClearFileCache_t rel;
  gpfsAccessRange_t acc;
} arg;
arg.hdr.totalLength = sizeof(arg);
arg.hdr.fcntlVersion = GPFS_FCNTL_CURRENT_VERSION;
arg.hdr.fcntlReserved = 0;
arg.rel.structLen = sizeof(arg.rel);
arg.rel.structType = GPFS_CLEAR_FILE_CACHE;
arg.acc.structLen = sizeof(arg.acc);
arg.acc.structType = GPFS_ACCESS_RANGE;
arg.acc.start = 2LL * 1024LL * 1024LL * 1024LL;
arg.acc.length = 1024 * 1024 * 1024;
arg.acc.isWrite = 1;
rc = gpfs_fcntl(handle, &arg);\ 
.fi
.sp
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
 kb I4@$        g	  wAwAwA             g	      ./usr/share/man/man3/gpfs_fgetattrs.3 .3         $          $          .TH gpfs_fgetattrs 10/22/04
gpfs_fgetattrs() Subroutine
.SH "Name"
.PP
\fBgpfs_fgetattrs()\fR - Retrieves all extended file attributes in
opaque format.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_fgetattrs(int fileDesc, int flags, void *bufferP,
                   int bufferSize, int *attrSizeP)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_fgetattrs()\fR subroutine, together with \fBgpfs_fputattrs()\fR is intended for use by a backup
program to save (\fBgpfs_fgetattrs()\fR) and restore (\fBgpfs_fputattrs()\fR) extended ACLs defined for the
file. If the file has no extended ACLs, the \fBgpfs_fgetattrs()\fR
subroutine returns a value of 0, but sets \fBattrSizeP\fR to zero and
leaves the content of the buffer unchanged.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfileDesc
\fR
.RE
.RS +9
The file descriptor identifying the file whose extended attributes are
being retrieved.
.RE
.PP
.RS +3
\fBflags
\fR
.RE
.RS +9
\fBRESERVED\fR. This value must be set to 0.
.RE
.PP
.RS +3
\fBbufferP
\fR
.RE
.RS +9
Pointer to a buffer to store the extended attribute information.
.RE
.PP
.RS +3
\fBbufferSize
\fR
.RE
.RS +9
The size of the buffer that was passed in.
.RE
.PP
.RS +3
\fBattrSizeP
\fR
.RE
.RS +9
If successful, returns the actual size of the attribute information that
was stored in the buffer. If the \fIbufferSize\fR was too small,
returns the minimum buffer size.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_fgetattrs()\fR subroutine is successful, it returns a
value of 0.
.PP
If the \fBgpfs_fgetattrs()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSPC
\fR
.RE
.RS +9
\fIbufferSize\fR is too small to return all of the attributes.
On return, \fI*attrSizeP\fR is set to the required size.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_fgetattrs()\fR subroutine is not supported under the
current file system format.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
lk~ I4@$        m	  wAwAwA             m	      ./usr/share/man/man3/gpfs_fputattrs.3 .3         $          $          .TH gpfs_fputattrs 10/22/04
gpfs_fputattrs() Subroutine
.SH "Name"
.PP
\fBgpfs_fputattrs()\fR - Sets all the extended file attributes for
a file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_fputattrs(int fileDesc, int flags, void *bufferP)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_fputattrs()\fR subroutine, together with \fBgpfs_fgetattrs()\fR is intended for use by a backup
program to save (\fBgpfs_fgetattrs()\fR) and restore
(\fBgpfs_fputattrs()\fR) extended ACLs defined for the file.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfileDesc
\fR
.RE
.RS +9
The file descriptor identifying the file whose extended attributes are
being set.
.RE
.PP
.RS +3
\fBflags
\fR
.RE
.RS +9
\fBRESERVED\fR. This value must be set to 0.
.RE
.PP
.RS +3
\fBbufferP
\fR
.RE
.RS +9
A pointer to the buffer containing the extended attributes for the
file.
.PP
If you specify a value of NULL, all extended ACLs for the file are
deleted.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_fputattrs()\fR subroutine is successful, it returns a
value of 0.
.PP
If the \fBgpfs_fputattrs()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The buffer pointed to by \fIbufferP\fR does not contain valid attribute
data.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_fputattrs()\fR subroutine is not supported under the
current file system format.
.RE
.SH "Examples"
.PP
To copy extended file attributes from file \fBf1\fR to file
\fBf2\fR:
.sp
.nf
char buf[4096];
int f1, f2, attrSize, rc;
rc = gpfs_fgetattrs(f1, 0, buf, sizeof(buf), &attrSize);
if (rc != 0)
    ...                                  //  error handling
if (attrSize != 0)
   rc = gpfs_fputattrs(f2, 0, buf);   //copy attributes from f1 to f2
else
   rc = gpfs_fputattrs(f2, 0, NULL);    // f1 has no attributes
                                        // delete attributes on f2
.fi
.sp
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
leCk| I4@$          1xA1xA1xA                   ./usr/share/man/man3/gpfs_free_fssnaphandle.3 .3         $          $          .TH gpfs_free_fssnaphandle 10/22/04
gpfs_free_fssnaphandle() Subroutine
.SH "Name"
.PP
\fBgpfs_free_fssnaphandle()\fR - Frees a GPFS file system snapshot
handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
void gpfs_free_fssnaphandle(gpfs_fssnap_handle_t *fssnapHandle);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_free_fssnaphandle()\fR subroutine frees the snapshot handle
that is passed. The return value is always void.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.SH "Exit status"
.PP
The \fBgpfs_free_fssnaphandle()\fR subroutine always returns
void.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
None.
.SH "Examples"
.PP
For an example using \fBgpfs_free_fssnaphandle()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k I4@$        ^
  2xA2xA2xA             ^
      ./usr/share/man/man3/gpfs_fssnap_handle_t.3 3 .3         $          $          .TH gpfs_fssnap_handle_t 10/22/04
gpfs_fssnap_handle_t Structure
.SH "Name"
.PP
\fBgpfs_fssnap_handle_t\fR - Contains a handle for a GPFS file system or
snapshot.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct gpfs_fssnap_handle gpfs_fssnap_handle_t;
.fi
.sp
.SH "Description"
.PP
A file system or snapshot is uniquely identified by an \fBfssnapId\fR of
type \fBgpfs_fssnap_id_t\fR. While the \fBfssnapId\fR is
permanent and global, a shorter \fBfssnapHandle\fR is used by the backup
application programming interface to identify the file system and snapshot
being accessed. The \fBfssnapHandle\fR, like a POSIX file
descriptor, is volatile and may be used only by the program that created
it.
.PP
There are three ways to create a file system snapshot handle: 
.RS +3
.HP 3
1. By using the name of the file system and snapshot
.HP 3
2. By specifying the path through the mount point
.HP 3
3. By providing an existing file system snapshot ID
.RE
.PP
Additional subroutines are provided to obtain the permanent, global
\fBfssnapId\fR from the \fBfssnapHandle\fR, or to obtain the path or the
names for the file system and snapshot, if they are still available in the
file system.
.PP
The file system must be mounted in order to use the backup programming
application interface. If the \fBfssnapHandle\fR is created by the
path name, the path may be relative and may specify any file or directory in
the file system. Operations on a particular snapshot are indicated with
a path to a file or directory within that snapshot. If the
\fBfssnapHandle\fR is created by name, the file system's unique name
may be specified (for example, \fBfs1\fR) or its device name may be
provided (for example, \fB/dev/fs1\fR). To specify an operation on
the active file system, the pointer to the snapshot's name should be set
to NULL or a zero-length string provided.
.PP
The name of the directory under which all snapshots appear may be obtained
by the \fBgpfs_get_snapdirname()\fR subroutine. By default this is
\fB\&.snapshots\fR, but it can be changed using the \fBmmsnapdir\fR
command. The \fBgpfs_get_snapdirname()\fR subroutine returns the
currently set value, that is, the one that was last set by the
\fBmmsnapdir\fR command, or the default, if it was never changed.
.SH "Members"
.PP
.RS +3
\fBgpfs_fssnap_handle
\fR
.RE
.RS +9
File system snapshot handle
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_fssnap_handle_t\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
arkΔ I4@$          2xA2xA2xA                   ./usr/share/man/man3/gpfs_fssnap_id_t.3          $          $          .TH gpfs_fssnap_id_t 10/22/04
gpfs_fssnap_id_t Structure
.SH "Name"
.PP
\fBgpfs_fssnap_id_t\fR - Contains a permanent, globally unique
identifier for a GPFS file system or snapshot.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct gpfs_fssnap_id
{
  char opaque[48];
} gpfs_fssnap_id_t;
.fi
.sp
.SH "Description"
.PP
A file system or snapshot is uniquely identified by an \fBfssnapId\fR of
type \fBgpfs_fssnap_id_t\fR. The \fBfssnapId\fR is a permanent
and global identifier that uniquely identifies an active file system or a
read-only snapshot of a file system. Every snapshot of a file system
has a unique identifier that is also different from the identifier of the
active file system itself.
.PP
The \fBfssnapId\fR is obtained from an open
\fBfssnapHandle\fR. Once obtained, the \fBfssnapId\fR should be
stored along with the file system's data for each backup. The
\fBfssnapId\fR is required to generate an incremental backup. The
\fBfssnapId\fR identifies the previously backed up file system or snapshot
and allows the inode scan to return only the files and data that have changed
since that previous scan.
.SH "Members"
.PP
.RS +3
\fBopaque
\fR
.RE
.RS +9
A 48-byte area for containing the snapshot identifier.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_fssnap_id_t\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
ot ak꼕 I4@$        u  AAA             u      ./usr/share/man/man3/gpfs_fstat.3 d_t.3          $          $          .TH gpfs_fstat 10/22/04
gpfs_fstat() Subroutine
.SH "Name"
.PP
\fBgpfs_fstat()\fR - Returns exact file status for a GPFS
file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_fstat(int fileDesc, struct stat64 *Buffer)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_fstat()\fR subroutine is used to obtain exact information
about the file associated with the \fIFileDesc\fR parameter. This
subroutine is provided as an alternative to the \fBstat()\fR subroutine,
which may not provide exact \fBmtime\fR and \fBatime\fR values.
.PP
\fBread\fR, \fBwrite\fR, or \fBexecute\fR permission for the named
file is not required, but all directories listed in the path leading to the
file must be searchable. The file information is written to the area
specified by the \fIBuffer\fR parameter.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfileDesc
\fR
.RE
.RS +9
The file descriptor identifying the file for which exact status
information is requested.
.RE
.PP
.RS +3
\fBBuffer
\fR
.RE
.RS +9
A pointer to the \fBstat64\fR structure in which the information is
returned. The \fBstat64\fR structure is described in the
\fBsys/stat.h\fR file.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_fstat()\fR subroutine is successful, it returns a value
of 0.
.PP
If the \fBgpfs_fstat()\fR subroutine is unsuccessful, it returns a value
of -1 and sets the global error variable \fBerrno\fR to indicate the nature
of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEBADF
\fR
.RE
.RS +9
The file descriptor is not valid.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The file descriptor does not refer to a GPFS file or a regular
file.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_fstat()\fR subroutine is not supported under the current
file system format.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
apdk갳 I4@$          3xA3xA3xA                   ./usr/share/man/man3/gpfs_get_fsname_from_fssnaphandle.3                 $          $          .TH gpfs_get_fsname_from_fssnaphandle 10/22/04
gpfs_get_fsname_from_fssnaphandle() Subroutine
.SH "Name"
.PP
\fBgpfs_get_fsname_from_fssnaphandle()\fR - Obtains the file
system's name from a GPFS file system snapshot handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
const char *gpfs_get_fsname_from_fssnaphandle
         (gpfs_fssnap_handle_t *fssnapHandle);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_fsname_from_fssnaphandle()\fR subroutine returns a
pointer to the name of the file system that is uniquely identified by the
file system snapshot handle.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_fsname_from_fssnaphandle()\fR subroutine is
successful, it returns a pointer to the name of the file system identified by
the file system snapshot handle.
.PP
If the \fBgpfs_get_fsname_from_fssnaphandle()\fR subroutine is
unsuccessful, it returns NULL and sets the global error variable
\fBerrno\fR to indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_fsname_from_fssnaphandle()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
kL I4@$        [  3xA3xA3xA             [      ./usr/share/man/man3/gpfs_get_fssnaphandle_by_fssnapid.3                 $          $          .TH gpfs_get_fssnaphandle_by_fssnapid 10/22/04
gpfs_get_fssnaphandle_by_fssnapid() Subroutine
.SH "Name"
.PP
\fBgpfs_get_fssnaphandle_by_fssnapid()\fR - Obtains a GPFS file system
snapshot handle given its permanent, unique snapshot ID.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
gpfs_fssnap_handle_t *gpfs_get_fssnaphandle_by_fssnapid
                    (const gpfs_fssnap_id_t *fssnapId);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_fssnaphandle_by_fssnapid()\fR subroutine creates a
handle for the file system or snapshot that is uniquely identified by the
permanent, unique snapshot ID.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapId
\fR
.RE
.RS +9
File system snapshot ID.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_fssnaphandle_by_fssnapid()\fR subroutine is
successful, it returns a pointer to the file system snapshot handle.
.PP
If the \fBgpfs_get_fssnaphandle_by_fssnapid()\fR subroutine is
unsuccessful, it returns NULL and sets the global error variable
\fBerrno\fR to indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Space could not be allocated for the file system snapshot handle.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_fssnaphandle_by_fssnapid()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPID
\fR
.RE
.RS +9
The file system snapshot ID is not valid.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
fer tkv I4@$        	  4xA4xA4xA             	      ./usr/share/man/man3/gpfs_get_fssnaphandle_by_name.3 d.3         $          $          .TH gpfs_get_fssnaphandle_by_name 10/22/04
gpfs_get_fssnaphandle_by_name() Subroutine
.SH "Name"
.PP
\fBgpfs_get_fssnaphandle_by_name()\fR - Obtains a GPFS file system
snapshot handle given the file system and snapshot names.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
gpfs_fssnap_handle_t *gpfs_get_fssnaphandle_by_name
         (const char *fsName, const char *snapName);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_fssnaphandle_by_name()\fR subroutine creates a handle
for the file system or snapshot that is uniquely identified by the file
system's name and the name of the snapshot.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfsName
\fR
.RE
.RS +9
A pointer to the name of the file system whose snapshot handle is
desired.
.RE
.PP
.RS +3
\fBsnapName
\fR
.RE
.RS +9
A pointer to the name of the snapshot whose snapshot handle is desired, or
NULL to access the active file system rather than a snapshot within the file
system.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_fssnaphandle_by_name()\fR subroutine is successful,
it returns a pointer to the file system snapshot handle.
.PP
If the \fBgpfs_get_fssnaphandle_by_name()\fR subroutine is unsuccessful,
it returns NULL and sets the global error variable \fBerrno\fR to indicate
the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOENT
\fR
.RE
.RS +9
The file system name is not valid.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Space could not be allocated for the file system snapshot handle.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_fssnaphandle_by_name()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_SNAPNAME
\fR
.RE
.RS +9
The snapshot name is not valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_get_fssnaphandle_by_name()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
tk I4@$        $  4xA4xA4xA             $      ./usr/share/man/man3/gpfs_get_fssnaphandle_by_path.3 d.3         $          $          .TH gpfs_get_fssnaphandle_by_path 10/22/04
gpfs_get_fssnaphandle_by_path() Subroutine
.SH "Name"
.PP
\fBgpfs_get_fssnaphandle_by_path()\fR - Obtains a GPFS file system
snapshot handle given a path to the file system or snapshot.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
gpfs_fssnap_handle_t *gpfs_get_fssnaphandle_by_path
                       (const char *pathName);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_fssnaphandle_by_path()\fR subroutine creates a handle
for the file system or snapshot that is uniquely identified by a path through
the file system's mount point to a file or directory within the file
system or snapshot.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBpathName
\fR
.RE
.RS +9
A pointer to the path name to a file or directory within the desired file
system or snapshot.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_fssnaphandle_by_path()\fR subroutine is successful,
it returns a pointer to the file system snapshot handle.
.PP
If the \fBgpfs_get_fssnaphandle_by_path()\fR subroutine is unsuccessful,
it returns NULL and sets the global error variable \fBerrno\fR to indicate
the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOENT
\fR
.RE
.RS +9
The path name is not valid.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Space could not be allocated for the file system snapshot handle.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_fssnaphandle_by_path()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_get_fssnaphandle_by_path()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
t_fsk I4@$        M  5xA5xA5xA             M      ./usr/share/man/man3/gpfs_get_fssnapid_from_fssnaphandle.3               $          $          .TH gpfs_get_fssnapid_from_fssnaphandle 10/22/04
gpfs_get_fssnapid_from_fssnaphandle() Subroutine
.SH "Name"
.PP
\fBgpfs_get_fssnapid_from_fssnaphandle()\fR - Obtains the permanent,
unique GPFS file system snapshot ID given its handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_get_fssnapid_from_fssnaphandle
        (gpfs_fssnap_handle_t *fssnapHandle,
         gpfs_fssnap_id_t *fssnapId);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_fssnapid_from_fssnaphandle()\fR subroutine obtains the
permanent, globally unique file system snapshot ID of the file system or
snapshot identified by the open file system snapshot handle.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.PP
.RS +3
\fBfssnapId
\fR
.RE
.RS +9
File system snapshot ID.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_fssnapid_from_fssnaphandle()\fR subroutine is
successful, it returns a value of 0 and the file system snapshot ID is
returned in the area pointed to by the \fBfssnapId\fR parameter.
.PP
If the \fBgpfs_get_fssnapid_from_fssnaphandle()\fR subroutine is
unsuccessful, it returns a value of -1 and sets the global error variable
\fBerrno\fR to indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEFAULT
\fR
.RE
.RS +9
Size mismatch for \fBfssnapId\fR.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
NULL pointer given for returned \fBfssnapId\fR.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_fssnapid_from_fssnaphandle()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_get_fssnapid_from_fssnaphandle()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
ssnkb I4@$        ~  5xA5xA5xA             ~      ./usr/share/man/man3/gpfs_get_pathname_from_fssnaphandle.3               $          $          .TH gpfs_get_pathname_from_fssnaphandle 10/22/04
gpfs_get_pathname_from_fssnaphandle() Subroutine
.SH "Name"
.PP
\fBgpfs_get_pathname_from_fssnaphandle()\fR - Obtains the path name of a
GPFS file system snapshot given its handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
const char *gpfs_get_pathname_from_fssnaphandle
            (gpfs_fssnap_handle_t *fssnapHandle);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_pathname_from_fssnaphandle()\fR subroutine obtains the
path name of the file system or snapshot identified by the open file system
snapshot handle.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_pathname_from_fssnaphandle()\fR subroutine is
successful, it returns a pointer to the path name of the file system or
snapshot.
.PP
If the \fBgpfs_get_pathname_from_fssnaphandle()\fR subroutine is
unsuccessful, it returns NULL and sets the global error variable
\fBerrno\fR to indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_pathname_from_fssnaphandle()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_get_pathname_from_fssnaphandle()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
g kq I4@$          wAwAwA                   ./usr/share/man/man3/gpfs_get_snapdirname.3 fssn         $          $          .TH gpfs_get_snapdirname 10/22/04
gpfs_get_snapdirname() Subroutine
.SH "Name"
.PP
\fBgpfs_get_snapdirname()\fR - Obtains the name of the directory
containing snapshots.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_get_snapdirname(gpfs_fssnap_handle_t *fssnapHandle,
                         char *snapdirName, int bufLen);\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_snapdirname()\fR subroutine obtains the name of the
directory that is used to contain snapshots.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.PP
.RS +3
\fBsnapdirName
\fR
.RE
.RS +9
Buffer into which the name of the snapshot directory will be
copied.
.RE
.PP
.RS +3
\fBbufLen
\fR
.RE
.RS +9
The size of the provided buffer.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_snapdirname()\fR subroutine is successful, it returns
a value of 0 and the name of the snapshot directory is returned in the
\fBsnapdirName\fR buffer, as described above.
.PP
If the \fBgpfs_get_snapdirname()\fR subroutine is unsuccessful, it
returns a value of -1 and the global error variable \fBerrno\fR is set to
indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Unable to allocate memory for this request.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_snapdirname()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBERANGE
\fR
.RE
.RS +9
The buffer is too small to return the snapshot directory name.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
The cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_get_snapdirname()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
kN I4@$          6xA6xA6xA                   ./usr/share/man/man3/gpfs_get_snapname_from_fssnaphandle.3               $          $          .TH gpfs_get_snapname_from_fssnaphandle 10/22/04
gpfs_get_snapname_from_fssnaphandle() Subroutine
.SH "Name"
.PP
\fBgpfs_get_snapname_from_fssnaphandle()\fR - Obtains the name of the
snapshot identified by the GPFS file system snapshot handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
const char *gpfs_get_snapname_from_fssnaphandle
            (gpfs_fssnap_handle_t *fssnapHandle);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_get_snapname_from_fssnaphandle()\fR subroutine obtains a
pointer to the name of a GPFS snapshot given its file system snapshot
handle. If the \fBfssnapHandle\fR identifies an active file system,
as opposed to a snapshot of a file system,
\fBgpfs_get_snapname_from_fssnaphandle()\fR returns a pointer to a
zero-length snapshot name and a successful return code.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_get_snapname_from_fssnaphandle()\fR subroutine is
successful, it returns a pointer to the name of the snapshot.
.PP
If the \fBgpfs_get_snapname_from_fssnaphandle()\fR subroutine is
unsuccessful, it returns NULL and sets the global error variable
\fBerrno\fR to indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_get_snapname_from_fssnaphandle()\fR subroutine is not
available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_SNAPNAME
\fR
.RE
.RS +9
The snapshot has been deleted.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
an3/gpfk>: I4@$        >  wAwAwA             >      ./usr/share/man/man3/gpfs_getacl.3 ame_f         $          $          .TH gpfs_getacl 10/22/04
gpfs_getacl() Subroutine
.SH "Name"
.PP
\fBgpfs_getacl()\fR - Retrieves the access control information for
a GPFS file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_getacl(char *pathname, int flags, void *aclP);\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_getacl()\fR subroutine, together with the \fBgpfs_putacl()\fR subroutine, is intended for use by a
backup program to save (\fBgpfs_getacl()\fR) and restore (\fBgpfs_putacl()\fR) the ACL information for the
file.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBpathname
\fR
.RE
.RS +9
The path identifying the file for which the ACLs are being
obtained.
.RE
.PP
.RS +3
\fBflags
\fR
.RE
.RS +9
\fBRESERVED\fR. This value must be set to zero.
.RE
.PP
.RS +3
\fBaclP
\fR
.RE
.RS +9
Pointer to a buffer mapped by the structure \fBgpfs_opaque_acl_t\fR. The first four bytes of
the buffer must contain its total size.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_getacl()\fR subroutine is successful, it returns a value
of 0.
.PP
If the \fBgpfs_getacl()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The path name does not refer to a GPFS file or a regular file.
.RE
.PP
.RS +3
\fBENOENT
\fR
.RE
.RS +9
The file does not exist.
.RE
.PP
.RS +3
\fBENOSPC
\fR
.RE
.RS +9
The buffer is too small to return the entire ACL. The required
buffer size is returned in the first four bytes of the buffer pointed to by
\fBaclP\fR.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_getacl()\fR subroutine is not supported under the current
file system format.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
lekꮣ I4@$          6xA6xA6xA                   ./usr/share/man/man3/gpfs_iattr_t.3 me_f         $          $          .TH gpfs_iattr_t 10/22/04
gpfs_iattr_t Structure
.SH "Name"
.PP
\fBgpfs_iattr_t\fR - Contains attributes of a GPFS inode.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct gpfs_iattr
{
  int              ia_version;    /* this struct version */
  int              ia_reclen;     /* sizeof this structure */
  int              ia_checksum;   /* validity check on iattr struct */
  gpfs_mode_t      ia_mode;       /* access mode */
  gpfs_uid_t       ia_uid;        /* owner uid */
  gpfs_gid_t       ia_gid;        /* owner gid */
  gpfs_ino_t       ia_inode;      /* file inode number */
  gpfs_gen_t       ia_gen;        /* inode generation number */
  short            ia_nlink;      /* number of links */
  short            ia_flags;      /* flags (defined below) */
  int              ia_blocksize;  /* preferred block size for io */
  gpfs_mask_t      ia_mask;       /* initial attribute mask-not used*/
  gpfs_off64_t     ia_size;       /* file size in bytes */
  gpfs_off64_t     ia_blocks; /*512 byte blocks of disk held by file*/
  gpfs_timestruc_t ia_atime;      /* time of last access */
  gpfs_timestruc_t ia_mtime;      /* time of last data modification */
  gpfs_timestruc_t ia_ctime;      /* time of last status change */
  gpfs_dev_t       ia_rdev;       /* ID of device */
  int              ia_xperm;     /*non-zero if file has extended acl*/
  int              ia_modsnapid; /*Internal snapshot ID indicating\ 
                          the last time that the file was modified*/
  int              ia_reserve_2;
  int              ia_reserve_3;
} gpfs_iattr_t;
/* Define flags for inode attributes */
#define GPFS_IAFLAG_SNAPDIR     0x0001 /* (obsolete) */
#define GPFS_IAFLAG_USRQUOTA    0x0002 /*inode is a user quota file*/
#define GPFS_IAFLAG_GRPQUOTA    0x0004 /*inode is a group quota file*/
#define GPFS_IAFLAG_ERROR       0x0008 /* error reading inode */
/* Define flags for inode replication attributes */
#define GPFS_IAFLAG_REPLMETA    0x0200 /* metadata replication set */
#define GPFS_IAFLAG_REPLDATA    0x0400 /* data replication set */
#define GPFS_IAFLAG_EXPOSED     0x0800 /*may have data on\ 
                                         suspended disks*/
#define GPFS_IAFLAG_ILLREPLICATED 0x1000
                                     /*maybe not properly replicated*/
#define GPFS_IAFLAG_UNBALANCED   0x2000
                                       /*maybe not properly balanced*/
#define GPFS_IAFLAG_DATAUPDATEMISS 0x4000
                       /* has stale data blocks on unavailable disk */
#define GPFS_IAFLAG_METAUPDATEMISS 0x8000
                          /* has stale metadata on unavailable disk */
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_iattr_t\fR structure contains the various attributes of a
GPFS inode.
.SH "Members"
.PP
.RS +3
\fBia_version
\fR
.RE
.RS +9
The version number of this structure.
.RE
.PP
.RS +3
\fBia_reclen
\fR
.RE
.RS +9
The size of this structure.
.RE
.PP
.RS +3
\fBia_checksum 
\fR
.RE
.RS +9
The checksum for this \fBgpfs_iattr\fR structure.
.RE
.PP
.RS +3
\fBia_mode
\fR
.RE
.RS +9
The access mode for this inode.
.RE
.PP
.RS +3
\fBia_uid
\fR
.RE
.RS +9
The owner user ID for this inode.
.RE
.PP
.RS +3
\fBia_gid 
\fR
.RE
.RS +9
The owner group ID for this inode.
.RE
.PP
.RS +3
\fBia_inode
\fR
.RE
.RS +9
The file inode number.
.RE
.PP
.RS +3
\fBia_gen
\fR
.RE
.RS +9
The inode generation number.
.RE
.PP
.RS +3
\fBia_nlink
\fR
.RE
.RS +9
The number of links for this inode.
.RE
.PP
.RS +3
\fBia_flags
\fR
.RE
.RS +9
The flags (defined below) for this inode.
.RE
.PP
.RS +3
\fBia_blocksize
\fR
.RE
.RS +9
The preferred block size for I/O.
.RE
.PP
.RS +3
\fBia_mask
\fR
.RE
.RS +9
The initial attribute mask (not used).
.RE
.PP
.RS +3
\fBia_size
\fR
.RE
.RS +9
The file size in bytes.
.RE
.PP
.RS +3
\fBia_blocks
\fR
.RE
.RS +9
The number of 512 byte blocks of disk held by the file.
.RE
.PP
.RS +3
\fBia_atime
\fR
.RE
.RS +9
The time of last access.
.RE
.PP
.RS +3
\fBia_mtime
\fR
.RE
.RS +9
The time of last data modification.
.RE
.PP
.RS +3
\fBia_ctime
\fR
.RE
.RS +9
The time of last status change.
.RE
.PP
.RS +3
\fBia_rdev
\fR
.RE
.RS +9
The ID of the device.
.RE
.PP
.RS +3
\fBia_xperm
\fR
.RE
.RS +9
Indicator - nonzero if file has extended ACL.
.RE
.PP
.RS +3
\fBia_modsnapid
\fR
.RE
.RS +9
Internal snapshot ID indicating the last time that the file was
modified. Internal snapshot IDs for the current snapshots are displayed
by the \fBmmlssnapshot\fR command.
.RE
.PP
.RS +3
\fBia_reserve_2
\fR
.RE
.RS +9
Reserved for future use.
.RE
.PP
.RS +3
\fBia_reserve_3
\fR
.RE
.RS +9
Reserved for future use.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_iattr_t\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k I4@$          AAA                   ./usr/share/man/man3/gpfs_iclose.3  me_f         $          $          .TH gpfs_iclose 10/22/04
gpfs_iclose() Subroutine
.SH "Name"
.PP
\fBgpfs_iclose()\fR - Closes a file given its inode file handle.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_iclose(gpfs_ifile_t *ifile);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_iclose()\fR subroutine closes an open file descriptor
created by \fBgpfs_iopen()\fR.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBifile
\fR
.RE
.RS +9
Pointer to \fBgpfs_ifile_t\fR from \fBgpfs_iopen()\fR.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_iclose()\fR subroutine is successful, it returns a value
of 0.
.PP
If the \fBgpfs_iclose()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_iclose()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_iclose()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
ak I4@$          7xA7xA7xA                   ./usr/share/man/man3/gpfs_ifile_t.3 me_f         $          $          .TH gpfs_ifile_t 10/22/04
gpfs_ifile_t Structure
.SH "Name"
.PP
\fBgpfs_ifile_t\fR - Contains a handle for a GPFS inode.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct gpfs_ifile gpfs_ifile_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_ifile_t\fR structure contains a handle for the file of a
GPFS inode.
.SH "Members"
.PP
.RS +3
\fBgpfs_ifile
\fR
.RE
.RS +9
The handle for the file of a GPFS inode.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_ifile_t\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
PoinkNK I4@$          8xA8xA8xA                   ./usr/share/man/man3/gpfs_igetattrs.3 _f         $          $          .TH gpfs_igetattrs 10/22/04
gpfs_igetattrs() Subroutine
.SH "Name"
.PP
\fBgpfs_igetattrs()\fR - Retrieves all extended file attributes in opaque
format.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_igetattrs(gpfs_ifile_t *ifile,\ 
                  void *buffer,\ 
                  int bufferSize,\ 
                  int *attrSize);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_igetattrs()\fR subroutine retrieves all extended file
attributes in opaque format. This subroutine is intended for use by a
backup program to save all extended file attributes (ACLs, DMAPI attributes,
and so forth) in one invocation. If the file does not have any extended
attributes, the subroutine sets \fBattrSize\fR to zero.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBifile
\fR
.RE
.RS +9
Pointer to \fBgpfs_ifile_t\fR from \fBgpfs_iopen()\fR.
.RE
.PP
.RS +3
\fBbuffer
\fR
.RE
.RS +9
Pointer to buffer for returned attributes.
.RE
.PP
.RS +3
\fBbufferSize
\fR
.RE
.RS +9
Size of the buffer.
.RE
.PP
.RS +3
\fBattrSize
\fR
.RE
.RS +9
Pointer to returned size of attributes.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_igetattrs()\fR subroutine is successful, it returns a
value of 0.
.PP
If the \fBgpfs_igetattrs()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_igetattrs()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBERANGE
\fR
.RE
.RS +9
The buffer is too small to return all attributes. The
\fBattrSize\fR parameter will be set to the size necessary.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_IFILE
\fR
.RE
.RS +9
Incorrect \fBifile\fR parameters.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
Ek| I4@$        
  AAA             
      ./usr/share/man/man3/gpfs_iopen.3 s.3 _f         $          $          .TH gpfs_iopen 10/22/04
gpfs_iopen() Subroutine
.SH "Name"
.PP
\fBgpfs_iopen()\fR - Opens a file or directory by inode number.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
gpfs_ifile_t *gpfs_iopen(gpfs_fssnap_handle_t *fssnapHandle,\ 
                         gpfs_ino_t ino,\ 
                         int open_flags,
                         const gpfs_iattr_t *statxbuf,\ 
                         const char *symlink);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_iopen()\fR subroutine opens a user file or directory for
backup. The file is identified by its inode number \fBino\fR within
the file system or snapshot identified by the \fBfssnapHandle\fR.
The \fBfssnapHandle\fR parameter must be the same one that was used to
create the inode scan that returned the inode number \fBino\fR.
.PP
To read the file or directory, the \fBopen_flags\fR must be set to
\fBGPFS_O_BACKUP\fR. The \fBstatxbuf\fR and \fBsymlink\fR
parameters are reserved for future use and must be set to NULL.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.PP
.RS +3
\fBino
\fR
.RE
.RS +9
The inode number of the file to be opened.
.RE
.PP
.RS +3
\fBopen_flags
\fR
.RE
.RS +9
.PP
.RS +3
\fBGPFS_O_BACKUP
\fR
.RE
.RS +9
Read files for backup.
.RE
.PP
.RS +3
\fBO_RDONLY
\fR
.RE
.RS +9
For \fBgpfs_iread()\fR.
.RE
.RE
.PP
.RS +3
\fBstatxbuf
\fR
.RE
.RS +9
This parameter is reserved for future use and should always be set to
NULL.
.RE
.PP
.RS +3
\fBsymlink
\fR
.RE
.RS +9
This parameter is reserved for future use and should always be set to
NULL.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_iopen()\fR subroutine is successful, it returns a pointer
to the inode's file handle.
.PP
If the \fBgpfs_iopen()\fR subroutine is unsuccessful, it returns NULL
and the global error variable \fBerrno\fR is set to indicate the nature of
the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
Missing or incorrect parameter.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Unable to allocate memory for request.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_iopen()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_iopen()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
  kQ I4@$        	  AAA             	      ./usr/share/man/man3/gpfs_iread.3 s.3 _f         $          $          .TH gpfs_iread 10/22/04
gpfs_iread() Subroutine
.SH "Name"
.PP
\fBgpfs_iread()\fR - Reads a file opened by
\fBgpfs_iopen()\fR.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_iread(gpfs_ifile_t *ifile,\ 
               void *buffer,\ 
               int bufferSize,\ 
               gpfs_off64_t *offset);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_iread()\fR subroutine reads data from the file indicated by
the \fBifile\fR parameter returned from \fBgpfs_iopen()\fR. This
subroutine reads data beginning at parameter \fBoffset\fR and continuing
for \fBbufferSize\fR bytes into the buffer specified by
\fBbuffer\fR. If successful, the subroutine returns a value that is
the length of the data read, and sets parameter \fBoffset\fR to the offset
of the next byte to be read. A return value of 0 indicates
end-of-file.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBifile
\fR
.RE
.RS +9
Pointer to \fBgpfs_ifile_t\fR from \fBgpfs_iopen()\fR.
.RE
.PP
.RS +3
\fBbuffer
\fR
.RE
.RS +9
Buffer for the data to be read.
.RE
.PP
.RS +3
\fBbufferSize
\fR
.RE
.RS +9
Size of the buffer (that is, the amount of data to be read).
.RE
.PP
.RS +3
\fBoffset
\fR
.RE
.RS +9
Offset of where within the file to read. If \fBgpfs_iread()\fR
is successful, \fBoffset\fR is updated to the next byte after the last one
that was read.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_iread()\fR subroutine is successful, it returns the
number of bytes read.
.PP
If the \fBgpfs_iread()\fR subroutine is unsuccessful, it returns a value
of -1 and sets the global error variable \fBerrno\fR to indicate the nature
of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEISDIR
\fR
.RE
.RS +9
The specified file is a directory.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_iread()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_IFILE
\fR
.RE
.RS +9
Incorrect \fBifile\fR parameter.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
 kh  I4@$        S  AAA             S      ./usr/share/man/man3/gpfs_ireaddir.3  _f         $          $          .TH gpfs_ireaddir 10/22/04
gpfs_ireaddir() Subroutine
.SH "Name"
.PP
\fBgpfs_ireaddir()\fR - Reads the next directory entry.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_ireaddir(gpfs_ifile_t *idir,
               const gpfs_direntx_t **dirent);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_ireaddir()\fR subroutine returns the next directory entry in
a file system.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBidir
\fR
.RE
.RS +9
Pointer to \fBgpfs_ifile_t\fR from \fBgpfs_iopen()\fR.
.RE
.PP
.RS +3
\fBdirent
\fR
.RE
.RS +9
Pointer to returned pointer to directory entry.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_ireaddir()\fR subroutine is successful, it returns a
value of 0 and sets the \fBdirent\fR parameter to point to the returned
directory entry. If there are no more GPFS directory entries,
\fBgpfs_ireaddir()\fR returns a value of 0 and sets the \fBdirent\fR
parameter to NULL.
.PP
If the \fBgpfs_ireaddir()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Unable to allocate memory for request.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_ireaddir()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBENOTDIR
\fR
.RE
.RS +9
File is not a directory.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
The cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_IFILE
\fR
.RE
.RS +9
Incorrect \fBidir\fR parameter.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_ireaddir()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
.RS +k, I4@$        "  :xA:xA:xA             "      ./usr/share/man/man3/gpfs_ireadlink.3 _f         $          $          .TH gpfs_ireadlink 10/22/04
gpfs_ireadlink() Subroutine
.SH "Name"
.PP
\fBgpfs_ireadlink()\fR - Reads a symbolic link by inode number.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_ireadlink(gpfs_fssnap_handle_t *fssnapHandle,
                  gpfs_ino_t ino,\ 
                  char *buffer,\ 
                  int bufferSize);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_ireadlink()\fR subroutine reads a symbolic link by inode
number. As with \fBgpfs_iopen()\fR, use the same
\fBfssnapHandle\fR parameter that was used by the inode scan.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.PP
.RS +3
\fBino
\fR
.RE
.RS +9
Inode number of the link file to read.
.RE
.PP
.RS +3
\fBbuffer
\fR
.RE
.RS +9
Pointer to buffer for the returned link data.
.RE
.PP
.RS +3
\fBbufferSize
\fR
.RE
.RS +9
Size of the buffer.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_ireadlink()\fR subroutine is successful, it returns the
number of bytes read.
.PP
If the \fBgpfs_ireadlink()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_ireadlink()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBERANGE
\fR
.RE
.RS +9
The buffer is too small to return the symbolic link.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
ireadlk I4@$        
  AAA             
      ./usr/share/man/man3/gpfs_ireadx.3 .3 _f         $          $          .TH gpfs_ireadx 10/22/04
gpfs_ireadx() Subroutine
.SH "Name"
.PP
\fBgpfs_ireadx()\fR - Performs block level incremental read of a file
within an incremental inode scan.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
gpfs_off64_t gpfs_ireadx(gpfs_ifile_t *ifile,
                         gpfs_iscan_t *iscan,
                         void *buffer,
                         int bufferSize,
                         gpfs_off64_t *offset,
                         gpfs_off64_t termOffset,
                         int *hole);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_ireadx()\fR subroutine performs a block level incremental
read on a file opened by \fBgpfs_iopen()\fR within a given incremental scan
opened using \fBgpfs_open_inodescan()\fR.
.PP
The \fBgpfs_ireadx()\fR subroutine returns the data that has changed
since the \fBprev_fssnapId\fR specified for the inode scan. The file
is scanned starting at \fBoffset\fR and terminating at \fBtermOffset\fR,
looking for changed data. Once changed data is located, the
\fBoffset\fR parameter is set to its location, the new data is returned in
the \fBbuffer\fR provided, and the amount of data returned is the
subroutine's value.
.PP
If the change to the data is that it has been deleted (that is, the file
has been truncated), no data is returned, but the \fBhole\fR parameter is
returned with a value of 1, and the size of the \fBhole\fR is returned as
the subroutine's value. Note that the returned size of the hole
may exceed the \fBbufferSize\fR provided. If no changed data was
found before reaching the \fBtermOffset\fR or the end-of-file, then the
\fBgpfs_ireadx()\fR subroutine return value is 0.
.PP
Block-level incremental backups are available only if the previous snapshot
was not deleted. If it was deleted, \fBgpfs_ireadx()\fR may still be
used, but it returns all of the file's data, operating like the standard
\fBgpfs_iread()\fR subroutine. However, the \fBgpfs_ireadx()\fR
subroutine will still identify sparse files and explicitly return information
on holes in the files, rather than returning the NULL data.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBifile
\fR
.RE
.RS +9
Pointer to \fBgpfs_ifile_t\fR returned from
\fBgpfs_iopen()\fR.
.RE
.PP
.RS +3
\fBiscan
\fR
.RE
.RS +9
Pointer to \fBgpfs_iscan_t\fR from
\fBgpfs_open_inodescan()\fR.
.RE
.PP
.RS +3
\fBbuffer
\fR
.RE
.RS +9
Pointer to buffer for returned data, or NULL to query the next increment
to be read.
.RE
.PP
.RS +3
\fBbufferSize
\fR
.RE
.RS +9
Size of buffer for returned data.
.RE
.PP
.RS +3
\fBoffset
\fR
.RE
.RS +9
On input, the offset to start the scan for changes. On output, the
offset of the changed data, if any was detected.
.RE
.PP
.RS +3
\fBtermOffset
\fR
.RE
.RS +9
Read terminates before reading this offset. The caller may specify
\fBia_size\fR from the file's \fBgpfs_iattr_t\fR or 0 to scan the
entire file.
.RE
.PP
.RS +3
\fBhole
\fR
.RE
.RS +9
Pointer to a flag returned to indicate a hole in the file. A value
of 0 indicates that the \fBgpfs_ireadx()\fR subroutine returned data in the
\fBbuffer\fR. A value of 1 indicates that \fBgpfs_ireadx()\fR
encountered a hole at the returned \fBoffset\fR.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_ireadx()\fR subroutine is successful, it returns the
number of bytes read and returned in \fBbuffer\fR, or the size of the hole
encountered in the file.
.PP
If the \fBgpfs_ireadx()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEDOM
\fR
.RE
.RS +9
The file system snapshot ID from the \fBiscanId\fR does not match the
\fBifile\fR's.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
Missing or incorrect parameter.
.RE
.PP
.RS +3
\fBEISDIR
\fR
.RE
.RS +9
The specified file is a directory.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Unable to allocate memory for request.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_ireadx()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBERANGE
\fR
.RE
.RS +9
The file system snapshot ID from the \fBiscanId\fR is more recent than
the \fBifile\fR's.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_IFILE
\fR
.RE
.RS +9
Incorrect \fBifile\fR parameter.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_ISCAN
\fR
.RE
.RS +9
Incorrect \fBiscan\fR parameter.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
pened kc I4@$          ;xA;xA;xA                   ./usr/share/man/man3/gpfs_iscan_t.3 3 _f         $          $          .TH gpfs_iscan_t 10/22/04
gpfs_iscan_t Structure
.SH "Name"
.PP
\fBgpfs_iscan_t\fR - Contains a handle for an inode scan of a GPFS file
system or snapshot.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct gpfs_iscan gpfs_iscan_t;
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_iscan_t\fR structure contains a handle for an inode scan of
a GPFS file system or snapshot.
.SH "Members"
.PP
.RS +3
\fBgpfs_iscan
\fR
.RE
.RS +9
The handle for an inode scan for a GPFS file system or snapshot.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_iscan_t\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
d k I4@$          AAA                   ./usr/share/man/man3/gpfs_next_inode.3 f         $          $          .TH gpfs_next_inode 10/22/04
gpfs_next_inode() Subroutine
.SH "Name"
.PP
\fBgpfs_next_inode()\fR - Retrieves the next inode from the inode
scan.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_next_inode(gpfs_iscan_t *iscan,
                    gpfs_ino_t termIno,
                    const gpfs_iattr_t **iattr);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_next_inode()\fR subroutine obtains the next inode from the
specified inode scan and sets the \fBiattr\fR pointer to the inode's
attributes. The \fBtermIno\fR parameter can be used to terminate the
inode scan before the last inode in the file system or snapshot being
scanned. A value of 0 may be provided to indicate the last inode in the
file system or snapshot. If there are no more inodes to be returned
before the termination inode, the \fBgpfs_next_inode()\fR subroutine
returns a value of 0 and the inode's attribute pointer is set to
NULL.
.PP
To generate a full backup, invoke \fBgpfs_open_inodescan()\fR with NULL for the
\fBprev_fssnapId\fR parameter. Repeated invocations of
\fBgpfs_next_inode()\fR then return inode information about all existing
user files, directories and links, in inode number order.
.PP
To generate an incremental backup, invoke \fBgpfs_open_inodescan()\fR
with the \fBfssnapId\fR that was obtained from a \fBfssnapHandle\fR at the
time the previous backup was created. The snapshot that was used for
the previous backup does not need to exist at the time the incremental backup
is generated. That is, the backup application needs to remember only
the \fBfssnapId\fR of the previous backup; the snapshot itself can be
deleted as soon as the backup is completed.
.PP
For an incremental backup, only inodes of files that have changed since the
specified previous snapshot will be returned. Any operation that
changes the file's \fBmtime\fR or \fBctime\fR is considered a
change and will cause the file to be included. Files with no changes to
the file's data or file attributes, other than a change to
\fBatime\fR, are omitted from the scan.
.PP
Incremental backups return deleted files, but full backups do not. A
deleted file is indicated by the field \fBia_nlinks\fR having a value of
0.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBiscan
\fR
.RE
.RS +9
Pointer to the inode scan handle.
.RE
.PP
.RS +3
\fBtermIno
\fR
.RE
.RS +9
The inode scan terminates before this inode number. The caller may
specify \fBmaxIno\fR from \fBgpfs_open_inodescan()\fR or zero to scan
the entire inode file.
.RE
.PP
.RS +3
\fBiattr
\fR
.RE
.RS +9
Pointer to the returned pointer to the inode's \fBiattr\fR.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_next_inode()\fR subroutine is successful, it returns a
value of 0 and a pointer. The pointer points to NULL if there are no
more inodes. Otherwise, the pointer points to the returned inode's
attributes.
.PP
If the \fBgpfs_next_inode()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Unable to allocate memory for request.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_next_inode()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_ISCAN
\fR
.RE
.RS +9
Incorrect parameters.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_next_inode()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k8 I4@$          wAwAwA                   ./usr/share/man/man3/gpfs_opaque_acl_t.3 om_fssn         $          $          .TH gpfs_opaque_acl_t 10/22/04
gpfs_opaque_acl_t Structure
.SH "Name"
.PP
\fBgpfs_opaque_acl_t\fR - Contains buffer mapping for the \fBgpfs_getacl()\fR and \fBgpfs_putacl()\fR subroutines.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Structure"
.sp
.nf
typedef struct
{
  int             acl_buffer_len;
  unsigned short  acl_version;
  unsigned char   acl_type;\ 
  char            acl_var_data[1];
} gpfs_opaque_acl_t;
\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_opaque_acl_t\fR structure contains size, version, and ACL
type information for the \fBgpfs_getacl()\fR and \fBgpfs_putacl()\fR subroutines.
.SH "Members"
.PP
.RS +3
\fBacl_buffer_len
\fR
.RE
.RS +9
On input, this field must be set to the total length, in bytes, of the
data structure being passed to GPFS. On output, this field contains the
actual size of the requested information. If the initial size of the
buffer is not large enough to contain all of the information, the \fBgpfs_getacl()\fR invocation must be repeated with a
larger buffer.
.RE
.PP
.RS +3
\fBacl_version
\fR
.RE
.RS +9
This field contains the current version of the GPFS internal
representation of the ACL. On input to the \fBgpfs_getacl()\fR
subroutine, set this field to zero.
.RE
.PP
.RS +3
\fBacl_type
\fR
.RE
.RS +9
On input to the \fBgpfs_getacl()\fR subroutine,
set this field to either \fBGPFS_ACL_TYPE_ACCESS\fR or
\fBGPFS_ACL_TYPE_DEFAULT\fR, depending on which ACL is requested.
These constants are defined in the \fBgpfs.h\fR header file.
.RE
.PP
.RS +3
\fBacl_var_data
\fR
.RE
.RS +9
This field signifies the beginning of the remainder of the ACL
information.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
nodek I4@$        b  AAA             b      ./usr/share/man/man3/gpfs_open_inodescan.3 _fssn         $          $          .TH gpfs_open_inodescan 10/22/04
gpfs_open_inodescan() Subroutine
.SH "Name"
.PP
\fBgpfs_open_inodescan()\fR - Opens an inode scan of a file system
or snapshot.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
gpfs_iscan_t *gpfs_open_inodescan
               (gpfs_fssnap_handle_t *fssnapHandle,
               const gpfs_fssnap_id_t *prev_fssnapId,
               gpfs_ino_t *maxIno);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_open_inodescan()\fR subroutine opens a scan of the inodes in
the file system or snapshot identified by the \fBfssnapHandle\fR
parameter. The scan traverses all user files, directories and links in
the file system or snapshot. The scan begins with the user file with
the lowest inode number and returns the files in increasing order. The
\fBgpfs_seek_inode()\fR subroutine may be used to set the scan position to
an arbitrary inode. System files, such as the block allocation maps,
are omitted from the scan. The file system must be mounted to open an
inode scan.
.PP
To generate a full backup, invoke \fBgpfs_open_inodescan()\fR with NULL
for the \fBprev_fssnapId\fR parameter. Repeated invocations of \fBgpfs_next_inode()\fR then return inode information
about all existing user files, directories and links, in inode number
order.
.PP
To generate an incremental backup, invoke \fBgpfs_open_inodescan()\fR
with the \fBfssnapId\fR that was obtained from a \fBfssnapHandle\fR at
the time the previous backup was created. The snapshot that was used
for the previous backup does not need to exist at the time the incremental
backup is generated. That is, the backup application needs to remember
only the \fBfssnapId\fR of the previous backup; the snapshot itself
can be deleted as soon as the backup is completed.
.PP
For the incremental backup, any operation that changes the file's
\fBmtime\fR or \fBctime\fR causes the file to be included. Files
with no changes to the file's data or file attributes, other than a
change to \fBatime\fR, are omitted from the scan.
.PP
A full inode scan (\fBprev_fssnapId\fR set to NULL) does not return any
inodes of nonexistent or deleted files, but an incremental inode scan
(\fBprev_fssnapId\fR not NULL) does return inodes for files that have been
deleted since the previous snapshot. The inodes of deleted files have a
link count of zero.
.PP
If the snapshot indicated by \fBprev_fssnapId\fR is available, the
caller may benefit from the extended read subroutine, \fBgpfs_ireadx()\fR,
which returns only the changed blocks within the files. Without the
previous snapshot all blocks within the changed files are returned.
.PP
Once a full or incremental backup completes, the \fBnew_fssnapId\fR must
be saved in order to reuse it on a subsequent incremental backup. This
\fBfssnapId\fR must be provided to the \fBgpfs_open_inodescan()\fR
subroutine, as the \fBprev_fssnapId\fR input parameter.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfssnapHandle
\fR
.RE
.RS +9
File system snapshot handle.
.RE
.PP
.RS +3
\fBprev_fssnapId
\fR
.RE
.RS +9
Pointer to file system snapshot ID or NULL. If
\fBprev_fssnapId\fR is provided, the inode scan returns only the files that
have changed since the previous backup. If the pointer is NULL, the
inode scan returns all user files.
.RE
.PP
.RS +3
\fBmaxIno
\fR
.RE
.RS +9
Pointer to inode number or NULL. If provided,
\fBgpfs_open_inodescan()\fR returns the maximum inode number in the file
system or snapshot being scanned.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_open_inodescan()\fR subroutine is successful, it returns
a pointer to an inode scan handle.
.PP
If the \fBgpfs_open_inodescan()\fR subroutine is unsuccessful, it
returns a NULL pointer and the global error variable \fBerrno\fR is set to
indicate the nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEDOM
\fR
.RE
.RS +9
The file system snapshot ID passed for \fBprev_fssnapId\fR is from a
different file system.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
Incorrect parameters.
.RE
.PP
.RS +3
\fBENOMEM
\fR
.RE
.RS +9
Unable to allocate memory for request.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_open_inodescan()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBEPERM
\fR
.RE
.RS +9
The caller does not have superuser privileges.
.RE
.PP
.RS +3
\fBERANGE
\fR
.RE
.RS +9
The \fBprev_fssnapId\fR parameter is the same as or more recent than
\fBsnapId\fR being scanned.
.RE
.PP
.RS +3
\fBESTALE
\fR
.RE
.RS +9
Cached file system information was not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPHANDLE
\fR
.RE
.RS +9
The file system snapshot handle is not valid.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_FSSNAPID
\fR
.RE
.RS +9
The file system snapshot ID passed for \fBprev_fssnapId\fR is not
valid.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_open_inodescan()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsbackup.C\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
descank~( I4@$          wAwAwA                   ./usr/share/man/man3/gpfs_prealloc.3 can         $          $          .TH gpfs_prealloc 10/22/04
gpfs_prealloc() Subroutine
.SH "Name"
.PP
\fBgpfs_prealloc()\fR - Pre-allocates disk storage for a GPFS
file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_prealloc(int fileDesc, offset_t StartOffset,
                 offset_t BytesToPrealloc)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_prealloc()\fR subroutine is used to preallocate disk storage
for a file that has already been opened, prior to writing data to the
file. The pre-allocated disk storage is started at the requested
offset, \fBStartOffset\fR, and covers at least the number of bytes
requested, \fBBytesToPrealloc\fR. Allocations are rounded to GPFS
sub-block boundaries.
.PP
The preallocation of disk space for a file provides an efficient method for
allocating storage without having to write any data. This can result in
faster I/O compared to a file which gains disk space incrementally as it
grows.
.PP
Existing data in the file is not modified. Reading any of the
pre-allocated blocks returns zeroes.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBfileDesc
\fR
.RE
.RS +9
An integer specifying the file descriptor returned by
\fBopen()\fR.
.PP
The file for which preallocation is to be performed must be opened for
writing.
.RE
.PP
.RS +3
\fBStartOffset
\fR
.RE
.RS +9
The byte offset into the file at which preallocation begins.
.RE
.PP
.RS +3
\fBBytesToPrealloc
\fR
.RE
.RS +9
The number of bytes to preallocate.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_prealloc()\fR subroutine is successful, it returns a
value of 0.
.PP
If the \fBgpfs_prealloc()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error. If \fBerrno\fR is set to one of the following,
some storage may have been pre-allocated:
.RS +3
.HP 3
\(bu EDQUOT
.HP 3
\(bu EFBIG
.HP 3
\(bu ENOSPC
.HP 3
\(bu ENOTREADY
.RE
.PP
The only way to tell how much space was actually pre-allocated is to invoke
the \fBstat()\fR subroutine and compare the reported file size and number
of blocks used with their values prior to preallocation.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEACCES
\fR
.RE
.RS +9
The file is not opened for writing.
.RE
.PP
.RS +3
\fBEBADF
\fR
.RE
.RS +9
The file descriptor is not valid.
.RE
.PP
.RS +3
\fBEDQUOT
\fR
.RE
.RS +9
A disk quota has been exceeded.
.RE
.PP
.RS +3
\fBEFBIG
\fR
.RE
.RS +9
The file has become too large for the file system or has exceeded the file
size as defined by the user's \fBulimit\fR value.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The file descriptor does not refer to a GPFS file or a regular file;
a negative value was specified for \fBStartOffset\fR or
\fBBytesToPrealloc\fR.
.RE
.PP
.RS +3
\fBENOTREADY
\fR
.RE
.RS +9
The file system on which the file resides has become unavailable.
.RE
.PP
.RS +3
\fBENOSPC
\fR
.RE
.RS +9
The file system has run out of disk space.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_prealloc()\fR subroutine is not supported under the current
file system format.
.RE
.SH "Examples"
.sp
.nf
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <gpfs.h>
int rc;
int fileHandle = -1;
char* fileNameP = "datafile";
offset_t startOffset = 0;
offset_t bytesToAllocate = 20*1024*1024;  /* 20 MB */
fileHandle = open(fileNameP, O_RDWR|O_CREAT, 0644);
if (fileHandle < 0)
 {
   perror(fileNameP);
   exit(1);
 }
rc = gpfs_prealloc(fileHandle, startOffset, bytesToAllocate);\ 
if (rc < 0)
 {
   fprintf(stderr, "Error %d preallocation at %lld for %lld in %s\\n",
             errno, startOffset, bytesToAllocate, fileNameP);
   exit(1);
 }\ 
\ 
.fi
.sp
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k I4@$          wAwAwA                   ./usr/share/man/man3/gpfs_putacl.3 3 can         $          $          .TH gpfs_putacl 10/22/04
gpfs_putacl() Subroutine
.SH "Name"
.PP
\fBgpfs_putacl()\fR - Restores the access control information for
a GPFS file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_putacl(char *pathname, int flags, void *aclP)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_putacl()\fR subroutine together with the \fBgpfs_getacl()\fR subroutine is intended for use by a
backup program to save (\fBgpfs_getacl()\fR) and
restore (\fBgpfs_putacl()\fR) the ACL information for the file.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. The use of \fBgpfs_fgetattrs()\fR and \fBgpfs_fputattrs()\fR is preferred.
.sp
.HP 3
2. You must have \fBwrite\fR access to the file.
.sp
.HP 3
3. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBpathname
\fR
.RE
.RS +9
Path name of the file for which the ACL is to be set.
.RE
.PP
.RS +3
\fBflags
\fR
.RE
.RS +9
\fBRESERVED\fR. This value must be set to zero.
.RE
.PP
.RS +3
\fBaclP
\fR
.RE
.RS +9
Pointer to a buffer mapped by the structure \fBgpfs_opaque_acl_t\fR where the ACL data is
stored. This should be the result of a previous invocation of \fBgpfs_getacl()\fR.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_putacl()\fR subroutine is successful, it returns a value
of 0.
.PP
If the \fBgpfs_putacl()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_putacl()\fR subroutine is not supported under the current
file system format.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
k̜ I4@$          =xA=xA=xA                   ./usr/share/man/man3/gpfs_seek_inode.3 n         $          $          .TH gpfs_seek_inode 10/22/04
gpfs_seek_inode() Subroutine
.SH "Name"
.PP
\fBgpfs_seek_inode()\fR - Advances an inode scan to the specified inode
number.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_seek_inode(gpfs_iscan_t *iscan,\ 
                    gpfs_ino_t ino);
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_seek_inode()\fR subroutine advances an inode scan to the
specified inode number.
.PP
The \fBgpfs_seek_inode()\fR subroutine is used to start an inode scan at
some place other than the beginning of the inode file. This is useful
to restart a partially completed backup or an interrupted dump transfer to a
mirror. It could also be used to do an inode scan in parallel from
multiple nodes, by partitioning the inode number space into separate ranges
for each participating node. The maximum inode number is returned when
the scan was opened and each invocation to obtain the next inode specifies a
termination inode number to avoid returning the same inode more than
once.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBiscan
\fR
.RE
.RS +9
Pointer to the inode scan handle.
.RE
.PP
.RS +3
\fBino
\fR
.RE
.RS +9
The next inode number to be scanned.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_seek_inode()\fR subroutine is successful, it returns a
value of 0.
.PP
If the \fBgpfs_seek_inode()\fR subroutine is unsuccessful, it returns a
value of -1 and sets the global error variable \fBerrno\fR to indicate the
nature of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_seek_inode()\fR subroutine is not available.
.RE
.PP
.RS +3
\fBGPFS_E_INVAL_ISCAN
\fR
.RE
.RS +9
Incorrect parameters.
.RE
.SH "Examples"
.PP
For an example using \fBgpfs_seek_inode()\fR, see
\fB/usr/lpp/mmfs/samples/util/tsinode.c\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
y tok I4@$        Q  AAA             Q      ./usr/share/man/man3/gpfs_stat.3 ode.3 n         $          $          .TH gpfs_stat 10/22/04
gpfs_stat() Subroutine
.SH "Name"
.PP
\fBgpfs_stat()\fR - Returns exact file status for a GPFS
file.
.SH "Library"
.PP
GPFS Library (\fBlibgpfs.a\fR for AIX, \fBlibgpfs.so\fR
for Linux)
.SH "Synopsis"
.sp
.nf
#include <gpfs.h>
int gpfs_stat(char *pathName, struct stat64 *Buffer)\ 
.fi
.sp
.SH "Description"
.PP
The \fBgpfs_stat()\fR subroutine is used to obtain exact information
about the file named by the \fBpathName\fR parameter. This
subroutine is provided as an alternative to the \fBstat()\fR subroutine,
which may not provide exact \fBmtime\fR and \fBatime\fR values.
.PP
\fBread\fR, \fBwrite\fR, or \fBexecute\fR permission for the named
file is not required, but all directories listed in the path leading to the
file must be searchable. The file information is written to the area
specified by the \fBBuffer\fR parameter.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. Compile any program that uses this subroutine with the \fB-lgpfs\fR
flag from the following library:
.RS +3
.HP 3
\(bu \fBlibgpfs.a\fR for AIX
.HP 3
\(bu \fBlibgpfs.so\fR for Linux
.RE
.RE
.SH "Parameters"
.PP
.RS +3
\fBpathName
\fR
.RE
.RS +9
The path identifying the file for which exact status information is
requested.
.RE
.PP
.RS +3
\fBBuffer
\fR
.RE
.RS +9
A pointer to the \fBstat64\fR structure in which the information is
returned. The \fBstat64\fR structure is described in the
\fBsys/stat.h\fR file.
.RE
.SH "Exit status"
.PP
If the \fBgpfs_stat()\fR subroutine is successful, it returns a value of
0.
.PP
If the \fBgpfs_stat()\fR subroutine is unsuccessful, it returns a value
of -1 and sets the global error variable \fBerrno\fR to indicate the nature
of the error.
.SH "Exceptions"
.PP
None.
.SH "Error status"
.PP
.RS +3
\fBEBADF
\fR
.RE
.RS +9
The path name is not valid.
.RE
.PP
.RS +3
\fBEINVAL
\fR
.RE
.RS +9
The path name does not refer to a GPFS file or a regular file.
.RE
.PP
.RS +3
\fBENOSYS
\fR
.RE
.RS +9
The \fBgpfs_stat()\fR subroutine is not supported under the current
file system format.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/lib/libgpfs.a\fR for AIX,
\fB/usr/lpp/mmfs/lib/libgpfs.so\fR for Linux
.PP
,
\fB/ukl J4@$        5  AAA             5      ./usr/share/man/man5/mmsdrbackup.5 e.3 n         $          $          .TH mmsdrbackup 11/01/04
mmsdrbackup User exit
.SH "Name"
.PP
\fBmmsdrbackup\fR - Performs a backup of the GPFS configuration
data.
.SH "Description"
.PP
The \fB/var/mmfs/etc/mmsdrbackup\fR user exit, when properly installed
on the primary GPFS configuration server, will be asynchronously invoked every
time there is a change to the GPFS master configuration file. This user
exit can be used to create a backup of the GPFS configuration data.
.PP
Read the sample file
\fB/usr/lpp/mmfs/samples/mmsdrbackup.samples\fR for a detailed
description on how to code and install this user exit.
.SH "Parameters"
.PP
The generation number of the most recent version of the GPFS configuration
data.
.SH "Exit status"
.PP
The \fBmmsdrbackup\fR user exit should always return a value of
zero.
.SH "Location"
.PP
\fB/var/mmfs/etc\fR
fiek~N J4@$          AAA                   ./usr/share/man/man5/nsddevices.5  e.3 n         $          $          .TH nsddevices 11/01/04
nsddevices User exit
.SH "Name"
.PP
\fBnsddevices\fR - Identifies local physical devices that are used
as GPFS Network Shared Disks (NSDs).
.SH "Description"
.PP
The \fB/var/mmfs/etc/nsddevices\fR user exit, when properly installed,
is invoked synchronously by the GPFS daemon during its disk discovery
processing. The purpose of this procedure is to discover and verify the
physical devices on each node that correspond to the disks previously defined
to GPFS with the \fBmmcrnsd\fR command. The
\fBnsddevices\fR user exit can be used to either replace or to supplement
the disk discovery procedure of the GPFS daemon.
.PP
Read the sample file
\fB/usr/lpp/mmfs/samples/nsddevices.samples\fR for a detailed
description on how to code and install this user exit.
.SH "Parameters"
.PP
None.
.SH "Exit status"
.PP
The \fBnsddevices\fR user exit should return either zero or one.
.PP
When the \fBnsddevices\fR user exit returns a value of zero, the GPFS
disk discovery procedure is bypassed.
.PP
When the \fBnsddevices\fR user exit returns a value of one, the GPFS
disk discovery procedure is performed and the results are concatenated with
the results from the \fBnsddevices\fR user exit.
.SH "Location"
.PP
\fB/var/mmfs/etc\fR
E
.RS +kL@ J4@$          AAA                   ./usr/share/man/man5/syncfsconfig.5 .3 n         $          $          .TH syncfsconfig 11/01/04
syncfsconfig User exit
.SH "Name"
.PP
\fBsyncfsconfig\fR - Keeps file system configuration data in
replicated clusters synchronized.
.SH "Description"
.PP
The \fB/var/mmfs/etc/syncfsconfig\fR user exit, when properly installed,
will be synchronously invoked after each command that may change the
configuration of a file system. Examples of such commands are: \fBmmadddisk\fR, \fBmmdeldisk\fR, and so forth. The
\fBsyncfsconfig\fR user exit can be used to keep the file system
configuration data in replicated GPFS clusters automatically
synchronized.
.PP
Read the sample file
\fB/usr/lpp/mmfs/samples/syncfsconfig.samples\fR for a detailed
description on how to code and install this user exit.
.SH "Parameters"
.PP
None.
.SH "Exit status"
.PP
The \fBsyncfsconfig\fR user exit should always return a value of
zero.
.SH "Location"
.PP
\fB/var/mmfs/etc\fR
.PP

k| J4@$        $  AAA             $      ./usr/share/man/man8/mmadddisk.8 .5 .3 n         $          $          .TH mmadddisk 11/01/04
mmadddisk Command
.SH "Name"
.PP
\fBmmadddisk\fR - Adds disks to a GPFS file system.
.SH "Synopsis"
.PP
\fBmmadddisk\fR \fIDevice \fR
{"\fIDiskDesc\fR[;\fIDiskDesc\fR...]"
| \fB-F\fR \fIDescFile\fR} [\fB-a\fR]
[\fB-r\fR] [\fB-v\fR {\fB\fIyes\fR\fR | \fBno\fR}
] [\fB-N\fR {\fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR...]}
]
.SH "Description"
.PP
Use the \fBmmadddisk\fR command to add disks to a GPFS file
system. This command optionally rebalances an existing file system
after adding disks when the \fB-r\fR flag is specified. The
\fBmmadddisk\fR command does not require the file system to be unmounted
before issuing the command. The file system can be in use while the
command is run.
.PP
\fIDevice\fR must be the first parameter.
.PP
The \fB-N\fR parameter can be used only in conjunction with the
\fB-r\fR option.
.PP
To add disks to a GPFS file system, you first must decide if you will:
.RS +3
.HP 3
1. Create new disks using the \fBmmcrnsd\fR
command. 
.sp
You should also decide whether to use the rewritten disk descriptor file
produced by the \fBmmcrnsd\fR command, or create a
new list of disk descriptors. When using the rewritten file, the
\fIDisk Usage\fR and \fIFailure Group\fR specifications will remain the
same as specified on the \fBmmcrnsd\fR
command.
.HP 3
2. Select disks no longer in use in any file system. Issue the \fBmmlsnsd -F\fR command to display the available
disks.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to which the disks are added.
File system names need not be fully-qualified. \fBfs0\fR is as
acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB\fIDiskDesc\fR
\fR
.RE
.RS +9
A descriptor for each disk to be added. Each descriptor is
delimited by a semicolon (;) and the entire list must be enclosed in
quotation marks (' or "). 
.PP
The current maximum number of disk descriptors that can be defined
for any single file system is 2048. The actual number of disks in your
file system may be constrained by products other than GPFS which you have
installed. Refer to the individual product documentation.
.PP
A disk descriptor is defined as (second and third fields reserved)
.sp
.nf
DiskName:::DiskUsage:FailureGroup
.fi
.sp
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
.PP
You must specify the name of the NSD previously created by the \fBmmcrnsd\fR command. For a list of available
disks, issue the \fBmmlsnsd -F\fR command.
.RE
.PP
.RS +3
\fB\fIDiskUsage\fR
\fR
.RE
.RS +9
Specify a disk usage or accept the default: 
.PP
.RS +3
\fBdataAndMetadata
\fR
.RE
.RS +9
Indicates that the disk contains both data and metadata. This is
the default.
.RE
.PP
.RS +3
\fBdataOnly
\fR
.RE
.RS +9
Indicates that the disk contains data and does not contain
metadata.
.RE
.PP
.RS +3
\fBmetadataOnly
\fR
.RE
.RS +9
Indicates that the disk contains metadata and does not contain
data.
.RE
.PP
.RS +3
\fBdescOnly
\fR
.RE
.RS +9
Indicates that the disk contains no data and no metadata. Such a
disk is used solely to keep a copy of the file system descriptor, and can be
used as a third failure group in certain disaster recovery
configurations. 
.RE
.RE
.PP
.RS +3
\fB\fIFailureGroup\fR
\fR
.RE
.RS +9
A number identifying the failure group to which this disk
belongs. You can specify any value from -1 (where -1 indicates that the
disk has no point of failure in common with any other disk) to 4000. If
you do not specify a failure group, the value defaults to the NSD primary
server node number plus 4000. If an NSD server node is not specified,
the value defaults to -1. GPFS uses this information during data and
metadata placement to assure that no two replicas of the same block are
written in such a way as to become unavailable due to a single failure.
All disks that are attached to the same NSD server or adapter should be placed
in the same failure group.
.RE
.RE
.PP
.RS +3
\fB-F \fIDescFile\fR
\fR
.RE
.RS +9
Specifies a file containing a list of disk descriptors, one per
line. You may use the rewritten \fIDiskDesc\fR file created by the
\fBmmcrnsd\fR command or create your own file.
When using the \fIDiskDesc\fR file created by the \fBmmcrnsd\fR command, the values supplied on input to
the command for \fIDisk Usage\fR and \fIFailureGroup \fR are
used. When creating your own file, you must specify these values or
accept the system defaults. A sample file can be found in
\fB/usr/lpp/mmfs/samples/diskdesc\fR.
.RE
.PP
.RS +3
\fB-N { \fB\fIall\fR\fR | \fBmount\fR  |
\fINodeName\fR[,\fINodeName\fR...] }
\fR
.RE
.RS +9
Specifies the nodes that are to participate in the restripe of the file
system after the specified disks have been made available for use by
GPFS. This parameter can be used only in conjunction with the \fB-r
\fR option.
.PP
Valid values are:
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Indicates that all nodes in the GPFS cluster, whether they have the
file system mounted or not, are to participate in the restripe of the file
system. This is the default when the \fB-N\fR option has not been
specified.
.RE
.PP
.RS +3
\fBmount
\fR
.RE
.RS +9
Indicates that only the nodes that have the file system mounted are to
participate in the restripe of the file system.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR...]
\fR
.RE
.RS +9
A comma-separated list of nodes to participate in the restripe of the file
system.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Specifies whether the \fBmmadddisk\fR command waits for rebalancing to
complete before returning. If this flag is specified, the
\fBmmadddisk\fR command runs asynchronously and returns after the file
system descriptor is updated and the rebalancing scan is started, but it does
not wait for rebalancing to finish. If no rebalancing is requested (the
\fB-r\fR flag not specified), this option has no effect.
.RE
.PP
.RS +3
\fB-r
\fR
.RE
.RS +9
Rebalance all existing files in the file system to make use of new
disks.
.RS +3
\fBNote:\fR
.RE
.RS +9
Rebalancing of files is an I/O intensive and time consuming
operation, and is important only for file systems with large files that are
mostly invariant. In many cases, normal file update and creation will
rebalance your file system over time, without the cost of the
rebalancing.
.RE
.RE
.PP
.RS +3
\fB-v {\fB\fIyes\fR\fR | no}
\fR
.RE
.RS +9
Verify that specified disks do not belong to an existing file
system. The default is \fB-v yes\fR. Specify \fB-v no\fR
only when you want to reuse disks that are no longer needed for an existing
file system. If the command is interrupted for any reason, you must use
the \fB-v no\fR option on the next invocation of the command.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmadddisk\fR command.
.PP
You may issue the \fBmmadddisk\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on the \fBmmcrcluster\fR or the
\fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To add the disk \fBgpfs2nsd\fR to the file system \fBfs1\fR, use it
for data only, have it belong to failure group 3, and rebalance the existing
files after it is added, enter:
.sp
.nf
mmadddisk fs1 gpfs2nsd:::dataOnly:3 -r
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
GPFS: 6027-531 The following disks of fs1 will be formatted on\ 
node k145n01:  gpfs2nsd: size 4390912 KB
Extending Allocation Map
GPFS: 6027-1503 Completed adding disks to file system fs1.
mmadddisk: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
Restriping fs1 ...
GPFS: 6027-589 Scanning file system metadata, phase 1 ...\ 
  70 % complete on Wed Aug 16 15:14:28 2000
 100 % complete on Wed Aug 16 15:14:29 2000
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 2 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 3 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-565 Scanning user file metadata ...
   6 % complete on Wed Aug 16 15:14:45 2000
 100 % complete on Wed Aug 16 15:14:46 2000
GPFS: 6027-552 Scan completed successfully.
Done
.fi
.sp
.SH "See also"
.PP
mmchdisk Command
.PP
mmcrnsd Command
.PP
mmdeldisk Command
.PP
mmlsdisk Command
.PP
mmlsnsd Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
eNamek  J4@$          AAA                   ./usr/share/man/man8/mmaddnode.8 .5 .3 n         $          $          .TH mmaddnode 11/01/04
mmaddnode Command
.SH "Name"
.PP
\fBmmaddnode\fR - Adds nodes to a GPFS cluster.
.SH "Synopsis"
.PP
\fBmmaddnode\fR {\fB-n\fR \fINodeFile\fR |
\fINodeDesc\fR[,\fINodeDesc\fR...]}
.SH "Description"
.PP
Use the \fBmmaddnode\fR command to add nodes to an existing GPFS
cluster. On each new node, a mount point directory and character mode
device is created for each GPFS file system.
.PP
You must follow these rules when adding nodes to a GPFS
cluster:
.RS +3
.HP 3
\(bu You may issue the command only from a node that already belongs to the
GPFS cluster.
.HP 3
\(bu While a node may mount file systems from multiple clusters, the node
itself may only be added to a single cluster using the \fBmmcrcluster\fR or
\fBmmaddnode\fR command.
.HP 3
\(bu The nodes must be available for the command to be successful. If
any of the nodes listed are not available when the command is issued, a
message listing those nodes is displayed. You must correct the problem
on each node and reissue the command to add those nodes.
.RE
.SH "Parameters"
.PP
.RS +3
\fB-n \fINodeFile\fR
\fR
.RE
.RS +9
Specifies the file containing the list of node descriptors (see
below), one per line, to be added to the cluster.
.RE
.PP
.RS +3
\fB\fINodeDesc\fR[,\fINodeDesc\fR...]
\fR
.RE
.RS +9
Specifies the list of nodes and node designations to be added to the
GPFS cluster. Node descriptors are defined as:
.sp
.nf
NodeName:NodeDesignations
.fi
.sp
.PP
where:
.RS +3
.HP 3
1. \fBNodeName\fR is the hostname or IP address to be used by GPFS for
node to node communication.
.sp
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.HP 3
2. \fBNodeDesignations\fR is an optional, '-' separated
list of node roles.
.RS +3
.HP 3
\(bu \fBmanager\fR | \fB\fIclient\fR\fR Indicates whether a node is part of
the pool of nodes from which configuration and file system managers are
selected. The default is \fBclient\fR.
.HP 3
\(bu \fBquorum\fR | \fB\fInonquorum\fR\fR Indicates whether a node is
counted as a quorum node. The default is \fBnonquorum\fR.
.RE
.RE
.PP
You must provide a descriptor for each node to be added to the GPFS
cluster.
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmaddnode\fR command.
.PP
You may issue the \fBmmaddnode\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To add nodes \fBk164n06\fR and \fBk164n07\fR, designating
\fBk164n06\fR to be available as a \fBmanager\fR node, enter:
.sp
.nf
 mmaddnode k164n06:manager,k164n07
.fi
.sp
.PP
To confirm the addition, enter:
.sp
.nf
mmlscluster
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
GPFS cluster information
========================
  GPFS cluster name:         cluster1.kgn.ibm.com
  GPFS cluster id:           680681562214606028
  GPFS UID domain:           cluster1.kgn.ibm.com
  Remote shell command:      /usr/bin/rsh
  Remote file copy command:  /usr/bin/rcp
GPFS cluster configuration servers:
-------------------------------------
  Primary server:    k164n05.kgn.ibm.com
  Secondary server:  k164n04.kgn.ibm.com
 Node number  Node name  IP address     Full node name      Remarks
----------------------------------------------------------------------
       1      k164n04   198.117.68.68  k164n04.kgn.ibm.com quorum node
       2      k164n05   198.117.68.69  k164n05.kgn.ibm.com quorum node
       3      k164n06   198.117.68.70  k164n06.kgn.ibm.com
       4      k164n07   198.117.68.71  k164n07.kgn.ibm.com\ 
.fi
.sp
.SH "See also"
.PP
mmchconfig Command
.PP
mmcrcluster Command
.PP
mmchcluster Command
.PP
mmdelnode Command
.PP
mmlscluster Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
d tkꄒ z4@$        {  8B8B8B             {      ./usr/share/man/man8/mmauth.8 .8         $          $          .TH mmauth 05/20/05
mmauth Command
.SH "Name"
.PP
\fBmmauth\fR - Manages secure access to GPFS file systems.
.SH "Synopsis"
.PP
\fBmmauth\fR \fBadd\fR \fIremoteClusterName\fR \fB-k\fR
\fIkeyFile\fR
.PP
Or,
.PP
\fBmmauth\fR \fBdelete\fR \fB{\fR\fIremoteClusterName\fR \fB|
all }\fR
.PP
Or,
.PP
\fBmmauth\fR \fBdeny\fR \fB{\fR\fIremoteClusterName\fR
\fB| all }\fR \fB-f {\fR \fIDevice\fR \fB| all }\fR
.PP
Or,
.PP
\fBmmauth\fR \fBgenkey\fR
.PP
Or,
.PP
\fBmmauth\fR \fBgrant \fR \fB{\fR\fIremoteClusterName\fR
\fB| all }\fR \fB-f {\fR \fIDevice\fR \fB| all }\fR [\fB-a { \fB\fIrw\fR\fR | ro }\fR ]
.PP
Or,
.PP
\fBmmauth\fR \fBshow\fR [\fIremoteClusterName\fR \fB|
all\fR]
.PP
Or,
.PP
\fBmmauth\fR \fBupdate\fR \fIremoteClusterName\fR \fB-k\fR
\fIkeyFile\fR
.SH "Description"
.PP
The \fBmmauth\fR command prepares a cluster to grant secure access to
file systems owned locally. The \fBmmauth\fR command also prepares a
cluster to receive secure access to file systems owned by another
cluster. Use the \fBmmauth\fR command to generate a public/private
key pair for the local cluster. A public/private key pair must be
generated on both the cluster owning the file system and the cluster desiring
access to the file system. The administrators of the clusters are
responsible for exchanging the public portion of the public/private key
pair. Use the \fBmmauth\fR command to add or delete permission for a
cluster to mount file systems owned by the local cluster.
.PP
When a cluster generates a new public/private key pair, administrators of
clusters participating in remote file system mounts are responsible for
exchanging their respective key file \fB/var/mmfs/ssl/id_rsa.pub\fR
generated by this command.
.PP
The administrator of a cluster desiring to mount a file system from another
cluster must provide the received key file as input to the \fBmmremotecluster\fR command. The administrator
of a cluster allowing another cluster to mount a file system must provide the
received key file to the \fBmmauth\fR command.
.PP
The keyword appearing after \fBmmauth\fR determines which action is
performed:
.PP
.RS +3
\fBadd
\fR
.RE
.RS +9
Adds a cluster and its associated public key to the list of clusters
authorized to connect to this cluster for the purpose of mounting file systems
owned by this cluster.
.RE
.PP
.RS +3
\fBdelete
\fR
.RE
.RS +9
Deletes a cluster and its associated public key from the list of clusters
authorized to mount file systems owned by this cluster.
.RE
.PP
.RS +3
\fBdeny
\fR
.RE
.RS +9
Denies a cluster the authority to mount a specific file system owned by
this cluster.
.RE
.PP
.RS +3
\fBgenkey
\fR
.RE
.RS +9
Generates a new public/private key pair for this cluster. The
key pair is placed in \fB/var/mmfs/ssl\fR. This must be done at
least once before \fBcipherList\fR, the GPFS configuration parameter that
enables GPFS with OpenSSL, is set. For this option to succeed, GPFS
must be inactive on all nodes in the cluster.
.RE
.PP
.RS +3
\fBgrant
\fR
.RE
.RS +9
Allows a cluster to mount a specific file system owned by this
cluster.
.RE
.PP
.RS +3
\fBshow
\fR
.RE
.RS +9
Shows the list of clusters authorized to mount file systems owned by this
cluster. This authorization list is enforced only when
\fBcipherList\fR, the GPFS configuration parameter that enables GPFS with
OpenSSL, is set.
.RE
.PP
.RS +3
\fBupdate
\fR
.RE
.RS +9
Updates the public key associated with a cluster authorized to mount file
systems owned by this cluster.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIremoteClusterName\fR
\fR
.RE
.RS +9
Specifies the remote cluster name requesting access to local GPFS file
systems. The value \fBall\fR indicates all remote clusters defined
to the local cluster.
.RE
.SH "Options"
.PP
.RS +3
\fB-a { \fB\fIrw\fR\fR | ro }
\fR
.RE
.RS +9
The type of access allowed:
.PP
.RS +3
\fBro
\fR
.RE
.RS +9
Specifies read-only access.
.RE
.PP
.RS +3
\fBrw
\fR
.RE
.RS +9
Specifies read/write access. This is the default.
.RE
.RE
.PP
.RS +3
\fB-f \fIDevice \fR
\fR
.RE
.RS +9
The device name for a file system owned by this cluster. The
\fIDevice \fR argument is required. If \fBall\fR is specified,
the command applies to all file systems owned by this cluster at the time that
the command is issued.
.RE
.PP
.RS +3
\fB-k \fIkeyFile\fR
\fR
.RE
.RS +9
Specifies the public key file generated by the \fBmmauth\fR command in
the cluster that wishes to remotely mount the local GPFS file system.
The \fIkeyFile\fR argument is required.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion. After a successful completion of the
\fBmmauth\fR command, the configuration change request will have been
propagated to all nodes in the cluster.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmauth\fR command.
.PP
You may issue the \fBmmauth\fR command from any node in the GPFS
cluster.
.SH "Examples"
.RS +3
.HP 3
1. This is an example of an \fB mmauth genkey\fR command:
.sp
.nf
 mmauth genkey
.fi
.sp
The output is similar to this:
.sp
.nf
Verifying GPFS is stopped on all nodes ...
Generating RSA private key, 512 bit long modulus
mmauth: Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.HP 3
2. This is an example of an \fB mmauth add\fR command:
.sp
.nf
mmauth add clustA.kgn.ibm.com -k /u/admin/keys/clustA.pub\ 
.fi
.sp
The output is similar to this:
.sp
.nf
mmauth: Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.HP 3
3. This is an example of an \fB mmauth update\fR command:
.sp
.nf
mmauth update clustA.kgn.ibm.com -k /u/admin/keys/clustA_new.pub\ 
.fi
.sp
The output is similar to this:
.sp
.nf
mmauth: Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.HP 3
4. This is an example of an \fBmmauth grant\fR command:
.sp
.nf
mmauth grant clustA.kgn.ibm.com -f /dev/gpfs1 -a ro
.fi
.sp
The output is similar to this:
.sp
.nf
mmauth: Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.HP 3
5. This is an example of an \fB mmauth show\fR command:
.sp
.nf
mmauth show all
.fi
.sp
The output is similar to this:
.sp
.nf
Cluster name:        clustA.kgn.ibm.com
Cipher list:         AUTHONLY
SHA digest:          a3917c8282fca7a27d951566940768dcd241902b
File system access:  gpfs1 (ro)\ 
.fi
.sp
.sp
.nf
Cluster name:        clustB.kgn.ibm.com (this cluster)
Cipher list:         AUTHONLY
SHA digest:          6ba5e3c1038246fe30f3fc8c1181fbb2130d7a8a
File system access:  (all rw)
.fi
.sp
.HP 3
6. This is an example of an \fBmmauth deny\fR command: 
.sp
.nf
mmauth deny clustA.kgn.ibm.com -f all
.fi
.sp
The output is similar to this:
.sp
.nf
mmauth: Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.HP 3
7. This is an example of an \fB mmauth delete\fR command:
.sp
.nf
mmauth delete all
.fi
.sp
The output is similar to this:
.sp
.nf
mmauth: Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.RE
.SH "See also"
.PP
mmremotefs Command
.PP
mmremotecluster Command
.PP
\fIAccessing GPFS file systems from other GPFS clusters\fR in \fIGeneral Parallel File System:
Administration Guide\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
tion k.] $J4@$          AAA                   ./usr/share/man/man8/mmbackup.8          $          $          .TH mmbackup 11/01/04
mmbackup Command
.SH "Name"
.PP
\fBmmbackup\fR - Backs up a GPFS file system to a backup
server.
.SH "Synopsis"
.PP
\fBmmbackup\fR \fIDevice\fR \fB-n\fR \fIControlFile\fR
[\fB-t\fR {\fBfull\fR | \fB\fIincremental\fR\fR}]
.PP
Or,
.PP
\fBmmbackup\fR \fIDevice\fR \fB-R\fR
.SH "Description"
.PP
Use the \fBmmbackup\fR command to back up a GPFS file system to a
backup server. The command may be issued from any GPFS node in the
cluster to which the file system being backed up belongs, and on which the
file system is mounted.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name for the file system to be backed up. This must be
the first parameter and must be fully-qualified, such as
\fB/dev/fs0\fR. The device name must be specified; there is no
default value.
.RE
.PP
.RS +3
\fB-n \fIControlFile\fR
\fR
.RE
.RS +9
Specifies the file containing the backup control information. The
file must be in the present working directory or the path must be fully
qualified. Each piece of control information must be on a separate line
and correctly qualified. Comment lines are allowed and must begin with
a \fB#\fR in column 1. Empty lines may not contain any blank
characters. Valid lines either contain a \fB#\fR in column 1
indicating a comment, an \fB=\fR indicating a value is being set, or no
characters at all.
.PP
This option may be specified only if the backup type is \fBfull\fR or
\fBincremental\fR. If the \fB-R\fR option has been specified,
this information is obtained from the control information specified on the
earlier full or incremental \fBmmbackup\fR command that completed with
partial success.
.PP
The allowable qualifiers in the control file are:
.PP
.RS +3
\fB\fIserverName\fR
\fR
.RE
.RS +9
The name of the node specified as the backup server qualified with
\fBserverName=\fR. The backup server node may or may not be a GPFS
node, although performance may be improved if it is also a backup client
node. You may specify only one backup server.
.RE
.PP
.RS +3
\fB\fIclientName\fR
\fR
.RE
.RS +9
The backup clients, one per line, qualified with
\fBclientName=\fR. The backup client nodes must be a member of the
GPFS cluster where the file system is mounted. For improved performance
it is suggested that multiple backup client nodes be specified. The
maximum number of backup clients supported is 32.
.RE
.PP
.RS +3
\fB\fInumberOfProcessesPerClient\fR
\fR
.RE
.RS +9
The number of processes per client qualified with
\fBnumberOfProcessesPerClient=\fR. The number of processes per
client may be specified only once.
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-R
\fR
.RE
.RS +9
Specifies to resume the previous backup that failed with a return code of
1 (partial success). If the previous backup failed with a return code
of 2 or succeeded with a return code of 0, this option does not succeed and a
new full or incremental backup must be initiated.
.RE
.PP
.RS +3
\fB-t {\fBfull\fR | \fB\fIincremental\fR\fR}
\fR
.RE
.RS +9
Specifies whether to perform a full backup of all of the files in the file
system, or an incremental backup of only those files that have changed since
the last backup was performed. The default is to perform an incremental
backup. The default is \fB-t incremental\fR.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
Partially successful completion. Not all of the eligible files were
successfully backed up. The command may be resumed by specifying the
\fB-R\fR option.
.RE
.PP
.RS +3
\fB2
\fR
.RE
.RS +9
A failure occurred that cannot be corrected by resuming the backup.
A new backup must be initiated.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmbackup\fR command.
.PP
You may issue the \fBmmbackup\fR command from any node in the
cluster where the file system being backed up is mounted.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly-configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
These examples use a control file named \fBinputctrl32\fR, which
specifies a backup server, three backup clients, and two processes per client
as shown here:
.sp
.nf
# backup server
serverName=k145n06.kgn.ibm.com
# backup clients
clientName=k14n04.kgn.ibm.com
clientName=k14n05.kgn.ibm.com
clientName=k14n06.kgn.ibm.com
# number of processes per client
numberOfProcessesPerClient=2
.fi
.sp
.RS +3
.HP 3
1. To perform a full backup of the file system \fB/dev/fs0\fR from node
\fBk145n04\fR, enter:
.sp
.nf
mmbackup /dev/fs0 -n inputctrl32 -t full
.fi
.sp
The system displays information similar to:
.sp
.nf
Tue Mar 18 14:03:25 est 2003 mmbackup is still running ...
Tue Mar 18 14:05:55 est 2003 mmbackup is still running ...
tsbackup: full backup finished with complete success, rc = 0
mmbackup: Command successfully completed
.fi
.sp
.HP 3
2. To perform an incremental backup of the file system \fB/dev/fs0\fR from
node \fBk145n04\fR, enter:
.sp
.nf
mmbackup /dev/fs0 -n inputctrl32 -t incremental
.fi
.sp
The system displays information similar to:
.sp
.nf
Tue Mar 18 14:16:15 est 2003 mmbackup is still running ...
tsbackup: incremental backup finished with complete success,
rc = 0
mmbackup: Command successfully completed
.fi
.sp
.HP 3
3. In an unsuccessful attempt to perform a full backup of the file system
\fB/dev/fs0\fR from node \fBk145n04\fR, where the user had
entered:
.sp
.nf
mmbackup /dev/fs0 -n inputctrl32 -t full
.fi
.sp
the system displays information similar to:
.sp
.nf
k145n04.kgn.ibm.com: Cannot open /fs0/.mmbuTrans2.
Process 2 on client k145n06 failed in processing its list
of files.
k145n04.kgn.ibm.com: Cannot open /fs0/.mmbuTrans5.
Process 5 on client k145n06 failed in processing its list
of files.
tsbackup: full backup finished with partial success, rc = 1
mmbackup: 6027-1639 Command failed. Examine previous error
messages to determine the cause.
          
.fi
.sp
.HP 3
4. To resume the job unsuccessfully completed in example 3, enter:
.sp
.nf
mmbackup /dev/fs0 -R
tsbackup: resume of full backup finished with complete success,
          rc=0
mmbackup: Command successfully completed
.fi
.sp
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
esses k( &J4@$        O  AAA             O      ./usr/share/man/man8/mmchcluster.8  .3 n         $          $          .TH mmchcluster 11/01/04
mmchcluster Command
.SH "Name"
.PP
\fBmmchcluster\fR - Changes GPFS cluster configuration
data.
.SH "Synopsis"
.PP
\fBmmchcluster\fR {[\fB-p\fR \fIPrimaryServer\fR ]
[\fB-s\fR \fISecondaryServer\fR]}
.PP
Or,
.PP
\fBmmchcluster\fR \fB-p\fR \fBLATEST\fR
.PP
Or,
.PP
\fBmmchcluster\fR {[\fB-r\fR \fIRemoteShellCommand\fR]
[\fB-R\fR \fIRemoteFileCopyCommand\fR]}
.PP
Or,
.PP
\fBmmchcluster\fR \fB-C\fR \fIClusterName\fR
.SH "Description"
.PP
The \fBmmchcluster\fR command serves several purposes: 
.RS +3
.HP 3
1. Change the primary or secondary GPFS cluster configuration server.
.HP 3
2. Synchronize the primary GPFS cluster configuration server.
.HP 3
3. Change the remote shell and remote file copy programs to be used by the
nodes in the cluster.
.HP 3
4. Change the cluster name.
.RE
.PP
To display current system information for the cluster, issue the \fBmmlscluster\fR command.
.PP
You must follow these rules when issuing the \fBmmchcluster\fR
command with the \fB-p\fR or \fB-s\fR flags:
.RS +3
.HP 3
\(bu The node must be available for the command to be successful. If any
of the nodes listed are not available when the command is issued, a message
listing those nodes is displayed. You must correct the problem on each
node and reissue the command.
.HP 3
\(bu The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.PP
\fBAttention:\fR The \fBmmchcluster\fR command, when issued with
either the \fB-p\fR or \fB-s\fR option, is designed to operate in an
environment where the current primary and secondary cluster configuration
servers are \fInot\fR available. As a result, the command can run
without obtaining its regular serialization locks. To assure smooth
transition to a new cluster configuration server, no other GPFS commands
(\fBmm\fR commands) should be running when the command is issued nor should
any other command be issued until the \fBmmchcluster\fR command has
successfully completed.
.SH "Parameters"
.PP
.RS +3
\fB\fB-C\fR \fIClusterName\fR
\fR
.RE
.RS +9
Specifies a new name for the cluster. If the user-provided name
contains dots, it is assumed to be a fully qualified domain name.
Otherwise, to make the cluster name unique, the domain of the primary
configuration server will be appended to the user-provided name. 
.PP
Since each cluster is managed independently, there is no automatic
coordination and propagation of changes between clusters like there is between
the nodes within a cluster. This means that if you change the name of
the cluster, you should notify the administrators of all other GPFS clusters
that can mount your file systems so that they can update their own
environments. See the \fBmmauth\fR, \fBmmremotecluster\fR, and
\fBmmremotefs\fR commands.
.RE
.PP
.RS +3
\fB-p \fIPrimaryServer\fR
\fR
.RE
.RS +9
Change the primary server node for the GPFS cluster data.
.PP
\fBLATEST\fR - Synchronize all of the nodes in the GPFS cluster
ensuring they are using the most recently specified primary GPFS cluster
configuration server. If an invocation of the \fBmmchcluster\fR
command fails, you are prompted to reissue the command and specify
\fBLATEST\fR on the \fB-p\fR option to synchronize all of the nodes in
the GPFS cluster. Synchronization provides for all nodes in the GPFS
cluster to use the most recently specified primary GPFS cluster configuration
server.
.RE
.PP
.RS +3
\fB-s \fISecondaryServer\fR
\fR
.RE
.RS +9
Change the secondary server node for the GPFS cluster data. To
remove the secondary GPFS server and continue operating without it, specify a
null string, \fB''\fR, as the parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-R \fIRemoteFileCopyCommand\fR
\fR
.RE
.RS +9
Specifies the fully-qualified path name for the remote file copy program
to be used by GPFS.
.PP
The remote copy command must adhere to the same syntax format as the
\fBrcp\fR command, but may implement an alternate authentication
mechanism.
.RE
.PP
.RS +3
\fB-r \fIRemoteShellCommand\fR
\fR
.RE
.RS +9
Specifies the fully-qualified path name for the remote shell program to be
used by GPFS.
.PP
The remote shell command must adhere to the same syntax format as the
\fBrsh\fR command, but may implement an alternate authentication
mechanism.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmchcluster\fR
command.
.PP
You may issue the \fBmmchcluster\fR command from any node in the GPFS
cluster.
.PP
A properly configured \fB.rhosts\fR file must exist in the root
user's home directory on each node in the GPFS cluster. If you
have designated the use of a different remote communication program on either
the \fBmmcrcluster\fR or the \fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To change the primary GPFS server for the cluster, enter:
.sp
.nf
mmchcluster -p k164n06
.fi
.sp
.PP
The system displays output similar to:
.sp
.nf
mmchcluster -p k164n06
mmchcluster: Command successfully completed
.fi
.sp
.PP
To confirm the change, enter:
.sp
.nf
mmlscluster
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
GPFS cluster information
========================
  GPFS cluster name:         cluster1.kgn.ibm.com
  GPFS cluster id:           680681562214606028
  GPFS UID domain:           cluster1.kgn.ibm.com
  Remote shell command:      /usr/bin/rsh
  Remote file copy command:  /usr/bin/rcp
GPFS cluster configuration servers:
-------------------------------------
  Primary server:    k164n06.kgn.ibm.com
  Secondary server:  k164n05.kgn.ibm.com
 Node number Node name IP address     Full node name      Remarks
---------------------------------------------------------------------
       1     k164n04   198.117.68.68  k164n04.kgn.ibm.com quorum node
       2     k164n05   198.117.68.69  k164n05.kgn.ibm.com quorum node
       3     k164n06   198.117.68.70  k164n06.kgn.ibm.com\ 
.fi
.sp
.SH "See also"
.PP
mmaddnode Command
.PP
mmcrcluster Command
.PP
mmdelnode Command
.PP
mmlscluster Command
.PP
mmremotecluster Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
rl8H J4@$        B  mBmBmB             Q)      ./usr/share/man/man8/mmchconfig.8   .3 n         $          $            	  aeinot
dfhlmrsu.PR\cp,-BSbgvyEFITkw+0139ACDGNx"(/2456:HMOqz)8KLW'=UY[]~7;V_|*X 4a3-47(a3-47A%IH0ZJx=Qce"HXfZhkYg6,(I9i2CKIG-!얒KI@ [CZJ B:-%A p-"i+BCI@eZCXi+ZCX	C$ H`K!(d  ,(B\chݬISH0ZJcTXB2C^<n?e/uXϼDN߉5x28ߪ79{ZA$yin?d 0t-ZJ\wcݼdleh I)#;зi+꧇Hj9i
 (-%9ˍEXl6;-ftSH1. č-%tŠf_T5DS^з˼6{(#2,_:ZD;)#yi	b$%W6o{꧖BlҸV26QS.=$OLNbGJ='&RG ТWx `H9pbՍ3UH@LbP#%ZEdŖ}T1Awt2Meg |9 i<ra%S~:oO|G-"nbI]vlIAMvr x0J$:oO|G-"1@'2R r	$QR\ZJ9pO3-45퍬N5WD$,Y[ݲ@7a  NAB$r(MH].KIG-%AB$qfɔx3/bovODG˞lD}1_U'3-45퍬b˼jǑkU-P-%tTIi+شzwqe&>y.ɛkBŞAAG NAB$r(Ii("BP-%A rQ((/As1.avOD2Ro1e2_1QcdsZoO-"i+p770%2KI_ţӻ)5(STXwL_,㮝6w"]ɔes6])7O/"]NA I'T(MW|Z9i	b$%Weht8ߪPcBPZE"6^f{-%R%6k, RPN?KIB9iAcCiPHdBάL~r)09i(AH@L..|-%S &/cAb#F71e--"lށ,u1. IJ)#K}Tc0Ce{S-U'3Yn7eKxz}T!b"Yf126H8{-L.kA]?U?Cj~˝='}8bJ8U<V;KI\c">7hm\/sqe$xuX?~]=889ri(_ȴg H 8^59i-Xm$!$
P,PՎۆIi(C`H`Yl'sI@  4(8Ӑ9`o@vI9aHm~i+r

9iU>?U>7[C8*,l}T&RjPr:-%lT_˟holSDz36(hd_<Ep|cdZ9pn6~xD}zwj|X6Mm~ȎdH
|;ֻзi(PQ$P	/!-%AB$q{p!b?qd-czt"]#36PG .qз˼6yi>\ZJZE^9I9aH~1r

9iś&Rj]SM7?qG?꧀?qoN7IK{X揄&o26f>PGTJaފlr(j\|?qSKIDpSd췣%|j  yܶM9I9aH(K@E;NZJ9H4n-=S!b·C>wt9iehii+ӽ&RjP#}'꧎yi
 (-%zqlI@$,_FK~ohbchohq˹3k-pTз-"
  ŤN6LԠG/%Ǉbchohw3.ͬ7ߪf]ɛY~73eqRlNXZEFZ)ZJTL~?.rs^1t`khtl͊oN7꧘e{T)ٰZJcAAG NAB$r(荁zיlNZJ9H4͓)5.͍WN7ꧻq6~'5pODݸ:]iĸGlIHWݸ:\G9p.GFcf6|tSdƏTWK&y݊gD쵕qӍdmeUNE{bκxKzlO2/}ꥲGgj$plG&C>}tRbxD}1Kq1>kpb1p$,_d\kpbU_Dlsꧢ&n4y5R˻'-`GT51râvZʸf05Tc֎2z2aQZEdEfZ+k/ӽ1GKzbr

9r
 ÖGDmer

9iś&Rj]avODH0a?LW(ޖh#e=SN9nb'"]gUfZhkY~-4vEk\@Oci?7W?h#e=SNd}üpA׳"H9I8D}vݸiA׳"~7ӸɫTXG={BdoTݸiu*,ln3XA׳pAװI'cgէU=wpl.DwLU<f尾L6_=,8?,7\ .ƈh텮6E-%xwL4tyiM.{ed\9pof6}Zq&j?{ODme0#7-a].x`fIX-l-q/Ii+ţa+Hb%6jAװ9I9aHmd夣PQ(_NS/=4l6wX=TU'ܯmXޜoOTZV7|p$3}tޑU'6зϧ}T@N}HWK]:LC}TSfk8p}Op\6[Y}mPPH(( bP#oxp1Mǹc᪞wKGK};L3esMBδOe#WzZZΚ}TˍtOOڸ_{ڷ5T1Qt5u.k;f#Dx}k:o~6\#ţTsQt޻8H,_tt0U?i:6MtU<t-^dl(L~/ZǞ>Sr:o@)B<kf6[Q|/p P(
0z' _Ne-rV央^}N?Dtk>Sr<
GTJa:$
DI8^7K}ZqEXĆtxDb;yia|l-rK,{XplɽC8E-%r

9r
 ÖGC IG P	Ż꧌^V7yi}i+pczqqTc|߹Ot5Tx3htlA{1̟WDT]B{-_f>|t{YQT; q!n.P#ţTztgӢn:HcKI__wDTovuU.~~'mb=;CⷧSُa:爵	ϴ̴8GFT)QlmeZ|V|tSVYoi&m6]R=;ikqյӍ7>yi}i+:bmՍAGTN?x[Ӹ7^fZGTзzQLdg2qjc%=1K{gO}eH:[1ߪ s`,	1^p 	ǵӹnKeUyi+ןiU;iU?U-NN7[Y`׼t-r	$xޗU-iƲ6dl5cx@ӍBň0#7-a].x`p:.&$A((I(_ZEh\ՖCiӖAAG |8?/[wO0:o!8zsxtbczqU=Qh]X߉ p8bw{Q/.ueToT;LmM(H᪞l6;-oN; 'L{>$G2U?TلpZ|V~{d-?U=1KVYoi&m6]S.ueCfU>GMs"̕QSKck/)n:XߪZi5S8oNht߫z&epO|QFF˽1Tl-퟉eH8;xl7 _A@Wp 	ǵӹnKe-!iZJ5dGDxc{iU=VY&`k)5S)x[Ӹ(I'oN5#a p8Μo,Gt-!l/Wsŏv p6Y7Hg%AAG NAB$r)09IG P	爵	Κ\v8 pG|՘x(PhP(׼t-
 qfɔUNoZbfyiI_H8#'5rp( rA$.=ݲpquF~/꧸2R:Eo'1(}죾.wO0;Vcf( r	$/A-"	'-%AB$qfɔ}T{K~'%s*zqdެ{4wF@8ɛ/(#I8ՍKwOAllDK>N\U=zӸ,
:oS%?nꛏ_8^BUCUK{|f6ac~{{K|̗:J@d=́c|oOM.{ep6[$5SB=ǭdFϛǍWB2[<̳6l˹{-x̃V7{dɽXJ:irAAG NAB$r(쀘(g-%AB$qfɔx.dlOVE쑰0~˾'69|dl[-l-;̴zύcW>GTJaN(\i&eoNt!f=qc~|3{<07Iї?sS&mS:b#c0kZ8{kdHUZ:#cÇZ8x-T:~2kLǘ ˟xE_/?U=hk NwwR6:ɽw6}%.E-k!u7_T6QN.wjU;~uPw:2]7t[,w&C?U-VՍ71^S@IŚnb}T{K~'L!<Z; r	$/A-"1{'-%AB$qfɔ}T{K~'%sL{7*H᪞9b**?ƫ'էjqG\ݲ8jS^зϧIH'0\7br

9r
 ÖF71g-%AB$pz˾{AWDU=zONX8jxZ>QB>U==t-&͡/1StRb皡!}h?߆cjƝݲ8dHqзWO1P3eIB:qBpW"AAG NAB$r(gLPj0r

9iz[{ 'L{9PϷd~wD}ke3*oDl8ޚ\|6{f k#~TZ=>oj[#%Uh#I8j_Dla[)1Uc};vZʹG@QgO/%dpOe]1oNA5S:cB>`$z%0|w GT@Nqз(gLPj0{iTل8aIÁ@:[r,	 '#+xtn(r

9r
 Ö[R1Ѕ89-%AB$p~^KxD|>OODA׳֫HK|,n6L=_5pdOOlzC~50.gܶɆ+tǻeg&rWucDx2ݰ_%醘o-!?pjF6`:ZZJ#盁tk";)#}%OlE89qrl66~xHXC2]U,1y| hf/'O];}_.#z)yP:~+<Xl7hǮt{X#~=OE6LՏaPKqU<~5񧐜0riȴjF6`:Z8 Qr 4Ը  CMK 94Is?UN׶Dw@riȴjF6`:Z8/Bh8ӐI'**,mXߪZEmHKBp!b'Z%ǫ-i I' |9i@Cg-%AB$qfɔ}Ttc"2zqU<Ž- IKtKm?U<"HXKx`CYV7:ok/ӎf^\ZC 0!4ڱ8$,_hH=ovNclQi+(c3Z:Z]--[ݲ 롍 $PL !ZŐr

9r
 ÖOc@SrQ((/Aqn$,_DK2}-}r7㢜}8蝓6PG .B.Pqi+yixZJ#I8	T-"lށ,ӿsWU=,솇;xwLR-̽|úwEcc~z)K7gc~NIGTSbTS|dB'CثoO-"pqi(c8jޝSe(ڱ=S}h荃M]Y~=qd=S&d_=wz)ޝ75Z|ρVA$M)}a
pp\RI^;9i>\ZJSbkC?-cz_DΜi%}OM.{eUثqQ8{XTxhK{VKͬxB<GTEU=xd/"yZ?ţY:is=,s[,q7.ŎUN望F˶Q;cxzV7GE89ޯ6{IM{ck-<Z8zdёxɫA5Tݫw+x[J>_xެvHǖ) 0)NUXފt;ef!b{ZZJg-!BŁUӵnl1cyRlْZJ r	$/A-"Xj0r

9iz[{ 'L{=%1lwc\l^pT|6{1 懲U>BfKk[{Kl{ 髧*PG9pO5hOt{bьGlk7yd["S:bشc8` vG2\3huh8jqзϧpYT@Nqзϧp,	#z%0|w@15S:cB?H[)1AlZJP~4:ZSfWa H-[pX`H:[rP
9I9aH!-%AB$qfɔ}ToO1Qce`:|2fc%+?z3i͖=S|"HXNl7epSt.?SZE"6^f{-%RPN?KIB9H2rVX\RN?KIB9iAcCi-%-"08QIB9ilsZJH1})qK-%xm~ˈ9I9aH4rQ((/AlIwFT6]S&o2QQ-'6Zȹ#ݬNt8ߪZE"6^f{-%-"6Mm2qZJH2rVX\RN?KIB9H2;zLi(G-"08QIB9ilsZJH1})qK-%xm~ˈ9`oBu  NA$H0P(夣PQ(_N-.r^f0Y I' |9i9i(r
 ӃbT;=r

9)$6?ި=mhSbM-%s1A N |T)1FeE?"}_O,-i NU7걿U<iZ9i(2-3dc#k,Gkpj2C^G-"jWM.{i{s7wV2z)fG0b'T!bb햑tKm>{?㢒l3dc#k/gZoO-"b햖Sb[ݲW3ȴvml |9 i<pYchN[kz$,_1ovH9 i<p z$,_1ovy|ctdlcꧽ>l9h'{%΁AAG XP0ˀSI'bOdgM.{ebgׁX8hriȴ1Fe@LbP# rG!ƜI8ns2MgO1QcdGhriȴ[M9	NA$5S=,Vk#k/K08ӑi8?-45퍬le(c3H"D$H"D$H"D$H"D$H"D$H"D$H"D$H'1ovBP-ilŽ-G0( ( XP A9{Z<0z<|#88L:xq0{dHX9kG  쀘ΠFx 9s=,w-o1ovHdPf08"D$H"D$H"D$H"D$H"D$H"D$H"DDYMr3nIB/<dZA$1-oI'c+D~1h9v1n3-47A$q{[Ex
0NA$4KqeI' 7t[k} ,J4@$        *-  AAA             *-      ./usr/share/man/man8/mmchdisk.8          $          $          .TH mmchdisk 11/01/04
mmchdisk Command
.SH "Name"
.PP
\fBmmchdisk\fR - Changes state or parameters of one or more disks
in a GPFS file system.
.SH "Synopsis"
.PP
\fBmmchdisk\fR \fIDevice\fR {\fBsuspend\fR | \fBresume\fR |
\fBstop\fR | \fBstart\fR | \fBchange\fR} \fB-d\fR
"\fIDiskDesc\fR[;\fIDiskDesc\fR...
]" [\fB-N\fR {\fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR ... ] }
]
.PP
Or,
.PP
\fBmmchdisk\fR \fIDevice\fR {\fBresume\fR | \fBstart\fR}
\fB-a\fR [\fB-N\fR { \fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR... ] }
]
.SH "Description"
.PP
Use the \fBmmchdisk\fR command to change the state or the parameters of
one or more disks in a GPFS file system.
.PP
The state of a disk is a combination of its status and availability,
displayed with the \fBmmlsdisk\fR command.
Disk status is normally either \fBready\fR or \fBsuspended\fR. A
transitional status such as \fBreplacing\fR, \fBreplacement\fR, or
\fBbeing emptied\fR might also appear if a disk is being deleted or
replaced. A suspended disk is one that the user has decided not to
place any new data on. Existing data on a suspended disk may still be
read or updated. Typically, a disk is suspended prior to restriping a
file system. Suspending a disk tells the \fBmmrestripefs\fR command that data is to be migrated
off that disk. Disk availability is either \fBup\fR or
\fBdown\fR.
.PP
Be sure to use \fBstop\fR before you take a disk offline for
maintenance. You should also use \fBstop\fR when a disk has become
temporarily inaccessible due to a disk failure that is repairable without loss
of data on that disk (for example, an adapter failure or a failure of the disk
electronics).
.PP
The \fIDisk Usage\fR (\fBdataAndMetadata\fR,
\fBdataOnly\fR, \fBmetadataOnly\fR, or \fBdescOnly\fR) and
\fIFailure Group\fR parameters of a disk are adjusted with the
\fBchange\fR option. See the \fIGeneral Parallel
File System: Concepts, Planning, and Installation Guide\fR and search
for \fIrecoverability considerations\fR. The \fBmmchdisk
change\fR command does not move data or metadata that resides on the
disk. After changing disk parameters, in particular, \fIDisk
Usage\fR, you may have to issue the \fBmmrestripefs\fR command with the
\fB-r\fR option to relocate data so that it conforms to the new disk
parameters.
.PP
The \fBmmchdisk\fR command can be issued for a mounted or unmounted file
system. When maintenance is complete or the failure has been repaired,
use the \fBmmchdisk\fR command with the \fBstart\fR option. If
the failure cannot be repaired without loss of data, you can use the \fBmmdeldisk\fR command.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to which the disks belong. File
system names need not be fully-qualified. \fBfs0\fR is as acceptable
as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB-d "\fIDiskDesc\fR[;\fIDiskDesc\fR...
]"
\fR
.RE
.RS +9
A descriptor for each disk to be changed. 
.PP
Specify only disk names when using the \fBsuspend\fR, \fBresume\fR,
\fBstop\fR, or \fBstart\fR options. Delimit multiple disk names
with semicolons and enclose the list in quotation marks. For example,
"gpfs1nsd;gpfs2nsd"
.PP
When using the \fBchange\fR option, include the disk name and any new
\fIDisk Usage\fR and \fIFailure Group\fR positional parameter values in
the descriptor. Delimit descriptors with semicolons and enclose the
list in quotation marks, for example,
"gpfs1nsd:::dataOnly;gpfs2nsd:::metadataOnly:12"
.PP
A disk descriptor is defined as (second and third fields
reserved):
.sp
.nf
DiskName:::DiskUsage:FailureGroup
.fi
.sp
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
.PP
For a list of disks that belong to a particular file system, issue the
\fBmmlsnsd -f\fR, the \fBmmlsfs -d\fR, or the \fBmmlsdisk\fR
command. The \fBmmlsdisk\fR command will also show the current disk
usage and failure group values for each of the disks.
.RE
.PP
.RS +3
\fB\fIDiskUsage\fR
\fR
.RE
.RS +9
If a value is not specified, the disk usage remains unchanged: 
.PP
.RS +3
\fBdataAndMetadata
\fR
.RE
.RS +9
Indicates that the disk contains both data and metadata. This is
the default.
.RE
.PP
.RS +3
\fBdataOnly
\fR
.RE
.RS +9
Indicates that the disk contains data and does not contain
metadata.
.RE
.PP
.RS +3
\fBmetadataOnly
\fR
.RE
.RS +9
Indicates that the disk contains metadata and does not contain
data.
.RE
.PP
.RS +3
\fBdescOnly
\fR
.RE
.RS +9
Indicates that the disk contains no data and no metadata. Such a
disk is used solely to keep a copy of the file system descriptor, and can be
used as a third failure group in certain disaster recovery
configurations. 
.RE
.RE
.PP
.RS +3
\fB\fIFailureGroup\fR
\fR
.RE
.RS +9
A number identifying the failure group to which this disk belongs.
You can specify any value from -1 (where -1 indicates that the disk has no
point of failure in common with any other disk) to 4000. If you do not
specify a failure group, the value remains unchanged. GPFS uses this
information during data and metadata placement to assure that no two replicas
of the same block are written in such a way as to become unavailable due to a
single disk failure. All disks that are attached to the same NSD server
or adapter should be placed in the same failure group.
.RE
.RE
.PP
.RS +3
\fB-a 
\fR
.RE
.RS +9
Specifies to change the state of all of the disks belonging to the file
system, \fIDevice\fR. This operand is valid only on the
\fBresume\fR and \fBstart\fR options.
.RE
.PP
.RS +3
\fB-N {\fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR ... ] }
\fR
.RE
.RS +9
Specify the nodes to participate in the restripe of the file system after
the state or parameters of the disks have been changed. Valid values
are: 
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Indicates that all nodes in the GPFS cluster, whether they have the
file system mounted or not, are to participate in the restripe. This is
the default when the \fB-N\fR option has not been specified.
.RE
.PP
.RS +3
\fBmount
\fR
.RE
.RS +9
Indicates that only the nodes that have the file system mounted are to
participate in the restripe of the file system.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR ... ]
\fR
.RE
.RS +9
A comma-separated list of target nodes that are to participate in the
restripe.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fBchange
\fR
.RE
.RS +9
Instructs GPFS to change the \fIDisk Usage\fR parameter, the
\fIFailure Group\fR parameter, or both, according to the values specified
in the \fIDisk Desc\fR.
.RE
.PP
.RS +3
\fBresume
\fR
.RE
.RS +9
Informs GPFS that a disk previously suspended is now available for
allocating new space. If the disk is currently in a stopped state, it
remains stopped until you specify the \fBstart\fR option. Otherwise,
normal read and write access to the disk resumes.
.RE
.PP
.RS +3
\fBstart
\fR
.RE
.RS +9
Informs GPFS that disks previously stopped are now accessible. This
is accomplished by first changing the disk availability from \fBdown\fR to
\fBrecovering\fR. The file system metadata is then scanned and any
missing updates (replicated data that was changed while the disk was
\fBdown\fR) are repaired. If this operation is successful, the
availability is then changed to \fBup\fR. If the metadata scan
fails, availability is set to \fBunrecovered\fR. This could occur if
too many other disks are \fBdown\fR. The metadata scan can be
re-initiated at a later time by issuing the \fBmmchdisk start\fR command
again. 
.PP
If more than one disk in the file system is down, they must all be started
at the same time by issuing the \fBmmchdisk \fIDevice\fR start -a\fR
command. If you start them separately and metadata is stored on any
disk that remains down, the \fBmmchdisk start\fR command fails.
.RE
.PP
.RS +3
\fBstop
\fR
.RE
.RS +9
Instructs GPFS to stop any attempts to access the specified disks.
Use this option to tell the file system manager that a disk has failed or is
currently inaccessible because of maintenance. 
.PP
A disk remains stopped until it is explicitly started by the
\fBmmchdisk\fR command with the \fBstart\fR option. Restarting
the GPFS Server daemon or rebooting does not restore normal access to a
stopped disk.
.RE
.PP
.RS +3
\fBsuspend
\fR
.RE
.RS +9
Instructs GPFS to stop allocating space on the specified disk.
Place a disk in this state when you are preparing to restripe the file system
off this disk because of faulty performance. This is a user-initiated
state that GPFS never uses without an explicit command to change disk
state. Existing data on a suspended disk may still be read or
updated.
.PP
A disk remains suspended until it is explicitly resumed. Restarting
GPFS or rebooting nodes does not restore normal access to a suspended
disk.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmchdisk\fR command.
.PP
You may issue the \fBmmchdisk\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To \fBsuspend\fR active disk \fBgpfs2nsd\fR, enter:
.sp
.nf
mmchdisk fs0 suspend -d gpfs2nsd
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsdisk fs0
.fi
.sp
The system displays information similar to:
.sp
.nf
disk     driver sector failure holds  holds
name     type   size   group metadata data  status  availability
-------  ------ ------ ----- -------- ----- ------- ------------
gpfs2nsd  nsd    512    4002   yes    yes   suspended   up
gpfs3nsd  nsd    512    4002   yes    yes   ready       up
gpfs4nsd  nsd    512    4002   yes    yes   ready       up
gpfs5nsd  nsd    512    4002   yes    yes   ready       up
.fi
.sp
.HP 3
2. To specify that metadata should no longer be stored on disk
\fBgpfs1nsd\fR, enter:
.sp
.nf
mmchdisk fs0 change -d "gpfs1nsd:::dataOnly"
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsdisk fs0
.fi
.sp
The system displays information similar to:
.sp
.nf
disk      driver sector failure holds  holds
name      type   size   group metadata data status  availability
--------- ------ ---- ------- ------- ----- ------- ------------
gpfs1nsd  nsd     512   -1     no       yes   ready    up\ 
gpfs2nsd  nsd     512   -1     yes      yes   ready    up\ 
gpfs3nsd  nsd     512   -1     yes      yes   ready    up\ 
gpfs4nsd  nsd     512   -1     yes      yes   ready    up
\ 
.fi
.sp
.RE
.SH "See also"
.PP
mmadddisk Command
.PP
mmdeldisk Command
.PP
mmlsdisk Command
.PP
mmrpldisk Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
. The k .J4@$        B  AAA             B      ./usr/share/man/man8/mmcheckquota.8 .3 n         $          $          .TH mmcheckquota 11/01/04
mmcheckquota Command
.SH "Name"
.PP
\fBmmcheckquota\fR - Checks file system user and group
quotas.
.SH "Synopsis"
.PP
\fBmmcheckquota\fR [\fB-v\fR ] { \fIDevice\fR
[\fIDevice\fR ...] | \fB-a\fR}
.PP
Or,
.PP
\fBmmcheckquota\fR {[\fB-u \fR \fIUserQuotaFilename\fR]
| [\fB-g \fR \fIGroupQuotaFileName\fR ]}
\fIDevice\fR
.SH "Description"
.PP
The \fBmmcheckquota\fR command serves two purposes:
.RS +3
.HP 3
1. Count inode and space usage in a file system by user and group, and write
the collected data into quota files.
.HP 3
2. Replace either the user or group quota files, for the file system
designated by \fIDevice\fR, thereby restoring the quota files for the file
system. These files must be contained in the root directory of
\fIDevice\fR. If a backup copy does not exist, an empty file is
created when the \fBmmcheckquota\fR command is issued.
.RE
.PP
The \fBmmcheckquota\fR command counts inode and space usage for a file
system and writes the collected data into quota files. Indications
leading you to the conclusion you should run the \fBmmcheckquota\fR command
include:
.RS +3
.HP 3
1. \fBMMFS_QUOTA\fR error log entries. This error log entry is
created when the quota manager has a problem reading or writing the quota
file.
.HP 3
2. Quota information is lost due to a node failure. A node failure
could leave users unable to open files or deny them disk space that their
quotas should allow.
.HP 3
3. The in-doubt value is approaching the quota limit. 
.sp
The sum of the in-doubt value and the current usage may not exceed the hard
limit. Consequently, the actual block space and number of files
available to the user of the group may be constrained by the in-doubt
value. Should the in-doubt value approach a significant percentage of
the quota, use the \fBmmcheckquota\fR command to account for the lost space
and files.
.HP 3
4. User and group quota files are corrupted.
.RE
.PP
The result of an online quota check may be incomplete when files are being
accessed through SANergy at the time of the file system quota check.
See \fIAppendix B,  SANergy export: restrictions and considerations\fR
in \fIGPFS: Administration and Programming Reference\fR. 
To get an accurate online quota check result, rerun
\fBmmcheckquota\fR when SANergy is not active.
.PP
The \fBmmcheckquota\fR command is I/O intensive and should be run when
the system load is light. When issuing the \fBmmcheckquota\fR
command on a mounted file system, negative in-doubt values may be reported if
the quota server processes a combination of up-to-date and back-level
information. This is a transient situation and may be
ignored.
.SH "Parameters"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Checks all GPFS file systems in the cluster from which the command is
issued.
.RE
.PP
.RS +3
\fB\fI\fR\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system. File system names need not be
fully-qualified. \fBfs0\fR is as acceptable as
\fB/dev/fs0\fR.
.RE
.PP
.RS +3
\fB-g \fIGroupQuotaFileName\fR
\fR
.RE
.RS +9
Replace the current group quota file with the file indicated.
.PP
When replacing quota files with the \fB-g\fR option:
.RS +3
.HP 3
\(bu The quota file must be in the root directory of the file system.
.HP 3
\(bu The file system must be unmounted.
.RE
.RE
.PP
.RS +3
\fB-u \fIUserQuotaFilename\fR
\fR
.RE
.RS +9
Replace the current user quota file with the file indicated.
.PP
When replacing quota files with the \fB-u\fR option:
.RS +3
.HP 3
\(bu The quota file must be in the root directory of the file system.
.HP 3
\(bu The file system must be unmounted.
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-v 
\fR
.RE
.RS +9
Reports discrepancies between calculated and recorded disk quotas.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmcheckquota\fR
command.
.PP
GPFS must be running on the node from which the \fBmmcheckquota\fR
command is issued.
.SH "Examples"
.RS +3
.HP 3
1. To check quotas for file system \fBfs0\fR, enter:
.sp
.nf
mmcheckquota fs0
.fi
.sp
The system displays information only if a problem is found.
.HP 3
2. To check quotas for all file systems, enter:
.sp
.nf
mmcheckquota -a
.fi
.sp
The system displays information only if a problem is found or if quota
management is not enabled for a file system:
.sp
.nf
fs2: no quota management installed
fs3: no quota management installed
.fi
.sp
.RE
.SH "See also"
.PP
mmedquota Command
.PP
mmfsck Command
.PP
mmlsquota Command
.PP
mmquotaon Command
.PP
mmquotaoff Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
er or kL *J4@$        j%  mBmBmB             j%      ./usr/share/man/man8/mmchfs.8 ot         $          $          .TH mmchfs 04/21/05
mmchfs Command
.SH "Name"
.PP
\fBmmchfs\fR - Changes the attributes of a GPFS file
system.
.SH "Synopsis"
.PP
\fBmmchfs\fR \fIDevice\fR [\fB-A {\fR\fByes\fR | \fBno |
automount}\fR] [\fB-E {\fR\fByes |
\fR\fBno\fR\fB}\fR] [\fB-D {\fR\fBnfs4\fR |
\fBposix\fR\fB}\fR] [\fB-F\fR \fIMaxNumInodes\fR] [\fB-k {\fR\fBposix\fR\fB | nfs4 | all}\fR] [\fB-m\fR \fIDefaultMetadataReplicas\fR]
[\fB-o\fR \fIMountOptions\fR]
[\fB-Q {yes | \fR\fBno}\fR] [\fB-r\fR
\fIDefaultDataReplicas\fR] [\fB-S\fR \fB{yes\fR\fB | no}
]\fR [\fB-T\fR \fImountpoint\fR]
[\fB-V\fR] [\fB-z {\fR\fByes\fR \fB| no}\fR]
.PP
Or,
.PP
\fBmmchfs\fR \fIDevice\fR [\fB-W \fR
\fINewDeviceName\fR]
.SH "Description"
.PP
Use the \fBmmchfs\fR command to change the attributes of a GPFS
file system.
.PP
All files created after issuing the \fBmmchfs\fR command take on the new
attributes. Existing files are not affected. Use the \fBmmchattr\fR command to change the replication factor
of existing files.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to be changed. 
.PP
File system names need not be fully-qualified. \fBfs0\fR is as
acceptable as \fB/dev/fs0\fR. However, file system names must be
unique across GPFS clusters.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-A {yes | \fBno\fR | automount}
\fR
.RE
.RS +9
Indicates when the file system is to be mounted:
.PP
.RS +3
\fByes
\fR
.RE
.RS +9
When the GPFS daemon starts.
.RE
.PP
.RS +3
\fBno
\fR
.RE
.RS +9
Manual mount.
.RE
.PP
.RS +3
\fBautomount
\fR
.RE
.RS +9
When the file system is first accessed.
.RE
.RE
.PP
.RS +3
\fB-D {nfs4 | \fBposix\fR}
\fR
.RE
.RS +9
Specifies whether a 'deny-write open lock' will block writes,
which is expected and required by NFS V4. File systems supporting NFS
V4 must have \fB-D nfs4\fR set. The option \fB-D posix\fR allows
NFS writes even in the presence of a deny-write open lock. If you
intend to export the file system using NFS V4, you must use \fB-D
nfs4\fR. For NFS V3 (or if the file system is not NFS exported at
all) use \fB-D posix\fR.
.RE
.PP
.RS +3
\fB-E {yes | \fBno\fR}
\fR
.RE
.RS +9
Specifies whether to report exact \fBmtime\fR values. If \fB-E
no\fR is specified, the \fBmtime\fR value is periodically updated.
If you desire to always display exact modification times, specify the \fB-E
yes\fR option.
.RE
.PP
.RS +3
\fB-F \fI MaxNumInodes\fR
\fR
.RE
.RS +9
Changes the maximum number of files that can be created. Allowable
values range from the current number of created inodes (determined by issuing
the \fBmmdf\fR command), through the maximum
number of files possibly supported as constrained by the formula:
.PP
\fBmaximum number of files = (total file system space/2) / (inode size +
subblock size)\fR
.PP
If your file system has additional disks added or the number of inodes was
insufficiently sized at file system creation, you may change the number of
inodes and hence the maximum number of files that can be created.
.PP
The initial setting of the number of inodes at file system creation is used
as the minimum value. The new value, set by using the \fBmmchfs\fR
command, determines the maximum value. The inode file expands on
demand, from the initial minimum value set up to the new maximum value as
needed.
.PP
For file systems that will be doing parallel file creates, if the total
number of free inodes is not greater than 5% of the total number of inodes,
there is the potential for slowdown in file system access. Take this
into consideration when changing your file system.
.RE
.PP
.RS +3
\fB\fB-k {\fR\fBposix\fR\fB | nfs4 | all}\fR
\fR
.RE
.RS +9
Specifies the type of authorization supported by the file system:
.PP
.RS +3
\fBposix
\fR
.RE
.RS +9
Traditional GPFS ACLs only (NFS V4 ACLs are not allowed).
Authorization controls are unchanged from earlier releases.
.RE
.PP
.RS +3
\fBnfs4
\fR
.RE
.RS +9
Support for NFS V4 ACLs only. Users are not allowed to assign
traditional GPFS ACLs to any file system objects (directories and individual
files).
.RE
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Any supported ACL type is permitted. This includes traditional GPFS
(\fBposix\fR) and NFS V4 ACLs (\fBnfs4\fR).
.PP
The administrator is allowing a mixture of ACL types. For example,
fileA may have a \fBposix\fR ACL, while fileB in the same file system may
have an NFS V4 ACL, implying different access characteristics for each file
depending on the ACL type that is currently assigned.
.RE
.PP
Neither \fBnfs4\fR nor \fBall\fR should be specified here unless the
file system is going to be exported to NFS V4 clients. NFS V4 ACLs
affect file attributes (mode) and have access and authorization
characteristics that are different from traditional GPFS ACLs.
.RE
.PP
.RS +3
\fB-m \fIDefaultMetaDataReplicas\fR
\fR
.RE
.RS +9
Changes the default number of metadata replicas. Valid values are 1
and 2 but cannot exceed the values of \fIMaxMetaDataReplicas\fR set when
the file system was created.
.RE
.PP
.RS +3
\fB-o\fR \fIMountOptions\fR
.RE
.RS +9
Specifies mount options to pass to the mount command when mounting the file system.
.RE
.PP
.RS +3
\fB-Q {yes | no}
\fR
.RE
.RS +9
If \fByes\fR is specified, quotas are activated automatically when the
file system is mounted. If \fBno\fR is specified, the quota files
remain in the file system, but are not used.
.PP
To activate quota management after the \fBmmchfs -Q\fR command has been
issued:
.RS +3
.HP 3
1. Unmount the file system.
.HP 3
2. Remount the file system.
.HP 3
3. Compile inode and disk block statistics using the \fBmmcheckquota\fR command. Use these values as a reference
to establish realistic quota values when issuing the \fBmmedquota\fR and \fBmmdefedquota\fR commands.
.HP 3
4. For default quotas:
.RS +3
.HP 3
a. Issue the \fBmmdefedquota\fR command to establish
default quota values.
.HP 3
b. Issue the \fBmmdefquotaon -d\fR command to
activate default quotas.
.RE
.HP 3
5. For explicit quotas:
.RS +3
.HP 3
a. Issue the \fBmmedquota\fR command to establish
quota values.
.HP 3
b. Issue the \fBmmquotaon\fR command to activate
quota enforcement.
.RE
.RE
.PP
To deactivate quota management after the \fBmmchfs -Q\fR command has
been issued:
.RS +3
.HP 3
1. Unmount the file system.
.HP 3
2. Remount the file system.
.RE
.RE
.PP
.RS +3
\fB-r \fIDefaultDataReplicas\fR
\fR
.RE
.RS +9
Changes the default number of data replicas. Valid values are 1 and
2 but cannot exceed the values of \fIMaxDataReplicas\fR set when the file
system was created.
.RE
.PP
.RS +3
\fB-S {yes | no}
\fR
.RE
.RS +9
Suppress the periodic updating of the value of \fBatime\fR as reported
by the \fBgpfs_stat()\fR, \fBgpfs_fstat()\fR, \fBstat()\fR, and \fBfstat()\fR
calls. If \fByes\fR is specified these calls report the last time
the file was accessed when the file system was mounted with the \fB-S no\fR
option.
.RE
.PP
.RS +3
\fB-T \fImountpoint\fR
\fR
.RE
.RS +9
Change the mount point of the file system starting at the next mount of
the file system.
.PP
The file system must be unmounted on all nodes prior to issuing the
command.
.RE
.PP
.RS +3
\fB-V
\fR
.RE
.RS +9
After migration, change the file system format to the latest format
supported by the currently installed level of GPFS. This causes the
file system to become permanently incompatible with prior releases of
GPFS.
.PP
Prior to issuing the \fB-V\fR option, see the \fIGeneral Parallel File System: Concepts, Planning, and
Installation Guide\fR and search for \fImigrating to the latest level of
GPFS\fR.
.RE
.PP
.RS +3
\fB-W \fINewDeviceName\fR
\fR
.RE
.RS +9
Assign \fINewDeviceName\fR to be the device name for the file
system.
.RE
.PP
.RS +3
\fB-z {yes | no}
\fR
.RE
.RS +9
Enable or disable DMAPI on the file system. For further information
on DMAPI for GPFS, see the \fIGeneral
Parallel File System: Data Management API Guide\fR.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmchfs\fR command.
.PP
You may issue the \fBmmchfs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.PP
When considering data replication for files accessible to SANergy, see 
\fIAppendix B,  SANergy export: restrictions and considerations\fR in
\fIGPFS: Administration and Programming Reference\fR.
.SH "Examples"
.PP
To change the default replicas for metadata to 2 and the default replicas
for data to 2 for new files created in the \fBfs0\fR file system,
enter:
.sp
.nf
mmchfs fs0 -m 2 -r 2
.fi
.sp
.PP
To confirm the change, enter:
.sp
.nf
mmlsfs fs0 -m -r
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- -----------------------------------
 -m  2              Default number of metadata replicas
 -r  2              Default number of data replicas
.fi
.sp
.SH "See also"
.PP
mmcrfs Command
.PP
mmdelfs Command
.PP
mmdf Command
.PP
mmfsck Command
.PP
mmlsfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
file sk 2J4@$        
  &A&A&A             
      ./usr/share/man/man8/mmchmgr.8 t         $          $          .TH mmchmgr 11/01/04
mmchmgr Command
.SH "Name"
.PP
\fBmmchmgr\fR - Assigns a file system manager node.
.SH "Synopsis"
.PP
\fBmmchmgr\fR \fIDevice\fR [\fINodeName\fR]
.SH "Description"
.PP
The \fBmmchmgr\fR command assigns a file system manager node.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system for which the file system manager node
is to be changed. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB\fINodeName\fR
\fR
.RE
.RS +9
The target node to be appointed as the new file system manager. 
.PP
If no \fINodeName\fR is specified, the configuration manager selects the
new file system manager node.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmchmgr\fR command.
.PP
You may issue the \fBmmchmgr\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
Assume the file system manager for the file system \fBgpfs1\fR
is currently \fBk164n05\fR. To migrate the file system manager
responsibilities to \fBk164n06\fR, enter:
.sp
.nf
mmchmgr gpfs1 k164n06
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
GPFS: 6027-628 Sending migrate request to current manager node
      9.114.68.69 (k164n05).
GPFS: 6027-629 Node 9.114.68.69 (k164n05) resigned as manager for gpfs1
GPFS: 6027-630 Node 9.114.68.70 (k164n06) appointed as manager for gpfs1
.fi
.sp
.SH "See also"
.PP
mmlsmgr Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
1pkHv 4J4@$          (A(A(A                   ./usr/share/man/man8/mmchnsd.8 t         $          $          .TH mmchnsd 11/01/04
mmchnsd Command
.SH "Name"
.PP
\fBmmchnsd\fR - Changes Network Shared Disk (NSD) configuration parameters.
.SH "Synopsis"
.PP
\fBmmchnsd\fR
{"\fIDiskDesc\fR[;\fIDiskDesc\fR...]"
| \fB-F\fR \fIDescFile\fR}
.SH "Description"
.PP
The \fBmmchnsd\fR command serves several purposes:
.RS +3
.HP 3
1. Change either or both the primary and backup NSD server nodes.
.HP 3
2. Define a backup server node for an NSD that currently does not have
one.
.HP 3
3. Delete a backup server node for an NSD.
.HP 3
4. Delete both the primary and backup NSD server nodes. The disk
must now be SAN-attached to all nodes in the cluster.
.HP 3
5. Assign a primary and, if specified, a backup NSD server node.
Nodes that are not SAN-attached to the disk, or nodes that experience a local
device driver failure, will now have access to the data over the network from
these servers.
.RE
.PP
You must follow these rules when changing NSDs:
.RS +3
.HP 3
\(bu You must identify the disks by the NSD names that were given to them by
the \fBmmcrnsd\fR command.
.HP 3
\(bu You must explicitly specify values for both the primary and backup NSD
server nodes even if you are only changing one of them.
.HP 3
\(bu The file system that contains the NSD being changed must be unmounted
prior to issuing the \fBmmchnsd\fR command.
.HP 3
\(bu The NSD must be properly connected to the new nodes prior to issuing the
\fBmmchnsd\fR command.
.HP 3
\(bu This command cannot be used to change the \fIDiskUsage\fR or
\fIFailureGroup\fR for an NSD. You must issue the \fBmmchdisk\fR command to change these.
.HP 3
\(bu You cannot change the name of the NSD.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDiskDesc\fR
\fR
.RE
.RS +9
A descriptor for each NSD to be changed. Each descriptor is
separated by a semicolon (;). The entire list must be enclosed in
single or double quotation marks.
.RE
.PP
.RS +3
\fB\fIDescFile\fR
\fR
.RE
.RS +9
Specifies a file containing a list of disk descriptors, one per
line.
.PP
Each disk descriptor must be specified in the form:
.sp
.nf
DiskName:PrimaryServer:BackupServer
.fi
.sp
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
The NSD name that was given to the disk by the \fBmmcrnsd\fR command.
.RE
.PP
.RS +3
\fB\fIPrimaryServer\fR
\fR
.RE
.RS +9
The name of the primary NSD server node. 
.PP
If this field is omitted, the disk is assumed to be SAN-attached to
all nodes in the cluster.
.PP
To change only the primary NSD server, ensure that, if a backup NSD server
exists, you specify it; otherwise the backup NSD server will be deleted.
Any parameter that is not specified defaults to null.
.sp
.nf
DiskName:PrimaryServer:BackupServer
.fi
.sp
.PP
To remove the primary GPFS server, explicitly skip the parameter:
.sp
.nf
DiskName
.fi
.sp
.RE
.PP
.RS +3
\fB\fIBackupServer\fR
\fR
.RE
.RS +9
The name of the backup NSD server node. 
.PP
If the \fIPrimaryServer\fR has not been specified and this field
is omitted, the disk is assumed to be SAN-attached to all nodes in the
cluster.
.PP
To remove the backup NSD server, explicitly skip the parameter, but ensure
you have specified the \fIPrimaryServer\fR or it will be removed
also:
.sp
.nf
DiskName:PrimaryServer
.fi
.sp
.RE
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmchnsd\fR command.
.PP
You may issue the \fBmmchnsd\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
If the disk \fBgpfs1nsd\fR is currently defined with
\fBk145n05\fR as its primary server and \fBk145n07\fR as its backup
NSD server, and you want to change the primary NSD server to
\fBk145n09\fR, enter:
.sp
.nf
mmchnsd "gpfs1nsd:k145n09:k145n07:::"
.fi
.sp
.PP
To confirm the changes, enter:
.sp
.nf
mmlsnsd -d "gpfs1nsd"
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
 File system   Disk name    Primary node             Backup node
 ---------------------------------------------------------------
 fs1           gpfs1nsd     k145n09                  k145n07
\ 
.fi
.sp
.SH "See also"
.PP
mmchdisk Command
.PP
mmcrcluster Command
.PP
mmcrnsd Command
.PP
mmlsnsd Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
(bu Thkl 0J4@$        $  mBmBmB             $      ./usr/share/man/man8/mmcrcluster.8  .3 n         $          $          .TH mmcrcluster 04/21/05
mmcrcluster Command
.SH "Name"
.PP
\fBmmcrcluster\fR - Creates a GPFS cluster from a set of
nodes.
.SH "Synopsis"
.PP
\fBmmcrcluster\fR \fB-n\fR \fINodeFile\fR \fB-p\fR
\fIPrimaryServer\fR [\fB-s\fR \fISecondaryServer\fR]
[\fB-r\fR \fIRemoteShellCommand\fR] [\fB-R\fR
\fIRemoteFileCopyCommand\fR] [\fB-C\fR
\fIClusterName\fR] [\fB-U\fR \fIDomainName\fR]
[\fB-A\fR] [\fB-c\fR
\fIConfigFile\fR]
.SH "Description"
.PP
Use the \fBmmcrcluster\fR command to create a GPFS cluster.
.PP
Upon successful completion of the \fBmmcrcluster\fR command, the
\fB/var/mmfs/gen/mmsdrfs\fR and the \fB/var/mmfs/gen/mmfsNodeData\fR
files are created on each of the nodes in the cluster. Do not delete
these files under any circumstances. For further information, see the
\fIGeneral Parallel File System: Concepts, Planning,
and Installation Guide\fR.
.PP
You must follow these rules when creating your GPFS cluster:
.RS +3
.HP 3
\(bu While a node may mount file systems from multiple clusters, the node
itself may only be added to a single cluster using the \fBmmcrcluster\fR or
\fBmmaddnode\fR command.
.HP 3
\(bu The nodes must be available for the command to be successful.
If any of the nodes listed are not available when the command is issued, a
message listing those nodes is displayed. You must correct the problem
on each node and issue the \fBmmaddnode\fR command to
add those nodes.
.HP 3
\(bu You must designate at least one node as a quorum node. You
are strongly advised to designate the cluster configuration servers as quorum
nodes. How many quorum nodes altogether you will have depends on
whether you intend to use the node quorum with tiebreaker algorithm or
the regular node based quorum algorithm. See the \fIGeneral Parallel File System: Concepts, Planning, and
Installation Guide\fR and search for \fIdesignating quorum nodes\fR for
more details.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fB-A\fR
\fR
.RE
.RS +9
Specifies that GPFS daemons are to be automatically started when nodes
come up. The default is not to start daemons automatically.
.RE
.PP
.RS +3
\fB\fB-C\fR \fIClusterName\fR
\fR
.RE
.RS +9
Specifies a name for the cluster. If the user-provided name
contains dots, it is assumed to be a fully qualified domain name.
Otherwise, to make the cluster name unique, the domain of the primary
configuration server will be appended to the user-provided name. 
.PP
If the \fB-C\fR flag is omitted, the cluster name defaults to the name
of the primary GPFS cluster configuration server.
.RE
.PP
.RS +3
\fB\fB-c\fR \fIConfigFile\fR
\fR
.RE
.RS +9
Specifies a file containing GPFS configuration parameters with values
different than the documented defaults. A sample file can be found in
\fB/usr/lpp/mmfs/samples/mmfs.cfg.sample\fR. See the
\fBmmchconfig\fR command for a detailed description of the different
configuration parameters.
.PP
The \fB-c\fR \fIConfigFile\fR parameter should only be used by experienced
administrators.  Use this file to set up only parameters that appear in 
the \fBmmfs.cfg.sample\fR file.  Changes to any other values may be ignored by 
GPFS.  When in doubt, use the \fBmmchconfig\fR command instead.
.RE
.PP
.RS +3
\fB-n \fINodeFile\fR
\fR
.RE
.RS +9
\fINodeFile\fR consists of a list of node descriptors, one per line, to
be included in the GPFS cluster. Node descriptors are defined as:
.sp
.nf
NodeName:NodeDesignations
.fi
.sp
.PP
where:
.RS +3
.HP 3
1. \fBNodeName\fR is the hostname or IP address to be used by GPFS for
node to node communication.
.sp
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.HP 3
2. \fBNodeDesignations\fR is an optional, '-' separated
list of node roles.
.RS +3
.HP 3
\(bu \fBmanager\fR | \fB\fIclient\fR\fR - Indicates whether a node is
part of the pool of nodes from which configuration and file system managers
are selected. The default is \fBclient\fR.
.HP 3
\(bu \fBquorum\fR | \fB\fInonquorum\fR\fR - Indicates whether a node is to
be counted as a quorum node. The default is \fBnonquorum\fR.
.RE
.RE
.PP
You must provide a descriptor for each node to be added to the GPFS
cluster.
.RE
.PP
.RS +3
\fB-p \fIPrimaryServer\fR
\fR
.RE
.RS +9
Specifies the primary GPFS cluster configuration server node used to
store the GPFS configuration data. This node must be a member of the
GPFS cluster.
.RE
.PP
.RS +3
\fB-R \fIRemoteFileCopy\fR
\fR
.RE
.RS +9
Specifies the fully-qualified path name for the remote file copy program
to be used by GPFS. The default value is
\fB/usr/bin/rcp\fR.
.PP
The remote copy command must adhere to the same syntax format as the
\fBrcp\fR command, but may implement an alternate authentication
mechanism.
.RE
.PP
.RS +3
\fB-r \fIRemoteShellCommand\fR
\fR
.RE
.RS +9
Specifies the fully-qualified path name for the remote shell program to be
used by GPFS. The default value is \fB/usr/bin/rsh\fR.
.PP
The remote shell command must adhere to the same syntax format as the
\fBrsh\fR command, but may implement an alternate authentication
mechanism.
.RE
.PP
.RS +3
\fB-s \fISecondaryServer\fR
\fR
.RE
.RS +9
Specifies the secondary GPFS cluster configuration server node used to
store the GPFS cluster data. This node must be a member of the GPFS
cluster. 
.PP
It is suggested that you specify a secondary GPFS cluster configuration
server to prevent the loss of configuration data in the event your primary
GPFS cluster configuration server goes down. When the GPFS daemon
starts up, at least one of the two GPFS cluster configuration servers must be
accessible.
.PP
If your primary GPFS cluster configuration server fails and you have
not designated a secondary server, the GPFS cluster configuration files are
inaccessible, and any GPFS administrative commands that are issued
fail. File system mounts or daemon startups also fail if no GPFS
cluster configuration server is available.
.RE
.PP
.RS +3
\fB-U \fIDomainName\fR
\fR
.RE
.RS +9
Specifies the UID domain name for the cluster.
.PP
A detailed description of the GPFS user ID remapping convention is
contained in \fI\fIUID Mapping for GPFS In a Multi-Cluster
Environment\fR\fR at www.ibm.com/servers/eserver/clusters/library/wp_aix_lit.html.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmcrcluster\fR
command.
.PP
You may issue the \fBmmcrcluster\fR command from any node in the GPFS
cluster.
.PP
A properly configured \fB.rhosts\fR file must exist in the root
user's home directory on each node in the GPFS cluster. If you
have designated the use of a different remote communication program on either
the \fBmmcrcluster\fR or the \fBmmchcluster\fR
command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To create a GPFS cluster made of all of the nodes listed in the file
\fB/u/admin/nodelist\fR, using node \fBk164n05\fR as the primary
server, and node \fBk164n04\fR as the secondary server, issue:
.sp
.nf
mmcrcluster  -n /u/admin/nodelist -p k164n05 -s k164n04
.fi
.sp
.PP
where \fB/u/admin/nodelist\fR has these contents:
.sp
.nf
k164n04.kgn.ibm.com:quorum
k164n05.kgn.ibm.com:quorum
k164n06.kgn.ibm.com
.fi
.sp
.PP
The output of the command is similar to:
.sp
.nf
Mon Aug  9 22:14:34 EDT 2004: 6027-1664 mmcrcluster: Processing node
    k164n04.kgn.ibm.com
Mon Aug  9 22:14:38 EDT 2004: 6027-1664 mmcrcluster: Processing node
    k164n05.kgn.ibm.com
Mon Aug  9 22:14:42 EDT 2004: 6027-1664 mmcrcluster: Processing node
    k164n06.kgn.ibm.com
mmcrcluster: Command successfully completed
mmcrcluster: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.PP
To confirm the creation, enter: 
.sp
.nf
mmlscluster
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
GPFS cluster information
========================
  GPFS cluster name:         k164n05.kgn.ibm.com
  GPFS cluster id:           680681562214606028
  GPFS UID domain:           k164n05.kgn.ibm.com
  Remote shell command:      /usr/bin/rsh
  Remote file copy command:  /usr/bin/rcp
GPFS cluster configuration servers:
-------------------------------------
  Primary server:    k164n05.kgn.ibm.com
  Secondary server:  k164n04.kgn.ibm.com
 Node number Node name IP address     Full node name           Remarks
----------------------------------------------------------------------
       1     k164n04  198.117.68.68  k164n04.kgn.ibm.com  quorum node
       2     k164n05  198.117.68.69  k164n05.kgn.ibm.com  quorum node
       3     k164n06  198.117.68.70  k164n06.kgn.ibm.com\ 
.fi
.sp
.SH "See also"
.PP
mmaddnode Command
.PP
mmchconfig Command
.PP
mmdelnode Command
.PP
mmlscluster Command
.PP
mmlsconfig Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
lv 8J4@$        @   A A A             =)      ./usr/share/man/man8/mmcrfs.8 er         $          $            	  efst
.R\acdilmnorhpu023BPSbgy+,-149:DEFIkvwx"()/58ACGHMNTz%67KLOVW[]q{|}U_'QY;=j@P#NƇƇd8>PӠGk$N!2w/k:@/n}|+PAH#Fct"uYAUBR6:dT.R<8!V*$b P UbNa**҄ XcUWU*YXZZUiwUu|
Uj*UVAV*d*ҫ )V@4=V*A`'iV,WUU
R!*/JUiUU,pʭ*p;?oxHY *uXZ~VF=^*)V@4%i:T$jڽZhǹZ@*V	 XHp)V@4&OÙdZ@uY Ukiqdb*Rl=V*%V_Kզ{U8 ZzT$%F=^*)V@4'Ud
* iU_dEijTb*8  UkiwUUJ:	x JqDt"p'xOdſe:^_%ϯsj8P.u^FkXO']V*$!UPJužTI,%X٪OD(ҭ=qi+b75VNSG
\o
\[F=^Q=)arʏ]3}mCSN)(]Nc.<qϯsju#cB]76׾QwE5CΡ-n}|+PH	.Ls]Q]x,e6&O`N}ѓk]=m@3Q6O۟_
3k0翓:%9>3_e'SsYݞ2Rt-qGjl	2m`guݟKd[!b=¹puZu?+YظuNn:%>GU]="DIuc_&0[サS¹ Gڟ(I:K:$x#4%=o8+Y:Hp?8`R;:DtH.@.M`kFDI.<ΠI3xP\Vo>ͨ|pVaQ%?zR}m@|4%=ex@/([2tJ|+PT#4MkSCnɕ3f#[%8ǛSAC/0x `fx `f ΀<7E6M'POӴ-O)]^pABt\$lwWzr_>ū:Ln0[_vpXW&|pVt$L=Rr_2rW2S5޹[Eݔ߲["7ɂ/uasy3n).le|pV

ouE	ro8+]/X_*%f+サuW6̩ܧŕ಼|z<mMSOMG54Pr<¹n(_mWuˋ+?K
וs8ôD]{s\ڀg7 |-Z5^2̩ܧŕ\W>V?KvqgEWmC{iBMZܮM`k`R)&0X&/Q/+À/+]{s:]J]uF|e}XG+\W>{u	-o{]qo/6>LnnS} ۟Yݜ5oy^E.:))6z]LTSp^QU"n,ʞuCyS./ST0`[@ݨ/`"gA'UMaU"DGefY}k+]uYAUPϥWuΎ@|B^2m`В\,y#ǝ@f𔍎$kLLvvN<y	&os\ڇv{j<Es\+jx::K:T%#cIZNIБ3$Xׄ$kLL.@/n}|+PAБ3$X*D]̥ZNIБ3$XmW.mC:/i:8_
<W
y:kT45ou!x5V:󭌣5d]ydmp]ZQ2zg>ͨx朝sb0"(Kĥ-e1W>]^V@UBD΂$NcΫ %;*L8eVZ@t$LI. rFSn֗SډPщkʏ|, <a&0[MXqɔͦžty2&(K}|+PφdN$p ?Zx(]yzh2XZzabݵ=]s'YQey__mDhݵeGLGVFڋ~ɒ.mCN%VFGz(⒟	@	uϯsj-1B^Lק[
uY .d*ڵΫ $s%UBD΂$NcΫ )bUsVt$LI. rFSndt]}`mc&6S(2w4 r7ey_K:j=V*$b>/i_d!N8Z{%.-/k><;S>]nMlQ]vJ\[8]GObNa**Ϯɕ]uYA5VBC>\]o6i=(Dw쾗]TI-,*XgA	O\뛔p$dzdl#ϯ)KMl~]4s~Lu]equ)f.ͨk5n}v.<{otN 7&y^.>`kJH	%ǝV@v	V1V( *Q'`ciBIБ3$XݵOܧl/-\wE5q5Ml݇1s^y2*d(H AHBuӮfרotN-^Q\ų@|
G:8P6s]al	-Xɸ͔kǓڷF{WQϯsji'S`wG=mCey:sfFɕNM5Z]Ok%sdo-bwG5o}55u?]wEsM[MfɆ£J(:8;-Xɸ͔Ѧ2Qo\[MOnu1zh(Dĝ5DDDRE:NFt[0"DI.<
Q'`MpZNIБ3$XDFcp+]p!.d]7Wte~]VBdUPqIOپW.=dGk[;UEZ{%.-AБ3$X*DRi:'BD΄b$e1W-Ŕ)f.jkGH	%ǝVAՃЕjڽYi:&t$8Z/k>]v%[
յzpn>]k5"gA'BIpUj`<^T괝	:KV-sK^.]ݒVx{%[xVՠHDВ\,yd!ZVNIБ3$Xb1{Y-([W{eY-unڽZH	%ǝVAUHВ\,@kZuunx˵z^jha}gk[9M'3F׷>ͨv2n3e0θq7:g\ibj~tʝ^Q	:&t"t$uYXgA	i:'BD΄bxu>uU)<)9gqvrtD #{SezW׹4;F\g׿K`-y#'G|g]!(i}q+]FGVFZ|2+nyRr4ϥ[ͮڵϲ]s&.e~T~2mxu3s5VdT~2mwg[K.]k5ppB\ܬ[7)zGٺ%;W|+V[*#0׾21/:1b_
&uθݻQX__Y^#J͊笛.G׵Αk[>uO]|\+	|ՙ?׷cot~_
<NF"gBD΂AJ8::K:h;
Rp;ݔ$lt:'BD΄b\g.¹.N$lv"t"t$uYUHВ\,@Iu%ںW=pn]vլH	%ǝVB2:'BD΄b	.&6:"gA'BIpUY(Hi:&t$8]Guϯsjc+2"gBD΂$NcΫ CH>L$*uZN	%IZ7ɂ؎77g 
@3 |!:]vլ(0 >4pHpHw	η3VM
G$e1Wn.\aY-s[6d UZ|dJ{>HDВ\,ydDJTb*Y ;JUHВ\,@䌦:Y.^tdV+C+fd!*k8%V@4OU)25[WcWBiyS¹	U svg]ɾ˵cWپk^QV{m@0e1W|ܯmZ~Y >VV#5ҊH	%ǝVAV@4[*L8eVZUd8xU괝	:K[>_J^Wn)*tɕ]{s\ڀDВ\,yd8d괝	:K)5Eo"8g^WBb:^2޷CR7K^Qvr~uιakJo7Ob8PYӻ:h2XZPt$L"DI.<:'BD΄b$iJ{S1B\xQyTp<xzS~˯1"}tB\W۟_
< npK]:fn{>"gA'BIpU[uZN	%_dJ]̩۴pn涳k8%V@cUiuż&(KUd#xH)A'
]mٯe>{0pΛ:JX))\}VB*಼_U2XZx8qZ{Qϯsj+XuǄ	p ?0gtyWDkuc+3lڛ\=~FTZ냁7ҦW׽lQוqHPt$L"D:vSUS{i\{y:e1o.\ؾ>G>ͨwg)'DrudOiebTwbfsvn&npL$uż[YqoY\҉ZmMٮf}-{:uN(}tB\BD΂$NcΫ YЕjڽZhǹZNIБ3$XĪk_dugFMt)3-]:foܧ{s[.Y:qoɛerOk닭וV*XJm^4c(B붭f{ HDВ\,yd8hKBK	Vզ{U괝	:KJf|2`N}1՞W63Ӧlθ-troܧϤp!osxmg\użw&n.1uYK\NYЕjڽZhǹZQmZ~gAБ3$XǪBf'&*uZN	%.fo'S6k0F~^W6-wg7)yo(mf{]voy_BmCziG
]k5ݜxpn.]evw?׷>ͨrלT{'KjH	%ǝV@4'b3`+6Vt$LI. pu౗@xu?+wGuϯsjBݜ]o6jk]|pVuϯsj{:^QpMg\[ظt|2u7;KmۆC=~W\O	#vquηG6kti_׹Nb'A'U]al	۟YW6Ųc\._|47ɂR'A'mC>{:퓢Sstjxu?+:ɵ-'Kڟ uZu?+X}.]Kj:ܧl#n}|+Pc+1(j[˲Q6ttJr6~W6"gA'BIpU .wUU@z*V"gBIp^p0jݔb`۟_
;FiBY%ZPt"pٮ^%e\VRQ޻ڟ۟_
9kN'd{MBIp@Dpp.mA@DqIzY7Y-^	%ǝ<y֎+m}. ںhljJ\[^VMڵau<yɣ_K!66ZEVB~˭uk5e	:$xG
Kuz1cW'BIp@Du_K!S@eUP߲f]{G MW0]uY@6ZEVB~˭u6ZQM
	:&t$L"DI.<Q*^4c'UHВ\,@Uk5l	F:-WɾL)s[Ӌuθw\[rfbo'\{SZuXIc"Kզ{U(]vլvp	::K:jzT$%F=^*:'BD΄bY_dugWɾLr_>8mg\użw&n.1uYK\NY%F=^*Ҏ.jk8rH	%ǝV@4iZuYXdJ yi:&t$9#L2uT&tJy_닭וYJ)4.C/ j
U32j
U3fA ZC:ޫ!yH)Vo~#ڵyݝV@4iG$e1U)dIb>>¹:_:2SD_Kn}|+P8tHDВ\,ydE +XJ<uZN	%Te~e1ok[;e-O돡f%=|+PG
]k5ݝV@4_UY^ /%ZO(.0yѹ]lZu.DOƩr\}5({s\ڂ8.%.-Nfr\JQdoܣudEdOG?KɋJ>W BD΂$NcΫ jZZxdb*UHВ\,@qY7ה]&A q_(.mA(]vլvuY Щ%ZQ%?fKF8/](GE}| DPgʮTjk"G>"$YKT*;Ex@6iAБ3G]5zn$N$NcΫ 4괝	:KefY}k	::K:qi:&t$8;g\lMAБ3G	S "D#|^-7Kݥ~. *(Aqj$N r7e{U;Z{%.-9C%|d;nmMA'Kq)aŽVBxZ=OΡ-PsC:7m7"e7iׁ]wGu)o7*`B]:fe|l/M.@#omcE{M]sru:%vJ6.bҏ2r)5>]VBdsjjS]uYAi 3枣΄Nc΀<g#*}frJ=Mqou2mgt~\ DPcͩ9(]xɵ]pB]76ظJ6.bôKnWJmxôq+gfԍ
YJ	:8]GQ6^Ό{tSuo:B*eE?$	2d>u73](\[(S^QZ{uX@!lfzG-g"\ 髩YJ$1$N'.âSP߲/k۟_
;zC/i8ôW2r2:K:$xLA`k`R@DuIKKY޸c./](urjڽ_߲1#"G:$%լ\[౗@yяsxmgOzebG@DuI+-^u[Wu7Q]{s\ڇvxH4ΠI3xJF{<8k<7張۟_
:Az-+#c0]GuE	v(pM:&taDl'yeи,4SV0QϤ2;぀B\r5<w .VǇ |4#taA'
_
;]=es7)zGd"taDl"x4p׹=tJvC/ÞNp^7>2mŃ bCO4GサÃR^4GサÃR^4GサÃR^#ZㆎF)C/ƈpVpxᣁѣx
@P8
4p44oHxG#ZㆎF)C/ƈpVpxᣁѣx
@"x4p%8/u)ϯsjASM(pP1x!pѡ<dck<	#ÄC8CpP1x 8h8(ifh`4hqpѼ}myG4x4@DxC/]ɋJ<$0ⱓk1zGa)>LL^Q%7](&voƈ"4ÀNvJ>oټp "<pp8h8dck<	#xC](&voƈ"84hqpP1xGxC0.@הxM;7DG0vJ>oټp "<p8h8(ifh!pѡ.@הxM;7DGㆍ?(ifh g4hqpѼ}myG4x4@Dx`	qG%89xIfaE	h4<ĝ38+\x!ظu~ns\ڂ"(HGcG w>:^Qn}|+Pи,8@!csiqo-O2w<b/]Ke6& Bݝ+9FF2ref aH#ߒHaw(AqiD8>PӠ "D46Z "D>PӠ $L^QHV@nLn'A' wg7k(r :J4@$        (  AAA             (      ./usr/share/man/man8/mmcrnsd.8 r         $          $          .TH mmcrnsd 11/01/04
mmcrnsd Command
.SH "Name"
.PP
\fBmmcrnsd\fR - Creates cluster-wide names for Network
Shared Disks used by GPFS.
.SH "Synopsis"
.PP
\fBmmcrnsd\fR \fB-F\fR \fIDescFile\fR [\fB-v\fR {\fB\fIyes\fR\fR |\fBno\fR}]
.SH "Description"
.PP
The \fBmmcrnsd\fR command is used to create cluster-wide names
for NSDs used by GPFS.
.PP
This is the first GPFS step in preparing a disk for use by a GPFS file
system. A disk descriptor file supplied to this command is rewritten
with the new NSD names and that rewritten disk descriptor file can then be
supplied as input to the \fBmmcrfs\fR command.
.PP
The name created by the \fBmmcrnsd\fR command is necessary since
disks connected at multiple nodes may have differing disk device names in
\fB/dev\fR on each node. The name uniquely identifies the
disk. This command must be run for all disks that are to be used in
GPFS file systems. The \fBmmcrnsd\fR command is also used to assign
a primary and backup NSD server that can be used for I/O operations on behalf
of nodes that do not have direct access to the disk.
.PP
To identify that the disk has been processed by the \fBmmcrnsd\fR
command, a unique NSD volume ID is written on sector 2 of the disk. All
of the NSD commands (\fBmmcrnsd\fR, \fBmmlsnsd\fR,
and \fBmmdelnsd\fR) use this unique NSD volume ID to
identify and process NSDs.
.PP
After the NSDs are created, the GPFS cluster data is updated and they are
available for use by GPFS.
.PP
To avoid performance degradation when utilizing an IBM 
pSeries High Performance Switch (pSeries HPS) in your configuration, it is
suggested you process your disks in two steps:
.RS +3
.HP 3
1. Create virtual shared disks on each physical disk through the
\fBmmcrvsd\fR command.
.HP 3
2. Using the rewritten disk descriptors from the \fBmmcrvsd \fRcommand,
create NSDs through the \fBmmcrnsd\fR command.
.RE
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmcrnsd\fR command, these tasks are
completed:
.RS +3
.HP 3
\(bu NSDs are created.
.HP 3
\(bu The \fIDescFile\fR contains NSD names to be used as input to the \fBmmcrfs\fR, \fBmmadddisk\fR,
or the \fBmmrpldisk\fR commands.
.HP 3
\(bu A unique NSD volume ID to identify the disk as an NSD has been
written on sector 2.
.HP 3
\(bu An entry for each new disk is created in the GPFS cluster data.
.RE
.SH "Parameters"
.PP
.RS +3
\fB-F \fIDescFile\fR
\fR
.RE
.RS +9
The file containing the list of disk descriptors, one per line, in
the format:
.sp
.nf
DiskName:PrimaryServer:BackupServer:DiskUsage:FailureGroup:DesiredName
.fi
.sp
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
.PP
The block device name appearing in \fB/dev\fR for the disk you want to
define as an NSD. Examples of disks accessible through a block device
are SAN-attached disks or virtual shared disks. If a
\fIPrimaryServer\fR node is specified, \fIDiskName\fR must be the
\fB/dev\fR name for the disk device on the primary NSD server node.
See the Frequently Asked Questions at publib.boulder.ibm.com/infocenter/clresctr/topic/com.ibm.cluster.gpfs.doc/
gpfs_faqs/gpfs_faqs.html for the latest supported disk types.
.RE
.PP
.RS +3
\fB\fIPrimaryServer\fR
\fR
.RE
.RS +9
The name of the primary NSD server node. 
.PP
If this field is omitted, the disk is assumed to be SAN-attached to
all nodes in the cluster. If not all nodes in the cluster have access
to the disk, or if the file system to which the disk belongs is to be accessed
by other GPFS clusters, \fIPrimaryServer\fR must be specified.
.RE
.PP
.RS +3
\fB\fIBackupServer\fR
\fR
.RE
.RS +9
The name of the backup NSD server node. 
.PP
If the \fIPrimaryServer\fR has been specified and this field is omitted,
it is assumed you do not want failover in the event that the
\fIPrimaryServer\fR fails. If \fIBackupServer\fR is specified and
the \fIPrimaryServer\fR has not been specified, the command
fails.
.PP
The hostname or IP address must refer to the communications adapter over
which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.PP
.RS +3
\fB\fIDiskUsage\fR
\fR
.RE
.RS +9
Specify a disk usage or accept the default. This field is ignored by the
\fBmmcrnsd\fR command and is passed unchanged to the output descriptor
file produced by the \fBmmcrnsd\fR command. Possible values are:
.PP
.RS +3
\fBdataAndMetadata
\fR
.RE
.RS +9
Indicates that the disk contains both data and metadata. This is
the default.
.RE
.PP
.RS +3
\fBdataOnly
\fR
.RE
.RS +9
Indicates that the disk contains data and does not contain
metadata.
.RE
.PP
.RS +3
\fBmetadataOnly
\fR
.RE
.RS +9
Indicates that the disk contains metadata and does not contain
data.
.RE
.PP
.RS +3
\fBdescOnly
\fR
.RE
.RS +9
Indicates that the disk contains no data and no metadata. Such a
disk is used solely to keep a copy of the file system descriptor, and can be
used as a third failure group in certain disaster recovery
configurations.
.RE
.RE
.PP
.RS +3
\fB\fIFailureGroup\fR
\fR
.RE
.RS +9
A number identifying the failure group to which this disk belongs.
You can specify any value from -1 (where -1 indicates that the disk has no
point of failure in common with any other disk) to 4000. If you do not
specify a failure group, the value defaults to the NSD primary server node
number plus 4000. If an NSD server node is not specified, the value
defaults to -1. GPFS uses this information during data and metadata
placement to assure that no two replicas of the same block are written in such
a way as to become unavailable due to a single failure. All disks that
are attached to the same NSD server or adapter should be placed in the same
failure group.
.RE
.PP
.RS +3
\fB\fIDesiredName\fR
\fR
.RE
.RS +9
Specify the name you desire for the NSD to be created. This name
must not already be used as another GPFS disk name, and it must not begin with
the reserved string 'gpfs'. 
.RS +3
\fBNote:\fR
.RE
.RS +9
This name can contain only the following characters: 'A' through 'Z', 'a' through 'z',  '0' through '9', or '_' (the underscore).
All other characters are not valid.
.RE
.PP
If a desired name is not specified, the NSD is assigned a name according to
the convention:
.PP
.RS +3
\fBgpfs\fINN\fRnsd
\fR
.RE
.RS +9
where \fINN\fR is a unique nonnegative integer not used in any prior
NSD.
.RE
.RE
.RE
.PP
Upon successful completion of the \fBmmcrnsd\fR command, the
\fIDescFile\fR file is rewritten to contain the created NSD names in place
of the device name. Primary and backup NSD servers and
\fIdesiredName\fR are omitted from the rewritten disk descriptor and all
other fields, if specified, are copied without modification. The
original lines, as well as descriptor lines in error, are commented out and
preserved for reference. The rewritten disk descriptor file can then be
used as input to the \fBmmcrfs\fR, \fBmmadddisk\fR, or the \fBmmrpldisk\fR commands. You must have
\fBwrite\fR access to the directory where the \fIDescFile\fR file is
located in order to rewrite the created NSD information.
.PP
The \fIDisk Usage\fR and \fIFailure Group\fR specifications in the
disk descriptor are preserved only if you use the rewritten file produced by
the \fBmmcrnsd\fR command. If you do not use this file, you must
either accept the default values or specify new values when creating disk
descriptors for other commands.
.SH "Options"
.PP
.RS +3
\fB-v\fR {\fByes\fR | \fBno\fR}
\fR
.RE
.RS +9
Verify the disk is not already formatted as an NSD.
.PP
A value of \fB-v yes\fR specifies that the NSD is to be
created only if the disk has not been formatted by a previous invocation of
the \fBmmcrnsd\fR command, as indicated by the NSD volume ID on sector 2 of
the disk. A value of \fB-v no\fR specifies that the disk is to be
formatted irrespective of its previous state. The default is \fB-v
yes\fR.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmcrnsd\fR command.
.PP
You may issue the \fBmmcrnsd\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory, normally \fB/root\fR, on each
node in the GPFS cluster. If you have designated the use of a different
remote communication program on either the \fBmmcrcluster\fR or the \fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To create your NSDs from the descriptor file \fBnsdesc\fR
containing: 
.sp
.nf
 sdav1:k145n05:k145n06:dataOnly:4
 sdav2:k145n04::dataAndMetadata:5:ABC
.fi
.sp
.PP
enter:
.sp
.nf
mmcrnsd -F nsdesc\ 
.fi
.sp
.PP
These descriptors translate as:
.PP
.RS +3
\fBDisk Name
\fR
.RE
.RS +9
sdav1
.RE
.PP
.RS +3
\fBPrimaryServer Name
\fR
.RE
.RS +9
k145n05
.RE
.PP
.RS +3
\fBBackupServer Name
\fR
.RE
.RS +9
k145n06
.RE
.PP
.RS +3
\fBDisk Usage
\fR
.RE
.RS +9
\fBdataOnly\fR
.RE
.PP
.RS +3
\fBFailure Group
\fR
.RE
.RS +9
4
.RE
.PP
and 
.PP
.RS +3
\fBDisk Name
\fR
.RE
.RS +9
sdav2
.RE
.PP
.RS +3
\fBServer Name
\fR
.RE
.RS +9
k145n04
.RE
.PP
.RS +3
\fBBackup Server Name
\fR
.RE
.RS +9
none
.RE
.PP
.RS +3
\fBDisk Usage
\fR
.RE
.RS +9
\fBdataAndMetadata\fR, allowing both
.RE
.PP
.RS +3
\fBFailure Group
\fR
.RE
.RS +9
5
.RE
.PP
.RS +3
\fBDesired Name
\fR
.RE
.RS +9
ABC
.RE
.PP
\fBnsdesc\fR is rewritten as 
.sp
.nf
#sdav1:k145n05:k145n06:dataOnly:4
gpfs20nsd:::dataOnly:4
#sdav2:k145n04::dataAndMetadata:5:ABC
ABC:::dataAndMetadata:5
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
mmcrnsd: Processing disk sdav1
mmcrnsd: Processing disk sdav2
mmcrnsd: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.SH "See also"
.PP
mmadddisk Command
.PP
mmcrfs Command
.PP
mmdeldisk Command
.PP
mmdelnsd Command
.PP
mmlsnsd Command
.PP
mmrpldisk Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
e notkZ <J4@$        R  AAA             R      ./usr/share/man/man8/mmcrsnapshot.8 .3 n         $          $          .TH mmcrsnapshot 11/01/04
mmcrsnapshot Command
.SH "Name"
.PP
\fBmmcrsnapshot\fR - Creates a snapshot of an entire GPFS file
system at a single point in time.
.SH "Synopsis"
.PP
\fBmmcrsnapshot\fR \fIDevice\fR \fIDirectory\fR
.SH "Description"
.PP
Use the \fBmmcrsnapshot\fR command to create a snapshot of an entire
GPFS file system at a single point in time.
.PP
A snapshot is a copy of the changed user data in the file system.
System data and existing snapshots are not copied. The snapshot
function allows a backup or mirror program to run concurrently with user
updates and still obtain a consistent copy of the file system as of the time
the copy was created. Snapshots are exact copies of changed data in the
active files and directories of a file system with the exception of the inode
number. A different inode number allows application programs to
distinguish between the snapshot and the active, or original, files and
directories of the file system. Snapshots of a file system are
read-only and are stored in a \fB.snapshots\fR directory. The
files and attributes of the file system may be changed only in the active
copy.
.PP
There is a maximum limit of 31 snapshots per file system. Snapshots
may be deleted only by issuing the \fBmmdelsnapshot\fR command. The
\fB\&.snapshots\fR directory cannot be deleted.
.PP
If the \fBmmcrsnapshot\fR command is issued while a conflicting command
is running, the \fBmmcrsnapshot\fR command waits for that command to
complete. If the \fBmmcrsnapshot\fR command is running while a
conflicting command is issued, the conflicting command waits for the
\fBmmcrsnapshot\fR command to complete. Conflicting operations
include: 
.RS +3
.HP 3
1. Other snapshot commands
.HP 3
2. Adding, deleting, replacing disks in the file system
.HP 3
3. Rebalancing, repairing, reducing disk fragmentation in a file system
.RE
.PP
If quorum is lost before the \fBmmcrsnapshot\fR command completes, the
snapshot is considered partial and will be deleted when quorum is achieved
again.
.PP
Because snapshots are not copies of the entire file system, they should not
be used as protection against media failures. For protection against
media failures, see \fIGeneral Parallel
File System: Concepts, Planning and Installation Guide\fR and
search on \fIrecoverability considerations\fR.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system for which the snapshot is to be
created. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB\fIDirectory\fR
\fR
.RE
.RS +9
The subdirectory name where the snapshots are stored.
.PP
This is a subdirectory of the root directory and must be a unique name
within the root directory. If you do not want to traverse the root to
access the snapshot you can create a symbolic link to the snapshot by issuing
the \fBmmsnapdir\fR command.
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmcrsnapshot\fR
command.
.PP
You may issue the \fBmmcrsnapshot\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To create a snapshot \fBsnap1\fR, for the file system \fBfs1\fR,
enter:
.sp
.nf
mmcrsnapshot fs1 snap1
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
Writing dirty data to disk
Quiescing all file system operations
Writing dirty data to disk again
Creating snapshot.
Resuming operations.
.fi
.sp
.PP
Before issuing the command, the directory structure would appear similar
to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
.fi
.sp
.PP
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
.fi
.sp
.PP
If a second snapshot were to be created at a later time, the first snapshot
would remain as is. Snapshots are made only of active file systems, not
existing snapshots. For example:
.sp
.nf
mmcrsnapshot fs1 snap2
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
Writing dirty data to disk
Quiescing all file system operations
Writing dirty data to disk again
Creating snapshot.
Resuming operations.
.fi
.sp
.PP
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
/fs1/.snapshots/snap2/file1
/fs1/.snapshots/snap2/userA/file2
/fs1/.snapshots/snap2/userA/file3
.fi
.sp
.SH "See also"
.PP
mmdelsnapshot Command
.PP
mmlssnapshot Command
.PP
mmrestorefs Command
.PP
mmsnapdir Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
is issk괶 >J4@$        i+  AAA             i+      ./usr/share/man/man8/mmcrvsd.8 o         $          $          .TH mmcrvsd 11/01/04
mmcrvsd Command
.SH "Name"
.PP
\fBmmcrvsd\fR - Creates virtual shared disks for use by
GPFS.
.SH "Synopsis"
.PP
\fBmmcrvsd\fR [\fB-f\fR \fIFanoutNumber\fR]
[\fB-y\fR] [\fB-c\fR] \fB-F\fR \fIDescFile\fR
.SH "Description"
.PP
The \fBmmcrvsd\fR command can be used to create virtual shared
disks for subsequent use by the \fBmmcrnsd\fR
command. Virtual shared disks created with \fBmmcrvsd\fR follow the
convention of one local volume group, one local logical volume, one global
volume group, and one virtual shared disk per physical volume. After
the virtual shared disk is created, it is configured and started on each node
with a defined virtual shared disk adapter. See the
\fBupdatevsdnode\fR command in the correct manual for your environment
at: publib.boulder.ibm.com/clresctr/windows/public/rsctbooks.html.
.PP
Where possible, the \fBmmcrvsd\fR command creates and starts virtual
shared disk components in parallel. For instance, when multiple
physical disk servers are specified in the disk descriptor file, their LVM
components are created in parallel. Starting of all virtual shared
disks, on all nodes, always occurs in parallel.
.PP
The \fBmmcrvsd\fR command may also be restarted should one of the steps
fail. See Error recovery.
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmcrvsd\fR command: 
.RS +3
.HP 3
\(bu Virtual shared disks are created.
.sp
If a desired name \fBvsdname\fR is specified on the disk descriptor,
\fBmmcrvsd\fR uses that name for the name of the virtual shared
disk. If a desired name is not specified, the virtual shared disk is
assigned a name according to the convention:
.sp
.RS +3
\fBgpfs\fINN\fRvsd
\fR
.RE
.RS +9
where \fINN\fR is a unique non-negative integer not used in any prior
virtual shared disk named with this convention.
.RE
.HP 3
\(bu Virtual shared disks are synchronously started on all nodes.
.HP 3
\(bu Appropriate global volume groups, logical volumes, and local volume groups
are created.
.sp
If a desired name \fBvsdname\fR is specified on the disk descriptor,
\fBmmcrvsd\fR uses that name as the basis for the names of the global
volume group, local logical volume, and local volume group according to the
convention:
.sp
.RS +3
\fBvsdnamegvg
\fR
.RE
.RS +9
the global volume group
.RE
.sp
.RS +3
\fBvsdnamelv
\fR
.RE
.RS +9
the local logical volume
.RE
.sp
.RS +3
\fBvsdnamevg
\fR
.RE
.RS +9
the local volume group
.RE
.sp
If a desired name is not specified, the global volume group, local logical
volume, and local volume group for the virtual shared disk are named according
to the convention:
.sp
.RS +3
\fBgpfs\fINN\fRgvg
\fR
.RE
.RS +9
the global volume group
.RE
.sp
.RS +3
\fBgpfs\fINN\fRlv
\fR
.RE
.RS +9
the local logical volume
.RE
.sp
.RS +3
\fBgpfs\fINN\fRvg
\fR
.RE
.RS +9
the local volume group
.RE
.sp
where gpfs\fINN\fRvsd was the name chosen for the virtual shared
disk.
.HP 3
\(bu The primary server is configured and the volume group is varied
there.
.HP 3
\(bu The backup server is configured and the volume group is imported there,
but varied off.
.HP 3
\(bu The \fIDescFile\fR file is rewritten to contain the created virtual
shared disk names in place of any disk descriptors containing physical disk or
vpath names. Primary and backup servers are omitted from the rewritten
disk descriptor and all other fields, if specified, are copied without
modification. The rewritten disk descriptor file can then be used as
input to the \fBmmcrnsd\fR command.
.RE
.PP
\fBError recovery\fR
.PP
Each step of the \fBmmcrvsd\fR process is enumerated during command
execution. For example at step 1, the \fBmmcrvsd\fR command
prints:
.sp
.nf
Step \fI1\fR: Setting up environment
.fi
.sp
.PP
As each step is started, its corresponding number is recorded in the
\fIDescFile\fR file as a comment at the end. This comment serves as
restart information to subsequent invocations of the \fBmmcrvsd\fR
command. For example at step one, the recorded comment would be:
.sp
.nf
#MMCRVSD_STEP=\fI1\fR
.fi
.sp
.PP
Upon failure, appropriate error messages from the failing system component
are displayed along with \fBmmcrvsd\fR error messages.
.PP
After correcting the failing condition and restarting the \fBmmcrvsd\fR
command with the same descriptor file, the command prompts you to restart at
the last failing step. For example, if a prior invocation of
\fBmmcrvsd\fR failed at step one, the prompt would be:
.sp
.nf
A prior invocation of this command has recorded a partial
completion in the file (/tmp/\fIDescFile\fR).
Should we restart at prior failing step(\fI1\fR)?[y]/n=>
.fi
.sp
.PP
The default response is \fBy\fR; yes restart at the prior failing
step.
.SH "Parameters"
.PP
.RS +3
\fB-F \fIDescFile\fR
\fR
.RE
.RS +9
The file containing the list of disk descriptors, one per line, in the
form:
.sp
.nf
DiskName:PrimaryServer:BackupServer:DiskUsage:FailureGroup:DesiredName
.fi
.sp
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
The device name of the disk you want to use to create a virtual shared
disk. This can be either an hdisk name or a vpath name for an SDD
device. GPFS performance and recovery processes function best with one
disk per virtual shared disk. If you want to create virtual shared
disks with more than one disk, refer to the correct manual for your
environment at: publib.boulder.ibm.com/clresctr/windows/public/rsctbooks.html
.RE
.PP
.RS +3
\fBPrimaryServer
\fR
.RE
.RS +9
The name of the virtual shared disk server node. This can be in any
recognizable form.
.RE
.PP
.RS +3
\fBBackupServer
\fR
.RE
.RS +9
The backup server name. This can be specified in any recognizable
form or allowed to default to none.
.RE
.PP
.RS +3
\fB\fIDisk Usage\fR
\fR
.RE
.RS +9
Specify a disk usage or accept the default (see \fIGeneral Parallel File System:
Concepts, Planning and Installation Guide\fR and search on \fIrecoverability considerations\fR).
This field is ignored by the \fBmmcrvsd\fR command and is passed unchanged to the output descriptor file produced by the
\fBmmcrvsd\fR command.  
.PP
.RS +3
\fBdataAndMetadata
\fR
.RE
.RS +9
Indicates that the disk contains both data and metadata. This is
the default.
.RE
.PP
.RS +3
\fBdataOnly
\fR
.RE
.RS +9
Indicates that the disk contains data and does not contain
metadata.
.RE
.PP
.RS +3
\fBmetadataOnly
\fR
.RE
.RS +9
Indicates that the disk contains metadata and does not contain
data.
.RE
.PP
.RS +3
\fBdescOnly
\fR
.RE
.RS +9
Indicates that the disk contains no data and no metadata. Such a
disk is used solely to keep a copy of the file system descriptor, and can be
used as a third failure group in certain disaster recovery
configurations.
.RE
.PP
.PP
\fBDisk usage considerations: \fR
.RS +3
.sp
.HP 3
1. The \fIDiskUsage\fR parameter is not utilized by the
\fBmmcrvsd\fR command but is copied intact to the output file that the
command produces. The output file may then be used as input to the \fBmmcrnsd\fR command.
.sp
.HP 3
2. RAID devices are not well-suited for performing small block writes.
Since GPFS metadata writes are often smaller than a full block, you may find
using non-RAID devices for GPFS metadata better for performance.
.RE
.RE
.PP
.RS +3
\fB\fIFailure Group\fR
\fR
.RE
.RS +9
A number identifying the failure group to which this disk belongs.
You can specify any value from -1 (where -1 indicates that the disk has no
point of failure in common with any other disk) to 4000. All disks that
have a common point of failure, such as all disks that are attached to the
same virtual shared disk server node, should be placed in the same failure
group. The value is passed unchanged to the output descriptor file produced
by the \fBmmcrvsd\fR command. 
If you do not specify a failure group, a failure group will be
assigned later by the \fBmmcrnsd\fR command. 
.RE
.PP
.RS +3
\fB\fIDesiredName\fR
\fR
.RE
.RS +9
Specify the name you desire for the virtual shared disk to be
created. This name must not already be used as another GPFS or AIX disk
name, and it must not begin with the reserved string 'gpfs'.
.RS +3
\fBNote:\fR
.RE
.RS +9
This name can contain only the following characters: 'A' through 'Z', 'a' through 'z', '0' through '9', or '_' (the underscore).
All other characters are not valid. The maximum size of this name is 13
characters.
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-f \fIFanoutNumber\fR
\fR
.RE
.RS +9
The maximum number of concurrent nodes to communicate with during parallel
operations. The default value is 10.
.RE
.PP
.RS +3
\fB-y
\fR
.RE
.RS +9
Specifies no prompting for any queries the command may produce. All
default values are accepted.
.RE
.PP
.RS +3
\fB-c
\fR
.RE
.RS +9
Specifies to create Concurrent Virtual Shared Disks. This option is
valid only for disk descriptors that specify both a primary and a backup
virtual shared disk server.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmcrvsd\fR command.
.PP
You may issue the \fBmmcrvsd\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory, normally \fB/root\fR, on each
node in the GPFS cluster. If you have designated the use of a different
remote communication program on either the \fBmmcrcluster\fR or the \fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To create a virtual shared disk with the descriptor file \fBvsdesc\fR
containing: 
.sp
.nf
 hdisk2:k145n01:k145n02:dataOnly:4
 hdisk3:k145n06::dataAndMetadata:5:ABC
.fi
.sp
.PP
These descriptors translate as:
.PP
.RS +3
\fBDisk Name
\fR
.RE
.RS +9
hdisk2
.RE
.PP
.RS +3
\fBServer Name
\fR
.RE
.RS +9
k145n01
.RE
.PP
.RS +3
\fBBackup Server Name
\fR
.RE
.RS +9
k145n02
.RE
.PP
.RS +3
\fBDisk Usage
\fR
.RE
.RS +9
\fBdataOnly\fR
.RE
.PP
.RS +3
\fBFailure Group
\fR
.RE
.RS +9
4
.RE
.PP
and 
.PP
.RS +3
\fBDisk Name
\fR
.RE
.RS +9
hdisk3
.RE
.PP
.RS +3
\fBServer Name
\fR
.RE
.RS +9
k145n06
.RE
.PP
.RS +3
\fBBackup Server Name
\fR
.RE
.RS +9
none
.RE
.PP
.RS +3
\fBDisk Usage
\fR
.RE
.RS +9
\fBdataAndMetadata\fR
.RE
.PP
.RS +3
\fBFailure Group
\fR
.RE
.RS +9
5
.RE
.PP
.RS +3
\fBDesired Name
\fR
.RE
.RS +9
ABC
.RE
.PP
The low level components of the virtual shared disk \fBgpfs20vsd\fR are
created:
.PP
.RS +3
\fBgpfs20gvg
\fR
.RE
.RS +9
global volume group
.RE
.PP
.RS +3
\fBgpfs20lv
\fR
.RE
.RS +9
local logical volume
.RE
.PP
.RS +3
\fBgpfs20vg
\fR
.RE
.RS +9
local volume group
.RE
.PP
The low level components of the virtual shared disk \fBABC\fR are
created:
.PP
.RS +3
\fBABCgvg
\fR
.RE
.RS +9
global volume group
.RE
.PP
.RS +3
\fBABClv
\fR
.RE
.RS +9
local logical volume
.RE
.PP
.RS +3
\fBABCvg
\fR
.RE
.RS +9
local volume group
.RE
.SH "See also"
.PP
mmcrnsd Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
n smallk @J4@$          	A	A	A                   ./usr/share/man/man8/mmdefedquota.8 .3 n         $          $          .TH mmdefedquota 11/01/04
mmdefedquota Command
.SH "Name"
.PP
\fBmmdefedquota\fR - Sets default quota limits to a file
system.
.SH "Synopsis"
.PP
\fBmmdefedquota\fR {\fB-u\fR | \fB-g\fR} \fIDevice\fR
.SH "Description"
.PP
Use the \fBmmdefedquota\fR command to set or change default quota limits
for new users and groups of a file system. Default quota limits for a
file system may be set or changed only if the file system was created with the
\fB-Q yes\fR option on the \fBmmcrfs\fR command or
changed with the \fBmmchfs\fR command, and default
quotas have been activated by issuing the \fBmmdefquotaon\fR command.
.PP
The \fBmmdefedquota\fR command displays the current values for these
limits, if any, and prompts you to enter new values using your default
editor:
.RS +3
.HP 3
\(bu Current block usage (display only)
.HP 3
\(bu Current inode usage (display only)
.HP 3
\(bu Inode soft limit
.HP 3
\(bu Inode hard limit
.HP 3
\(bu Block soft limit
.sp
Displayed in \fBKB\fR, but may be specified using \fBk\fR,
\fBK\fR, \fBm\fR, or \fBM\fR. If no suffix is provided, the
number is assumed to be in \fBbytes\fR.
.HP 3
\(bu Block hard limit
.sp
Displayed in \fBKB\fR, but may be specified using \fBk\fR,
\fBK\fR, \fBm\fR, or \fBM\fR. If no suffix is provided, the
number is assumed to be in \fBbytes\fR.
.RE
.RS +3
\fBNote:\fR
.RE
.RS +9
A block or inode limit of 0 indicates no limit.
.RE
.PP
The \fBmmdefedquota\fR command waits for the edit window to be closed
before checking and applying new values. If an incorrect entry is made,
you must reissue the command and enter the correct values.
.PP
When setting quota limits for a file system, replication within the file
system should be considered. GPFS quota management takes replication
into account when reporting on and determining if quota limits have been
exceeded for both block and file usage. In a file system that has
either type of replication set to a value of two, the values reported on by
both the \fBmmlsquota\fR command and the \fBmmrepquota\fR command are double the value reported
by the \fBls\fR command.
.PP
The EDITOR environment variable must contain a complete path name, for
example:
.sp
.nf
export EDITOR=/bin/vi
.fi
.sp
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to have default quota values set
for.
.PP
File system names need not be fully-qualified. \fBfs0\fR is
just as acceptable as \fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-g 
\fR
.RE
.RS +9
Specifies that the default quota value is to be applied for new groups
accessing the specified file system.
.RE
.PP
.RS +3
\fB-u 
\fR
.RE
.RS +9
Specifies that the default quota value is to be applied for new users
accessing the specified file system.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdefedquota\fR
command.
.PP
You must specify either the \fB-u\fR or the \fB-g\fR option.
.PP
GPFS must be running on the node from which the \fBmmdefedquota\fR
command is issued.
.SH "Examples"
.PP
To set default quotas for new users of the file system \fBfs0\fR,
enter:
.sp
.nf
mmdefedquota -u fs0
.fi
.sp
.PP
The system displays information in your default editor similar to:
.sp
.nf
*** Edit default quota limits for users:
NOTE: block limits will be rounded up to the next multiple
      of the block size.
fs0: blocks in use: 0K, limits (soft = 2500K , hard = 10M)
inodes in use: 0, limits (soft = 100, hard = 1000)
.fi
.sp
.PP
To confirm the change, enter:
.sp
.nf
mmlsquota -d -u
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
                    Block Limits              |         File Limits
Filesystem type KB quota limit in_doubt grace |files quota limit in_doubt grace
fs0        USR  0K 2500  10M    0        none |  0     100  1000    0     none
fs1        USR  no default limits
fs2        USR  no default limits
.fi
.sp
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefquotaoff Command
.PP
mmdefquotaon Command
.PP
mmedquota Command
.PP
mmlsquota Command
.PP
mmquotaoff Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
H "SynokB BJ4@$          AAA                   ./usr/share/man/man8/mmdefquotaoff.8 3 n         $          $          .TH mmdefquotaoff 11/01/04
mmdefquotaoff Command
.SH "Name"
.PP
\fBmmdefquotaoff\fR - Deactivates default quota limit usage for a
file system.
.SH "Synopsis"
.PP
\fBmmdefquotaoff\fR [\fB-u\fR] [\fB-g\fR]
[\fB-v\fR] {\fIDevice\fR[ \fIDevice\fR
\&.\&.\&. ] | \fB-a\fR}
.SH "Description"
.PP
The \fBmmdefquotaoff\fR command deactivates default quota limits for
file systems. If default quota limits are deactivated, new users or
groups for that file system will then have a default quota limit of 0,
indicating no limit.
.PP
If neither the \fB-u\fR nor the \fB-g\fR option is specified, the
\fBmmdefquotaoff\fR command deactivates both user and group default
quotas.
.PP
If the \fB-a\fR option is not used, \fIDevice\fR must be the last
parameter specified.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to have default quota values
deactivated.
.PP
If more than one file system is listed, the names must be delimited by a
space. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Deactivates default quotas for all GPFS file systems in the
cluster. When used in combination with the \fB-g \fR option, only
group quotas are deactivated. When used in combination with the
\fB-u\fR option, only user quotas are deactivated.
.RE
.PP
.RS +3
\fB-g 
\fR
.RE
.RS +9
Specifies that default quotas for groups are to be deactivated.
.RE
.PP
.RS +3
\fB-u 
\fR
.RE
.RS +9
Specifies that default quotas for users are to be deactivated.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Prints a message for each file system in which default quotas are
deactivated.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdefquotaoff\fR
command.
.PP
GPFS must be running on the node from which the \fBmmdefquotaoff\fR
command is issued.
.SH "Examples"
.RS +3
.HP 3
1. To deactivate default user quotas on file system \fBfs0\fR,
enter:
.sp
.nf
mmdefquotaoff -u fs0
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsquota -d -u
.fi
.sp
The system displays information similar to:
.sp
.nf
                          Block Limits        |         File Limits
Filesystem type KB quota limit in_doubt grace | files quota limit in_doubt grace
fs0        USR  no default limits
.fi
.sp
.HP 3
2. To deactivate default group quotas on all file systems, enter:
.sp
.nf
mmdefquotaoff -g -a
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsquota  -d -g
.fi
.sp
The system displays information similar to:
.sp
.nf
Default Block Limits        | Default File Limits
Filesystem type quota limit | quota limit\ 
fs0:       GRP no default limits
fs1:       GRP no default limits
fs2:       GRP no default limits\ 
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefedquota Command
.PP
mmdefquotaon Command
.PP
mmedquota Command
.PP
mmlsquota Command
.PP
mmquotaoff Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
3b/buik(D DJ4@$          AAA                   ./usr/share/man/man8/mmdefquotaon.8  3 n         $          $          .TH mmdefquotaon 11/01/04
mmdefquotaon Command
.SH "Name"
.PP
\fBmmdefquotaon\fR - Activates default quota limit usage for a
file system.
.SH "Synopsis"
.PP
\fBmmdefquotaon\fR [\fB-u\fR] [\fB-g\fR]
[\fB-v\fR] [\fB-d\fR] {\fIDevice\fR[
\fIDevice\fR ... ] | \fB-a\fR}
.SH "Description"
.PP
The \fBmmdefquotaon\fR command activates default quota limits for a file
system. If default quota limits are not applied, new users or groups
for that file system will have a quota limit of 0, indicating no limit.
.PP
To use default quotas, the file system must have been created or
changed with the \fB-Q yes\fR option. See the \fBmmcrfs\fR and \fBmmchfs\fR
commands.
.PP
If neither the \fB-u\fR nor the \fB-g\fR option is specified, the
\fBmmdefquotaon\fR command activates both user and group default quota
limits.
.PP
If the \fB-a\fR option is not used, \fIDevice\fR must be the last
parameter specified.
.PP
Default quotas are established for new users or groups of users by issuing
the \fBmmdefedquota\fR command. Under the
\fB-d\fR option, all users without an explicitly set quota limit will have
a default quota limit assigned.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to have default quota values
activated.
.PP
If more than one file system is listed, the names must be delimited by a
space. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Activates default quotas for all GPFS file systems in the
cluster. When used in combination with the \fB-g \fR option, only
group quotas are activated. When used in combination with the
\fB-u\fR option, only user quotas are activated.
.RE
.PP
.RS +3
\fB-d 
\fR
.RE
.RS +9
Specifies that existing users or groups with no established quota limits
will have default quota values assigned when the \fBmmdefedquota\fR command is issued.
.PP
If this option is not chosen, existing quota entries remain in effect and
are not governed by the default quota rules.
.RE
.PP
.RS +3
\fB-g 
\fR
.RE
.RS +9
Specifies that only a default quota value for group quotas is to be
activated.
.RE
.PP
.RS +3
\fB-u 
\fR
.RE
.RS +9
Specifies that only a default quota value for users is to be
activated.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Prints a message for each file system in which default quotas are
activated.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdefquotaon\fR
command.
.PP
GPFS must be running on the node from which the \fBmmdefquotaon\fR
command is issued.
.SH "Examples"
.RS +3
.HP 3
1. To activate default user quotas on file system \fBfs0\fR, enter:
.sp
.nf
mmdefquotaon -u fs0
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsfs fs0 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- ----------------------------------------
 -Q  user           Quotas enforced
     user           Default quotas enabled\ 
.fi
.sp
.HP 3
2. To activate default group quotas on all file systems in the cluster,
enter:
.sp
.nf
mmdefquotaon -g -a
.fi
.sp
To confirm the change, individually for each file system, enter:
.sp
.nf
mmlsfs  fs1 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- -----------------------------
 -Q  group          Quotas enforced
     group          Default quotas enabled
.fi
.sp
.HP 3
3. To activate both user and group default quotas on file system
\fBfs2\fR, enter:
.sp
.nf
mmdefquotaon fs2
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsfs  fs2 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- --------------------------
 -Q  user;group     Quotas enforced
     user;group     Default quotas enabled\ 
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmchfs Command
.PP
mmcrfs Command
.PP
mmdefedquota Command
.PP
mmdefquotaoff Command
.PP
mmedquota Command
.PP
mmlsquota Command
.PP
mmquotaoff Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
nopsis"kꢥ FJ4@$          AAA                   ./usr/share/man/man8/mmdefragfs.8 8  3 n         $          $          .TH mmdefragfs 11/01/04
mmdefragfs Command
.SH "Name"
.PP
\fBmmdefragfs\fR - Reduces disk fragmentation by increasing the
number of full free blocks available to the file system.
.SH "Synopsis"
.PP
\fBmmdefragfs\fR \fIDevice\fR [\fB-i\fR] [
\fB-v\fR] [\fB-u\fR \fIBlkUtilPct\fR]
.SH "Description"
.PP
Use the \fBmmdefragfs\fR command to reduce fragmentation of a file
system. The \fBmmdefragfs\fR command moves existing file system data
within a disk to make more efficient use of disk blocks. The data is
migrated to unused subblocks in partially allocated blocks, thereby increasing
the number of free full blocks.
.PP
The \fBmmdefragfs\fR command can be run against a mounted or unmounted
file system. However, best results are achieved when the file system is
unmounted. When a file system is mounted, allocation status may change
causing retries to find a suitable unused subblock.
.PP
If \fBmmdefragfs\fR is issued on a file that is locked by SANergy, the
file is not de-fragmented.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to have fragmentation reduced.
File system names need not be fully-qualified. \fBfs0\fR is as
acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-i
\fR
.RE
.RS +9
Specifies to query the current disk fragmentation state of the file
system. Does not perform the actual defragmentation of the disks in the
file system.
.RE
.PP
.RS +3
\fB-u \fIBlkUtilPct\fR
\fR
.RE
.RS +9
The average block utilization goal for the disks in the file
system. The \fBmmdefragfs\fR command reduces the number of allocated
blocks by increasing the percent utilization of the remaining blocks.
The command automatically goes through multiple iterations until
\fIBlkUtilPct\fR is achieved on all of the disks in the file system or
until no progress is made in achieving \fIBlkUtilPct\fR from one iteration
to the next, at which point it exits.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Specifies that the output is verbose.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdefragfs\fR command.
.PP
You may issue the \fBmmdefragfs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on the \fBmmcrcluster\fR or the
\fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To query the fragmentation state of file system \fBfs0\fR,
enter:
.sp
.nf
mmdefragfs fs0 -i
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
                         free subblk    free\ 
disk            disk size  in full  subblk in      %       %
name          in nSubblk   blocks   fragments free blk blk util
--------------- --------- --------- --------- --------  -------
gpfs68nsd         4390912   4270112       551   97.249   99.544
gpfs69nsd         4390912   4271360       490   97.277   99.590
                --------- --------- ---------           -------
(total)           8781824   8541472      1041            99.567
.fi
.sp
.HP 3
2. To reduce fragmentation of the file system \fBfs0\fR on all defined,
accessible disks that are not stopped or suspended, enter:
.sp
.nf
mmdefragfs fs0\ 
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
                     free subblk           free
  disk                in full            subblk in       %            %
  name               blocks    blk       fragments    free blk     blk util
              before     after freed   before after before after before after
 ----------  ------------------------ ------------- ------------ ------------
  gpfs57nsd    28896     29888     31    1462   463  50.39 52.12  94.86 98.31
  gpfs60nsd    41728     43200     46    1834   362  59.49 61.59  93.55 98.66
             ---------------------  --------------               -----------
 (total)      70624     73088    77     3296      825             93.63 98.84
.fi
.sp
.HP 3
3. To reduce fragmentation of all files in the file system \fBfs1\fR
until the disks have an average block utilization percentage higher than 99%,
enter:
.sp
.nf
mmdefragfs fs1 -u 99\ 
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
WARNING: "fs1" is mounted on 1 nodes(s) and in use on 1 node(s)
       Start processing: iteration 1 Processing Pass 1 of 1
       Disk Name gpfs57nsd gpfs60nsd
   6 % complete
  12 % complete
  32 % complete
  38 % complete
  43 % complete
  75 % complete
  81 % complete
  92 % complete
  98 % complete
                   free subblk              free
  disk               in full              subblk in        %           %
  name               blocks     blk       fragments      free blk   blk util
              before     after freed   before  after before after before after\ 
  ----------  ------------------------ -------------- ------------ ------------
  gpfs57nsd     39424     40064    20     1347    589  68.75 69.87  92.48 96.59
  gpfs60nsd     50688     51456    24     1555    593  72.26 73.36  92.01 96.83\ 
               ---------------------  --------------               ------------
  (total)      90112     91520    44     2902    1182             93.63 98.84
.fi
.sp
.RE
.SH "See also"
.PP
mmdf Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
dk HJ4@$          AAA                   ./usr/share/man/man8/mmdeldisk.8  8  3 n         $          $          .TH mmdeldisk 11/01/04
mmdeldisk Command
.SH "Name"
.PP
\fBmmdeldisk\fR - Deletes disks from a GPFS file system.
.SH "Synopsis"
.PP
\fBmmdeldisk\fR \fIDevice\fR
{"\fIDiskDesc\fR[;\fIDiskDesc\fR...]"
| \fB-F\fR \fIDescFile\fR} [\fB-a\fR]
[\fB-c\fR] [\fB-r\fR] [\fB-N\fR {\fB\fIall\fR\fR | \fBmount\fR | \fINodeName\fR[,\fINodeName\fR
\&.\&.\&. ] }]
.SH "Description"
.PP
The \fBmmdeldisk\fR command migrates all data that would otherwise be
lost to the remaining disks in the file system. It then removes the
disks from the file system descriptor and optionally rebalances the file
system after removing the disks.
.PP
Run the \fBmmdeldisk\fR command when system demand is low.
.PP
I/O operations from SANergy clients must terminate before using the
\fBmmdeldisk\fR command. If not, the client applications receive an
error.
.PP
If a replacement for a failing disk is available, use the \fBmmrpldisk\fR command in order to keep the file system
balanced. Otherwise, use one of these procedures to delete a
disk:
.RS +3
.HP 3
\(bu If the disk is not failing and GPFS can still read from it:
.RS +3
.HP 3
1. Suspend the disk
.HP 3
2. Restripe to rebalance all data onto other disks
.HP 3
3. Delete the disk
.RE
.HP 3
\(bu If the disk is permanently damaged and the file system is
replicated:
.RS +3
.HP 3
1. Suspend and stop the disk.
.HP 3
2. Restripe and restore replication for the file system, if possible.
.HP 3
3. Delete the disk from the file system.
.RE
.HP 3
\(bu If the disk is permanently damaged and the file system is not replicated,
or if the \fBmmdeldisk\fR command repeatedly fails, see the \fIGeneral Parallel File System: Problem Determination
Guide\fR and search for \fIDisk media failure\fR.
.RE
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmdeldisk\fR command, these tasks
are completed:
.RS +3
.HP 3
\(bu Data that has not been replicated from the target disks is migrated to
other disks in the file system.
.HP 3
\(bu Remaining disks are rebalanced, if specified.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to delete the disks from. File
system names need not be fully-qualified. \fBfs0\fR is as acceptable
as \fB/dev/fs0\fR. This must be the first parameter.
.RE
.PP
.RS +3
\fB"\fIDiskDesc\fR[;\fIDiskDesc\fR...]"
.PP
\fR
.RE
.RS +9
The disk descriptors of the disks to be deleted from the file
system. If there is more than one disk to be deleted, delimit each
descriptor with a semicolon (;) and enclose the list of disk descriptors
in quotation marks.
.RE
.PP
.RS +3
\fB-F \fIDescFile\fR
\fR
.RE
.RS +9
A file that contains a list of disk descriptors, one per line representing
disks, to be deleted.
.RE
.PP
.RS +3
\fB-N { \fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR ... ] }
\fR
.RE
.RS +9
Specify the nodes that participate in the restripe of the file system
after the specified disks have been removed. Valid values are: 
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Indicates that all nodes in the GPFS cluster, whether they have the
file system mounted or not, participate in the restripe. This is the
default when the \fB-N\fR option has not been specified.
.RE
.PP
.RS +3
\fBmount
\fR
.RE
.RS +9
Indicates that only the nodes that have the file system mounted
participate in the restripe of the file system.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR ... ]
\fR
.RE
.RS +9
A comma-separated list of nodes that participate in the restripe.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-a 
\fR
.RE
.RS +9
Specifies that the \fBmmdeldisk\fR command \fInot\fR wait for
rebalancing to complete before returning. When this flag is specified,
the \fBmmdeldisk\fR command runs asynchronously and returns after the file
system descriptor is updated and the rebalancing scan is started, but it does
not wait for rebalancing to finish. If no rebalancing is requested
(\fB-r \fR option is not specified), this option has no effect.
.RE
.PP
.RS +3
\fB-c
\fR
.RE
.RS +9
Specifies that processing continues even in the event that unreadable data
exists on the disks being deleted. Data that has not been replicated is
lost. Replicated data is not lost as long as the disks containing the
replication are accessible.
.RE
.PP
.RS +3
\fB-r 
\fR
.RE
.RS +9
Rebalance all existing files in the file system to make more efficient use
of the remaining disks.
.RS +3
\fBNote:\fR
.RE
.RS +9
Rebalancing of files is an I/O intensive and time consuming
operation, and is important only for file systems with large files that are
mostly invariant. In many cases, normal file update and creation will
rebalance your file system over time, without the cost of the
rebalancing.
.RE
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdeldisk\fR command.
.PP
You may issue the \fBmmdeldisk\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To delete \fBgpfs2nsd\fR and \fBgpfs3nsd\fR from file system
\fBfs0\fR and rebalance the files across the remaining disks, enter:
.sp
.nf
mmdeldisk fs0 "gpfs2nsd;gpfs3nsd" -r
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
Deleting disks ...
GPFS: 6027-589 Scanning file system metadata, phase 1 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 2 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 3 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-565 Scanning user file metadata ...
   6 % complete on Wed Aug 16 16:03:25 2000
 100 % complete on Wed Aug 16 16:03:27 2000
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-370 tsdeldisk completed.
mmdeldisk: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
Restriping fs0 ...
GPFS: 6027-589 Scanning file system metadata, phase 1 ...\ 
  76 % complete on Wed Aug 16 16:03:57 2000
 100 % complete on Wed Aug 16 16:03:58 2000
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 2 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 3 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-565 Scanning user file metadata ...
   6 % complete on Wed Aug 16 16:04:09 2000
 100 % complete on Wed Aug 16 16:04:11 2000
GPFS: 6027-552 Scan completed successfully.
Done
.fi
.sp
.SH "See also"
.PP
mmadddisk Command
.PP
mmchdisk Command
.PP
mmlsdisk Command
.PP
mmrpldisk Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
iask JJ4@$        4
  AAA             4
      ./usr/share/man/man8/mmdelfs.8 8         $          $          .TH mmdelfs 11/01/04
mmdelfs Command
.SH "Name"
.PP
\fBmmdelfs\fR - Removes a GPFS file system.
.SH "Synopsis"
.PP
\fBmmdelfs\fR \fIDevice\fR [\fB-p\fR]
.SH "Description"
.PP
The \fBmmdelfs\fR command removes all the structures for the
specified file system from the nodes in the cluster.
.PP
Before you can delete a file system using the \fBmmdelfs\fR command, you
must unmount it on all nodes.
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmdelfs\fR command, these tasks are
completed on all nodes:
.RS +3
.HP 3
\(bu Deletes the character device entry from \fB/dev\fR.
.HP 3
\(bu Removes the mount point directory where the file system had been
mounted.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to be removed. File system names
need not be fully-qualified. \fBfs0\fR is as acceptable as
\fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-p 
\fR
.RE
.RS +9
Indicates that the disks are permanently damaged and the file system
information should be removed from the GPFS cluster data even if the disks
cannot be marked as \fBavailable\fR.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdelfs\fR command.
.PP
You may issue the \fBmmdelfs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To delete file system \fBfs0\fR, enter:
.sp
.nf
mmdelfs fs0
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
mmdelfs: 6027-1366 Marking the disks as available
GPFS: 6027-573 All data on following disks of fs0 will be destroyed:
    gpfs9nsd
    gpfs10nsd
    gpfs15nsd
    gpfs17nsd
GPFS: 6027-574 Completed deletion of file system fs0.
mmdelfs: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.SH "See also"
.PP
mmcrfs Command
.PP
mmchfs Command
.PP
mmlsfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
bHk LJ4@$          $A$A$A                   ./usr/share/man/man8/mmdelnode.8  8  3 n         $          $          .TH mmdelnode 11/01/04
mmdelnode Command
.SH "Name"
.PP
\fBmmdelnode\fR - Removes one or more nodes from a GPFS
cluster.
.SH "Synopsis"
.PP
\fBmmdelnode\fR {\fB-n\fR \fINodeFile\fR |
\fINodeName\fR[,\fINodeName\fR...] |
\fB-a\fR}
.SH "Description"
.PP
Use the \fBmmdelnode\fR command to delete one or more nodes from
the GPFS cluster. You may issue the \fBmmdelnode\fR command on any
GPFS node.
.PP
You must follow these rules when deleting nodes:
.RS +3
.HP 3
1. The node being deleted cannot be the primary or secondary GPFS cluster
configuration server unless you intend to delete the entire cluster.
Verify this by issuing the \fBmmlscluster\fR command. If a node to
be deleted is one of the servers and you intend to keep the cluster, issue the
\fBmmchcluster\fR command to assign another node as the server before
deleting the node.
.HP 3
2. A node being deleted cannot be the primary or backup NSD server for any
disk unless you intend to delete the entire cluster. Verify this by
issuing the \fBmmlsnsd\fR command. If a node to be deleted is an NSD
server for one or more disks, use the \fBmmchnsd\fR command to assign
another node as an NSD server for the affected disks.
.HP 3
3. Unless all nodes in the cluster are being deleted, run the
\fBmmdelnode\fR command from a node that will remain in the cluster.
.HP 3
4. Before you can delete a node, you must unmount all of the GPFS file
systems and stop GPFS on the node to be deleted.
.HP 3
5. Exercise caution when deleting quorum nodes from the GPFS cluster.
If the number of remaining quorum nodes falls below the requirement for a
quorum, you will be unable to perform file system operations. See the
\fIGeneral Parallel File System: Concepts, Planning,
and Installation Guide\fR and search for \fIquorum\fR.
.RE
.RS +3
\fBNote:\fR
.RE
.RS +9
Since each cluster is managed independently, there is no automatic
coordination and propagation of changes between clusters like there is between
the nodes within a cluster. This means that if you permanently delete
nodes that are being used as contact nodes by other GPFS clusters that can
mount your file systems, you should notify the administrators of those GPFS
clusters so that they can update their own environments.
.RE
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmdelnode\fR command, the specified
nodes are deleted from the GPFS cluster.
.SH "Parameters"
.PP
.RS +3
\fB-n \fINodeFile\fR
\fR
.RE
.RS +9
Specifies the file containing the list of node names, one per line,
to be deleted from the cluster.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR...]
\fR
.RE
.RS +9
A comma-separated list of nodes to be deleted. The nodes must all
be members of the same GPFS cluster. 
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Delete all nodes in the cluster.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdelnode\fR command.
.PP
You may issue the \fBmmdelnode\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To delete all of the nodes in the cluster, enter:
.sp
.nf
mmdelnode -a
.fi
.sp
The system displays information similar to:
.sp
.nf
Verifying GPFS is stopped on all affected nodes ...
mmdelnode: Command successfully completed
mmdelnode: 6027-1371 Propagating the changes to all affected
 nodes. This is an asynchronous process.
.fi
.sp
.HP 3
2. To delete nodes \fBk145n12\fR, \fBk145n13\fR, and
\fBk145n14\fR, enter:
.sp
.nf
mmdelnode k145n12,k145n13,k145n14
.fi
.sp
The system displays information similar to:
.sp
.nf
Verifying GPFS is stopped on all affected nodes ...
mmdelnode: Command successfully completed
mmdelnode: 6027-1371 Propagating the changes to all affected
nodes. This is an asynchronous process.
.fi
.sp
.RE
.SH "See also"
.PP
mmaddnode Command
.PP
mmcrcluster Command
.PP
mmchconfig Command
.PP
mmlsfs Command
.PP
mmlscluster Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ustek NJ4@$          'A'A'A                   ./usr/share/man/man8/mmdelnsd.8          $          $          .TH mmdelnsd 11/01/04
mmdelnsd Command
.SH "Name"
.PP
\fBmmdelnsd\fR - Deletes Network Shared Disks (NSDs) from the GPFS cluster.
.SH "Synopsis"
.PP
\fBmmdelnsd\fR {\fB-F\fR \fIDiskFile\fR |
"\fIDiskName\fR[;\fIDiskName\fR]"}
.PP
Or,
.PP
\fBmmdelnsd\fR \fB-p\fR \fINSDId\fR [\fB-N\fR
\fInodeName\fR[,\fInodeName\fR]]
.SH "Description"
.PP
The \fBmmdelnsd\fR command serves two purposes:
.RS +3
.HP 3
1. Delete NSDs from the GPFS cluster.
.HP 3
2. Remove the unique NSD volume ID left on the disk after the failure
of a previous invocation of the \fBmmdelnsd\fR command. The NSD had
been successfully deleted from the GPFS cluster but there was a failure to
clear sector 2 of the disk.
.RE
.PP
The NSD being deleted cannot be part of any file system. Either the
\fBmmdeldisk\fR or \fBmmdelfs\fR command must be issued prior to deleting
the NSD from the GPFS cluster.
.PP
The NSD being deleted cannot be a tiebreaker disk. Use the
\fBmmchconfig\fR command to assign new tiebreaker disks prior to deleting
the NSD from the cluster.
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmdelnsd\fR command, these tasks
are completed:
.RS +3
.HP 3
\(bu All references to the disk are removed from the GPFS cluster data.
.HP 3
\(bu Sector 2 of the disk is cleared of the unique NSD volume
ID.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDiskName\fR[;\fIDiskName\fR]
\fR
.RE
.RS +9
The names of the NSDs to be deleted from the GPFS cluster. Specify
the names generated when the NSDs were created. Use the \fBmmlsnsd -F\fR command to display disk names.
If there is more than one disk to be deleted, delimit each name with a
semicolon (;) and enclose the list of disk names in quotation
marks.
.RE
.PP
.RS +3
\fB-F \fIDiskFile\fR
\fR
.RE
.RS +9
Specifies a file containing the names of the NSDs, one per line, to be
deleted from the GPFS cluster.
.RE
.PP
.RS +3
\fB-N \fInodeName\fR
\fR
.RE
.RS +9
Specifies the list of nodes to which the disk is attached. If no
nodes are listed, the disk is assumed to be directly attached to the local
node.
.RE
.PP
.RS +3
\fB-p \fINSDId\fR
\fR
.RE
.RS +9
Specifies the NSD volume ID of an NSD that needs to be cleared from the
disk as indicated by the failure of a previous invocation of the
\fBmmdelnsd\fR command.
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdelnsd\fR command.
.PP
You may issue the \fBmmdelnsd\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To delete \fBgpfs2nsd\fR and \fBgpfs3nsd\fR from the GPFS cluster,
enter:
.sp
.nf
mmdelnsd "gpfs2nsd;gpfs3nsd"
.fi
.sp
.PP
The system displays output similar to:
.sp
.nf
mmdelnsd: Processing disk gpfs2nsd
mmdelnsd: Processing disk gpfs3nsd
mmdelnsd: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.PP
To confirm the deletion, enter:
.sp
.nf
mmlsnsd
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
 File system   Disk name    Primary node      Backup node
 --------------------------------------------------------
 fs0           gpfs16nsd    k145n06              k145n07
 fs0           gpfs17nsd    k145n06              k145n07
 fs0           gpfs18nsd    k145n06              k145n07
 fs0           gpfs19nsd    k145n06              k145n07
 fs0           gpfs20nsd    k145n06              k145n07
 fs1           gpfs1nsd    (directly attached)
 fs1           gpfs4nsd    (directly attached)\ 
\ 
.fi
.sp
.SH "See also"
.PP
mmcrnsd Command
.PP
mmlsnsd Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
nopsis"kb PJ4@$        "  (A(A(A             "      ./usr/share/man/man8/mmdelsnapshot.8 3 n         $          $          .TH mmdelsnapshot 11/01/04
mmdelsnapshot Command
.SH "Name"
.PP
\fBmmdelsnapshot\fR - Deletes a GPFS snapshot.
.SH "Synopsis"
.PP
\fBmmdelsnapshot\fR \fIDevice\fR \fIDirectory\fR
.SH "Description"
.PP
Use the \fBmmdelsnapshot\fR command to delete a GPFS snapshot.
.PP
Once the \fBmmdelsnapshot\fR command has been issued, the snapshot is
marked for deletion and cannot be recovered.
.PP
If the node from which the \fBmmdelsnapshot\fR command is issued
fails, you must reissue the command from another node in the cluster to
complete the deletion. Prior to reissuing a subsequent
\fBmmdelsnapshot\fR command, the file system may be recovered, mounted, and
updates may continue to be made and the \fBmmcrsnapshot\fR command may be issued. However,
the \fBmmrestorefs\fR and \fBmmdelsnapshot\fR
commands may not be issued on other snapshots until the present snapshot is
successfully deleted.
.PP
If the \fBmmdelsnapshot\fR command is issued while a conflicting command
is running, the \fBmmdelsnapshot\fR command waits for that command to
complete. Conflicting operations include: 
.RS +3
.HP 3
1. Other snapshot commands on the same snapshot
.HP 3
2. Adding, deleting, replacing disks in the file system
.HP 3
3. Rebalancing, repairing, reducing disk fragmentation in a file system
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system for which the snapshot is to be
deleted. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB\fIDirectory\fR
\fR
.RE
.RS +9
The snapshot subdirectory to be deleted.
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmdelsnapshot\fR
command.
.PP
You may issue the \fBmmdelsnapshot\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To delete the snapshot \fBsnap1\fR, for the file system \fBfs1\fR,
enter:
.sp
.nf
mmdelsnapshot fs1 snap1
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
Deleting snapshot files...
Delete snapshot snap1 complete, err = 0
.fi
.sp
.PP
Before issuing the command, the directory structure would appear similar
to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
.fi
.sp
.PP
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.snapshots
.fi
.sp
.SH "See also"
.PP
mmcrsnapshot Command
.PP
mmlssnapshot Command
.PP
mmrestorefs Command
.PP
mmsnapdir Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP

mmdelkJg RJ4@$          *A*A*A                   ./usr/share/man/man8/mmdf.8 apsh         $          $          .TH mmdf 11/01/04
mmdf Command
.SH "Name"
.PP
\fBmmdf\fR - Queries available file space on a GPFS file
system.
.SH "Synopsis"
.PP
\fBmmdf\fR \fIDevice\fR [\fB-d\fR | \fB-F\fR |
\fB-m\fR] [\fB-q\fR]
.SH "Description"
.PP
Use the \fBmmdf\fR command to display available file space on a GPFS
file system.
.PP
For each disk in the GPFS file system, the \fBmmdf\fR command displays
this information, by failure group:
.RS +3
.HP 3
\(bu The size of the disk.
.HP 3
\(bu The failure group of the disk.
.HP 3
\(bu Whether the disk is used to hold data, metadata, or both.
.HP 3
\(bu Available space in full blocks.
.HP 3
\(bu Available space in fragments.
.RE
.PP
Displayed values are rounded down to a multiple of 1024 bytes. If
the fragment size used by the file system is not a multiple of 1024 bytes,
then the displayed values may be lower than the actual values. This can
result in the display of a total value that exceeds the sum of the rounded
values displayed for individual disks. The individual values are
accurate if the fragment size is a multiple of 1024 bytes.
.PP
For the file system, the \fBmmdf\fR command displays:
.RS +3
.HP 3
\(bu The total number of inodes and the number available.
.RE
.PP
The \fBmmdf\fR command may be run against a mounted or unmounted file
system. 
.RS +3
\fBNote:\fR
.RE
.RS +9
The command is I/O intensive and should be run when the system load is
light.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to be queried for available file
space. File system names need not be fully-qualified.
\fBfs0\fR is as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
List only disks that can hold data.
.RE
.PP
.RS +3
\fB-F
\fR
.RE
.RS +9
List the number of inodes and how many of them are free.
.RE
.PP
.RS +3
\fB-m
\fR
.RE
.RS +9
List only disks that can hold metadata.
.RE
.PP
.RS +3
\fB-q
\fR
.RE
.RS +9
For quick access to the file space information, list the data collected
from the most recent synchronization period.
.PP
With this flag, the values recorded by the \fBmmdf\fR command are
synchronized only at the last invocation of the \fBsyncd\fR daemon on the
node where the command is issued.
.RE
.PP
The default is to list all disks.
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
If you are a root user:
.RS +3
.HP 3
1. You may issue the \fBmmdf \fR command from any node in the GPFS
cluster.
.HP 3
2. When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or \fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
a. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
b. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.RE
.PP
If you are a non-root user, you may specify only file systems that
belong to the same cluster as the node on which the \fBmmdf \fR command was
issued.
.SH "Examples"
.RS +3
.HP 3
1. To query all disks in the \fBfs1\fR file system that can hold data,
enter:
.sp
.nf
mmdf fs1 -d
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
disk    disk size failure holds   holds  free KB in    free KB
name        in KB  group metadata data  full blocks in fragments
------- --------- ------ -------  ---- -------------------------
gpfs1nsd  8886720  4003   yes     yes  8733184 (98%)   632 (0%)\ 
gpfs2nsd  8886720  4003   yes     yes  8879360 (100%)  440 (0%)\ 
gpfs3nsd  8886720  4003   yes     yes  8884224 (100%)  440 (0%)\ 
gpfs4nsd  8886720  4003   yes     yes  8884224 (100%)  440 (0%)\ 
gpfs5nsd  8886720  4003   yes     yes  8884224 (100%)  440 (0%)\ 
        ---------                      -------------- ---------
(total)  44433600                     44265216 (100%) 2392 (0%)
.fi
.sp
.HP 3
2. To query all disks in the \fBfs1\fR file system that can hold metadata,
enter:
.sp
.nf
mmdf fs1 -m
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
disk    disk size failure holds   holds  free KB in     free KB
name        in KB group  metadata data  full blocks in fragments
------- --------- ------ -------- ----  ----------- ------------
gpfs1nsd  8886720  4003   yes     yes  8733184 (98%)    632 (0%)\ 
gpfs2nsd  8886720  4003   yes     yes  8879360 (100%)   440 (0%)\ 
gpfs3nsd  8886720  4003   yes     yes  8884224 (100%)   440 (0%)\ 
gpfs4nsd  8886720  4003   yes     yes  8884224 (100%)   440 (0%)\ 
gpfs5nsd  8886720  4003   yes     yes  8884224 (100%)   440 (0%)\ 
        ---------                      --------------  ---------
(total)  44433600                     44265216 (100%)  2392 (0%)
.fi
.sp
.HP 3
3. To query \fBfs1\fR for inode information, enter:
.sp
.nf
mmdf fs1 -F
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
Inode Information
------------------
Total number of inodes: 33792
Total number of free inodes: 33752
.fi
.sp
.RE
.SH "See also"
.PP
mmchfs Command
.PP
mmcrfs Command
.PP
mmdelfs Command
.PP
mmlsfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
k TJ4@$           AAA                    ./usr/share/man/man8/mmedquota.8 t.8 3 n         $          $          .TH mmedquota 11/01/04
mmedquota Command
.SH "Name"
.PP
\fBmmedquota\fR - Sets quota limits.
.SH "Synopsis"
.PP
\fBmmedquota\fR {\fB-u\fR [\fB-p\fR \fIProtoUser\fR]
\fIUser\fR... | \fB-g\fR [\fB-p\fR
\fIProtoGroup\fR] \fIGroup\fR... | \fB-d\fR
{\fB-u\fR \fIUser\fR... | \fB-g\fR
\fIGroup\fR...} | \fB-t\fR {\fB-u\fR |
\fB-g\fR}}
.SH "Description"
.PP
The \fBmmedquota\fR command serves two purposes:
.RS +3
.HP 3
1. Sets or changes quota limits or grace periods for users and groups
in the cluster from which the command is issued.
.HP 3
2. Reestablishes user or group default quotas for all file systems with
default quotas enabled in the cluster.
.RE
.PP
The \fBmmedquota\fR command displays the current values for these
limits, if any, and prompts you to enter new values using your default
editor:
.RS +3
.HP 3
\(bu Current block usage (display only)
.HP 3
\(bu Current inode usage (display only)
.HP 3
\(bu Inode soft limit
.HP 3
\(bu Inode hard limit
.HP 3
\(bu Block soft limit
.sp
Displayed in \fBKB\fR, but may be specified using \fBg\fR,
\fBG\fR, \fBk\fR, \fBK\fR, \fBm\fR, or \fBM\fR. If no
suffix is provided, the number is assumed to be in \fBbytes\fR.
.HP 3
\(bu Block hard limit 
.sp
Displayed in \fBKB\fR, but may be specified using \fBg\fR,
\fBG\fR, \fBk\fR, \fBK\fR, \fBm\fR, or \fBM\fR. If no
suffix is provided, the number is assumed to be in \fBbytes\fR.
.RE
.RS +3
\fBNote:\fR
.RE
.RS +9
A block or inode limit of 0 indicates no limit.
.RE
.PP
The \fBmmedquota\fR command waits for the edit window to be closed
before checking and applying new values. If an incorrect entry is made,
you must reissue the command and enter the correct values.
.PP
You can also use the \fBmmedquota\fR command to change the file
system-specific grace periods for block and file usage if the default of one
week is unsatisfactory. The grace period is the time during which users
can exceed the soft limit. If the user or group does not reduce usage
below the soft limit before the grace period expires, the soft limit becomes
the new hard limit.
.PP
When setting quota limits for a file system, replication within the file
system should be considered. GPFS quota management takes replication
into account when reporting on and determining if quota limits have been
exceeded for both block and file usage. In a file system that has
either type of replication set to a value of two, the values reported by both
the \fBmmlsquota\fR command and the \fBmmrepquota\fR command are double the value reported
by the \fBls\fR command.
.PP
The EDITOR environment variable must contain a complete path name, for
example:
.sp
.nf
export EDITOR=/bin/vi
.fi
.sp
.SH "Parameters"
.PP
.RS +3
\fB\fIUser\fR
\fR
.RE
.RS +9
Name or user id of target user for quota editing.
.RE
.PP
.RS +3
\fB\fIGroup\fR
\fR
.RE
.RS +9
Name or group id of target group for quota editing.
.RE
.SH "Options"
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Reestablish default quota limits for a specific user or group that has had
an explicit quota limit set by a previous invocation of the \fBmmedquota\fR
command.
.RE
.PP
.RS +3
\fB-g
\fR
.RE
.RS +9
Sets quota limits or grace times for groups.
.RE
.PP
.RS +3
\fB-p  
\fR
.RE
.RS +9
Applies already-established limits to a particular user or group.
.PP
When invoked with the \fB-u\fR option, \fIProtoUser\fR limits are
automatically applied to the specified \fIUser\fR or space-delimited list
of users.
.PP
When invoked with the \fB-g\fR option, \fIProtoGroup\fR limits are
automatically applied to the specified \fIGroup\fR or space-delimited list
of groups.
.PP
You can specify any user as a \fIProtoUser\fR for another \fIUser\fR,
or any group as a \fIProtoGroup\fR for another \fIGroup\fR.
.RE
.PP
.RS +3
\fB-u 
\fR
.RE
.RS +9
Sets quota limits or grace times for users.
.RE
.PP
.RS +3
\fB-t
\fR
.RE
.RS +9
Sets grace period during which quotas can exceed the soft limit before it
is imposed as a hard limit. The default grace period is one
week.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmedquota\fR command.
.PP
GPFS must be running on the node from which the \fBmmedquota\fR command
is issued.
.SH "Examples"
.RS +3
.HP 3
1. To set user quotas for userid \fBpaul\fR, enter:
.sp
.nf
mmedquota -u paul
.fi
.sp
The system displays information in your default editor similar to:
.sp
.nf
*** Edit quota limits for USR paul:
NOTE: block limits will be rounded up to the next multiple of
the block size.  block units may be: K, M, or G
gpfs0: blocks in use: 864K, limits (soft = 2500K , hard = 10M)
inodes in use: 9, limits (soft = 100, hard = 1000)
.fi
.sp
.HP 3
2. To reset default group quota values for the group blueteam,
enter:
.sp
.nf
mmedquota -d -g blueteam
.fi
.sp
To verify the change, enter:
.sp
.nf
mmrepquota -q fs1
.fi
.sp
The system displays information similar to:
.sp
.nf
fs1: USR quota is on; default quota is on
fs1: GRP quota is on; default quota is on
.fi
.sp
.HP 3
3. To change the grace periods for all users, enter:
.sp
.nf
mmedquota -t -u
.fi
.sp
The system displays information in your default editor similar to:
.sp
.nf
*** Edit grace times:
Time units may be : days, hours, minutes, or seconds
Grace period before enforcing soft limits for USRs:
gpfs0: block grace period: 7 days, file grace period: 7 days
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefedquota Command
.PP
mmdefquotaoff Command
.PP
mmdefquotaon Command
.PP
mmlsquota Command
.PP
mmquotaon Command
.PP
mmquotaoff Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
kX? VJ4@$        W  AAA             W      ./usr/share/man/man8/mmexportfs.8 .8 3 n         $          $          .TH mmexportfs 11/01/04
mmexportfs Command
.SH "Name"
.PP
\fBmmexportfs\fR - Retrieves the information needed to move a file
system to a different cluster.
.SH "Synopsis"
.PP
\fBmmexportfs\fR {\fIdevice\fR | \fBall\fR} \fB-o\fR
\fIExportFilesysData\fR
.SH "Description"
.PP
The \fBmmexportfs\fR command, in conjunction with the \fBmmimportfs\fR command, can be used to move one or more
GPFS file systems from one GPFS cluster to another GPFS cluster or to
temporarily remove file systems from the cluster and restore them back at a
later time. The \fBmmexportfs\fR command retrieves all relevant file
system and disk information and stores it in the file specified with the
\fB-o\fR parameter. This file must later be provided as input to the
\fBmmimportfs\fR command. When running the
\fBmmexportfs\fR command, the file system must be unmounted on all
nodes.
.PP
When \fBall\fR is specified in place of a file system name, any disks
that are not associated with a file system will be exported as well.
.PP
Exported file systems remain unusable until they are imported back with the
\fBmmimportfs\fR command to the same or a different
GPFS cluster.
.SH "Results"
.PP
Upon successful completion of the \fBmmexportfs\fR command, all
configuration information pertaining to the exported file system and its disks
is removed from the configuration data of the current GPFS cluster and is
stored in the user specified file \fIExportFilesysData\fR.
.SH "Parameters"
.PP
.RS +3
\fB\fIdevice\fR | all
\fR
.RE
.RS +9
The device name of the file system to be exported. File system
names need not be fully-qualified. \fBfs0\fR is as acceptable as
\fB/dev/fs0\fR. Specify \fBall\fR to export all GPFS file
systems, as well as all disks that do not belong to a file system yet.
This must be the first parameter.
.RE
.PP
.RS +3
\fB-o \fIExportFilesysData\fR
\fR
.RE
.RS +9
The path name of a file to which the file system information is to be
written. This file must be provided as input to the subsequent \fBmmimportfs\fR command.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmexportfs\fR command.
.PP
You may issue the \fBmmexportfs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or \fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To export all file systems in the current cluster, enter:
.sp
.nf
mmexportfs all -o /u/admin/exportfile
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
mmexportfs: Processing file system fs1 ...
mmexportfs: Processing file system fs2 ...
mmexportfs: Processing disks that do not belong to any file system ...
mmexportfs: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.SH "See also"
.PP
mmimportfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
kk XJ4@$        -  AAA             -      ./usr/share/man/man8/mmfsck.8 s.         $          $          .TH mmfsck 11/01/04
mmfsck Command
.SH "Name"
.PP
\fBmmfsck\fR - Checks and repairs a GPFS file system.
.SH "Synopsis"
.PP
\fBmmfsck\fR \fIDevice\fR [\fB-n\fR | \fB-y\fR]
[\fB-c\fR] [\fB-o\fR] [\fB-t\fR
\fIDirectory\fR] [\fB-v\fR | \fB-V\fR]
[\fB-N\fR {\fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR ... ]
}]
.PP
The file system must be unmounted before you can run the \fBmmfsck\fR
command with any option other than \fB-o\fR.
.SH "Description"
.PP
The \fBmmfsck\fR command in offline mode is intended to be used only in 
situations where there have been disk or communications failures that
have caused \fBMMFS_FSSTRUCT\fR error log entries to be issued, or
where it is known that disks have been forcibly removed or are otherwise
permanently unavailable for use in the file system, and other unexpected
symptoms are seen by users.
In general it is unnecessary to run \fBmmfsck\fR in offline mode
unless under the direction of the IBM Support Center.
.PP
If neither the \fB-n\fR nor \fB-y\fR flag is specified, the
\fBmmfsck\fR command runs interactively prompting you for permission to
repair each consistency error as reported. It is suggested that in all
but the most severely damaged file systems, you run the \fBmmfsck\fR
command interactively (the default).
.PP
The occurrence of I/O errors, or the appearance of a message telling you to
run the \fBmmfsck\fR command, may indicate file system
inconsistencies. If either situation occurs, use the \fBmmfsck\fR
command to check file system consistency and interactively repair the file
system.
.PP
The \fBmmfsck\fR command checks for these inconsistencies:
.RS +3
.HP 3
\(bu Blocks marked allocated that do not belong to any file. The
corrective action is to mark the block free in the allocation map.
.HP 3
\(bu Files for which an inode is allocated and no directory entry exists
(orphaned files). The corrective action is to create directory entries
for these files in a \fBlost+found\fR subdirectory at the root of this file
system. The index number of the inode is assigned as the name.
If you do not allow the \fBmmfsck\fR command to reattach an orphaned file,
it asks for permission to delete the file.
.HP 3
\(bu Directory entries pointing to an inode that is not allocated. The
corrective action is to remove the directory entry.
.HP 3
\(bu Incorrectly formed directory entries. A directory file contains the
inode number and the generation number of the file to which it refers.
When the generation number in the directory does not match the generation
number stored in the file's inode, the corrective action is to remove the
directory entry.
.HP 3
\(bu Incorrect link counts on files and directories. The corrective
action is to update them with accurate counts.
.HP 3
\(bu Cycles in the directory structure. The corrective action is to
break any detected cycles. If the cycle was a disconnected cycle, the
new top level directory is moved to the \fBlost+found\fR directory.
.RE
.PP
If you are repairing a file system due to node failure and the file system
has quotas enabled, it is suggested that you run the \fBmmcheckquota\fR command to recreate the quota
files.
.PP
Indications leading you to the conclusion that you should run the
\fBmmfsck\fR command include:
.RS +3
.HP 3
1. An \fBMMFS_FSSTRUCT\fR along with an \fBMMFS_SYSTEM_UNMOUNT\fR error
log entry on any node indicating some critical piece of the file system is
inconsistent.
.HP 3
2. Disk media failures
.HP 3
3. Partial disk failure
.HP 3
4. \fBE_VALIDATE=214\fR, Invalid checksum or other consistency check
failure on a disk data structure, reported in error logs or returned to an
application.
.RE
.PP
For further information on recovery actions and how to contact the IBM
Support Center, see the \fIGeneral
Parallel File System For Clusters: Problem Determination
Guide\fR.
.PP
If you are running the online \fBmmfsck\fR command to free allocated
blocks that do not belong to any files, plan to make file system repairs when
system demand is low. This is an I/O intensive activity and it can
affect system performance.
.SH "Results"
.PP
If the file system is inconsistent, the \fBmmfsck\fR command displays
information about the inconsistencies and (depending on the option entered)
may prompt you for permission to repair them. The \fBmmfsck\fR
command tries to avoid actions that may result in loss of data. In some
cases, however, it may indicate the destruction of a damaged file.
.PP
If there are no file system inconsistencies to detect, the \fBmmfsck\fR
command reports this information for the file system:
.RS +3
.HP 3
\(bu Number of files
.HP 3
\(bu Used blocks
.HP 3
\(bu Free blocks
.RE
.PP
All corrective actions, with the exception of recovering lost disk blocks
(blocks that are marked as allocated but do not belong to any file), require
that the file system be unmounted on all nodes. If the \fBmmfsck\fR
command is run on a mounted file system, lost blocks are recovered but any
other inconsistencies are only reported, not repaired.
.PP
If a bad disk is detected, the \fBmmfsck\fR command stops the disk and
writes an entry to the error log. The operator must manually start and
resume the disk when the problem is fixed.
.PP
The file system must be unmounted on all nodes before the \fBmmfsck\fR
command can repair file system inconsistencies.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to be checked and repaired. File
system names need not be fully-qualified. \fBfs0\fR is as acceptable
as \fB/dev/fs0\fR. 
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB-N { \fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR ... ] }
\fR
.RE
.RS +9
Specify the nodes to participate in the check and repair of the file
system. Valid values are: 
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Indicates that all nodes in the GPFS cluster, whether they have the
file system mounted or not, participate. This is the default when the
\fB-N\fR option has not been specified.
.RE
.PP
.RS +3
\fBmount
\fR
.RE
.RS +9
When used in conjunction with the \fB-o\fR option, this indicates that
only the nodes that have the file system mounted participate.
.PP
If the \fB-o\fR option is not specified, only the file system manager
node participates.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR ... ]
\fR
.RE
.RS +9
A comma-separated list of nodes that participate.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-c
\fR
.RE
.RS +9
When the file system log has been lost and the file system is replicated,
this option specifies that the \fBmmfsck\fR command attempt corrective
action by comparing the replicas of metadata and data. If this error
condition occurs, it is indicated by an error log entry.
.RE
.PP
.RS +3
\fB-n
\fR
.RE
.RS +9
Specifies a \fBno\fR response to all prompts from the \fBmmfsck\fR
command. The option reports inconsistencies but it does not change the
file system. To save this information, redirect it to an output file
when you enter the \fBmmfsck\fR command.
.RE
.PP
.RS +3
\fB-y
\fR
.RE
.RS +9
Specifies a \fByes\fR response to all prompts from the \fBmmfsck\fR
command. Use this option only on severely damaged file systems.
It allows the \fBmmfsck\fR command to take any action necessary for
repairs.
.RE
.PP
.RS +3
\fB-o
\fR
.RE
.RS +9
Specifies that the file system can be mounted during the operation of the
\fBmmfsck\fR command. Online mode does not perform a full file
system consistency check, but blocks marked as allocated that do not belong to
a file are recovered.
.RE
.PP
.RS +3
\fB-t \fIDirectory\fR
\fR
.RE
.RS +9
Specifies the directory to be used for temporary storage during
\fBmmfsck\fR command processing. The default directory is
\fB/tmp\fR. The minimum space required (in bytes) is equal to the
maximum number of inodes in the file system multiplied by 8.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Specifies the output is verbose.
.RE
.PP
.RS +3
\fB-V
\fR
.RE
.RS +9
Specifies the output is verbose and contains information for debugging
purposes.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB2
\fR
.RE
.RS +9
The command was interrupted before it completed checks or repairs.
.RE
.PP
.RS +3
\fB4
\fR
.RE
.RS +9
The command changed the file system and it must now be restarted.
.RE
.PP
.RS +3
\fB8
\fR
.RE
.RS +9
The file system contains damage that has not been repaired.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmfsck\fR command.
.PP
You may issue the \fBmmfsck\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To run the \fBmmfsck\fR command on the \fBfs1\fR file system,
receive a report, but not fix inconsistencies, enter:
.sp
.nf
mmfsck fs1 -n
.fi
.sp
The system displays information similar to:
.sp
.nf
Checking "fs1"
Checking inodes
Checking inode map file
Checking directories and files
Checking log files
Checking extended attributes file
Checking file reference counts
Checking file system replication status
       33792 inodes
          38   allocated
           0   repairable
           0   repaired
           0   damaged
           0   deallocated
           0   orphaned
           0   attached
     3332520 subblocks
       19723   allocated
           0   unreferenced
           0   deletable
           0   deallocated
         720 addresses
           0   suspended
File system is clean.
.fi
.sp
\fBmmfsck\fR found no inconsistencies in this file system.
.HP 3
2. To run the \fBmmfsck\fR command on the \fB/dev/fs2\fR file system,
receive a report, and fix inconsistencies, enter:
.sp
.nf
mmfsck /dev/fs2 -y\ 
.fi
.sp
The system displays information similar to:
.sp
.nf
Checking "/dev/fs2"
Checking inodes
Checking inode map file
Checking directories and files
Checking log files
Checking extended attributes file
Checking file reference counts
File inode 6912 is not referenced by any directory.
Reattach inode to lost+found? yes
Checking file system replication status
       33792 inodes
          46   allocated
           0   repairable
           0   repaired
           0   damaged
           0   deallocated
           1   orphaned
           1   attached
     3332520 subblocks
       19762   allocated
           0   unreferenced
           0   deletable
           0   deallocated
         728 addresses
           0   suspended
File system is clean.
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmcrfs Command
.PP
mmdelfs Command
.PP
mmdf Command
.PP
mmlsfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ok ZJ4@$        }'  AAA             }'      ./usr/share/man/man8/mmfsctl.8 .         $          $          .TH mmfsctl 11/01/04
mmfsctl Command
.SH "Name"
.PP
\fBmmfsctl\fR - Issues a file system control request.
.SH "Synopsis"
.PP
\fBmmfsctl\fR \fIDevice\fR \fBsyncFSconfig\fR
\fB{\fR\fB-n\fR \fIRemoteNodesFile\fR | \fB-C\fR
\fIremoteClusterName\fR\fB}\fR [\fB-S\fR \fISpecFile\fR]
.PP
Or,
.PP
\fBmmfsctl\fR \fIDevice\fR \fB{suspend | resume}\fR
.PP
Or,
.PP
\fBmmfsctl\fR \fIDevice\fR \fB{exclude | include}\fR
\fB{\fR\fB-d\fR \fIDiskList\fR | \fB-F\fR \fIDiskFile\fR |
\fB-G\fR \fIFailureGroup\fR\fB}\fR
.SH "Description"
.PP
Use the \fBmmfsctl\fR command to issue control requests to a
particular GPFS file system. The command is used to temporarily suspend
the processing of all application I/O requests, and later resume them, as well
as to synchronize the file system's configuration state between peer
clusters in disaster recovery environments.
.PP
See \fIEstablishing disaster recovery for your GPFS cluster\fR
in \fIGPFS: Administration and Programming Reference\fR.
.PP
Before creating a FlashCopy image of the file system, the user must run
\fBmmfsctl suspend\fR to temporarily quiesce all file system activity and
flush the internal buffers on all nodes that mount this file system.
The on-disk metadata will be brought to a consistent state, which provides for
the integrity of the FlashCopy snapshot. If a request to the file
system is issued by the application after the invocation of this command, GPFS
suspends this request indefinitely, or until the user issues \fBmmfsctl
resume\fR.
.PP
Once the FlashCopy image has been taken, the \fBmmfsctl resume\fR
command can be issued to resume the normal operation and complete any pending
I/O requests.
.PP
The \fBmmfsctl syncFSconfig\fR command extracts the file
system's related information from the local GPFS configuration data,
transfers this data to one of the nodes in the peer cluster, and attempts to
import it there. The primary cluster configuration server of the peer
cluster must be available and accessible using remote shell and remote copy at
the time of the invocation of this command.
.PP
Once the GPFS file system has been defined in the primary cluster, users
run this command to import the configuration of this file system into the peer
recovery cluster. After producing a FlashCopy image of the file system
and propagating it to the peer cluster using Peer-to-Peer Remote Copy (PPRC),
users similarly run this command to propagate any relevant configuration
changes made in the cluster after the previous snapshot.
.PP
The primary cluster configuration server of the peer cluster must be
available and accessible using remote shell and remote copy at the time of the
invocation of the \fBmmfsctl syncFSconfig\fR command. Also, the peer
GPFS clusters should be defined to use the same remote shell and remote copy
mechanism, and they must be set up to allow nodes in peer clusters to
communicate without the use of a password.
.PP
Not all administrative actions performed on the file system
necessitate this type of resynchronization - only those that modify the file
system information maintained in the local GPFS configuration data, which
includes:
.RS +3
.HP 3
\(bu Additions, removals, and replacements of disks (commands
\fBmmadddisk\fR, \fBmmdeldisk\fR, \fBmmrpldisk\fR)
.HP 3
\(bu Modifications to disk attributes (command \fBmmchdisk\fR)
.HP 3
\(bu Changes to the file system's mount point (command \fBmmchfs -T\fR)
.HP 3
\(bu Changing the file system device name (command \fBmmchfs
-W\fR)
.RE
.PP
The \fBmmfsctl exclude\fR command can be used to manually
override the file system descriptor quorum after a site-wide disaster (see
\fIEstablishing disaster recovery for your GPFS cluster\fR in
\fIGPFS: Administration and Programming Reference\fR). 
This command enables users to restore normal access
to the file system with less than a quorum of available file system descriptor
replica disks, by effectively excluding the specified disks from all
subsequent operations on the file system descriptor. After repairing
the disks, the \fBmmfsctl include\fR command can be issued to restore the
initial quorum configuration.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system whose configuration is to be
re-synchronized. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR. If
\fBall\fR is specified with the \fBsyncFSconfig\fR option, this command
is performed on all GPFS file systems defined in the cluster.
.RE
.PP
.RS +3
\fBexclude
\fR
.RE
.RS +9
Instructs GPFS to exclude the specified group of disks from all subsequent
operations on the file system descriptor, and change their availability state
to \fBdown\fR. If necessary, this command assigns additional disks
to serve as the disk descriptor replica holders, and migrates the disk
descriptor to the new replica set. The excluded disks are not deleted
from the file system, and still appear in the output of the \fBmmlsdisk\fR
command.
.RE
.PP
.RS +3
\fBinclude
\fR
.RE
.RS +9
Informs GPFS that the previously excluded disks have become operational
again. This command writes the up-to-date version of the disk
descriptor to each of the specified disks, and clears the \fBexcl\fR
tag.
.RE
.PP
.RS +3
\fBresume
\fR
.RE
.RS +9
Instructs GPFS to resume the normal processing of I/O requests on all
nodes.
.RE
.PP
.RS +3
\fBsuspend
\fR
.RE
.RS +9
Instructs GPFS to flush the internal buffers on all nodes, bring the file
system to a consistent state on disk, and suspend the processing of all
subsequent application I/O requests.
.RE
.PP
.RS +3
\fBsyncFSconfig
\fR
.RE
.RS +9
Synchronizes the configuration state of a GPFS file system between the
local cluster and its peer in two-cluster disaster recovery
configurations.
.RE
.PP
.RS +3
\fB-C \fIremoteClusterName\fR
\fR
.RE
.RS +9
Specifies the name of the GPFS cluster that owns the remote GPFS file
system.
.RE
.PP
.RS +3
\fB-d \fIDiskList\fR
\fR
.RE
.RS +9
Specifies the names of the NSDs to be included or excluded by the
\fBmmfsctl\fR command. Separate the names with semicolons (;)
and enclose the list of disk names in quotation marks.
.RE
.PP
.RS +3
\fB-F \fIDiskFile\fR
\fR
.RE
.RS +9
Specifies a file containing the names of the NSDs, one per line, to be
included or excluded by the \fBmmfsctl\fR command.
.RE
.PP
.RS +3
\fB-G \fIFailureGroup\fR
\fR
.RE
.RS +9
A number identifying the failure group for disks to be included or
excluded by the \fBmmfsctl\fR command.
.RE
.PP
.RS +3
\fB-n \fIRemoteNodesFile\fR
\fR
.RE
.RS +9
Specifies a list of contact nodes in the peer recovery cluster that GPFS
uses when importing the configuration data into that cluster. Although
any node in the peer cluster can be specified here, users are advised to
specify the identities of the peer cluster's primary and secondary
cluster configuration servers, for efficiency reasons.
.RE
.PP
.RS +3
\fB-S \fISpecFile\fR
\fR
.RE
.RS +9
Specifies the description of changes to be made to the file system, in the
peer cluster during the import step. The format of this file is
identical to that of the \fIChangeSpecFile\fR used as input to the
\fBmmimportfs\fR command. This option can be used, for example, to
define the assignment of the NSD servers for use in the peer cluster.
.RE
.SH "Options"
.PP
None.
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Results"
.PP
The \fBmmfsctl\fR command returns 0 if successful.
.SH "Security"
.PP
You must have root authority to run the \fBmmfsctl\fR command.
.PP
You may issue the \fBmmfsctl\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
This sequence of commands creates a FlashCopy image of the file system and
propagates this image to the recovery cluster using the Peer-to-Peer Remote
Copy technology. The following configuration is assumed:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Site~LUNs
Primary cluster (site A)~lunA1, lunA2
Recovery cluster (site B)~lunB1
.TE
.sp
.fi
.RE
.PP
.RS +3
\fBlunA1
\fR
.RE
.RS +9
FlashCopy source
.RE
.PP
.RS +3
\fBlunA2
\fR
.RE
.RS +9
FlashCopy target, PPRC source
.RE
.PP
.RS +3
\fBlunB1
\fR
.RE
.RS +9
PPRC target
.RE
.PP
A single GPFS file system named \fBfs0\fR has been defined in the
primary cluster over lunA1.
.RS +3
.HP 3
1. In the primary cluster, suspend all file system I/O activity and flush the
GPFS buffers
.sp
.nf
mmfsctl fs0 suspend
.fi
.sp
.sp
The output is similar to this:
.sp
.nf
Writing dirty data to disk
Quiescing all file system operations
Writing dirty data to disk again
.fi
.sp
.HP 3
2. Establish a FlashCopy pair using lunA1 as the source and lunA2 as the
target.
.HP 3
3. Resume the file system I/O activity: 
.sp
.nf
mmfsctl fs0 resume
.fi
.sp
.sp
The output is similar to this:
.sp
.nf
Resuming operations.
.fi
.sp
.HP 3
4. Establish a Peer-to-Peer Remote Copy (PPRC) path and a synchronous PPRC
volume pair lunA2-lunB1 (primary-secondary). Use the 'copy entire
volume' option and leave the 'permit read from secondary'
option disabled.
.HP 3
5. Wait for the completion of the FlashCopy background task. Wait for
the PPRC pair to reach the duplex (fully synchronized) state.
.HP 3
6. Terminate the PPRC volume pair lunA2-lunB1.
.HP 3
7. If this is the first time the snapshot is taken, or if the configuration
state of \fBfs0\fR changed since the previous FlashCopy snapshot, propagate
the most recent configuration to site B: 
.sp
.nf
mmfsctl fs0 syncFSconfig -n recovery_clust_nodelist
.fi
.sp
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
sctkއ \J4@$        T  xHBxHBxHB             T      ./usr/share/man/man8/mmgetstate.8 .8 3 n         $          $          .TH mmgetstate 03/25/05
mmgetstate Command
.SH "Name"
.PP
The \fBmmgetstate\fR command displays the state of the GPFS daemon on
one or more nodes.
.SH "Synopsis"
.PP
\fBmmgetstate\fR [\fB-L\fR] [\fB-v\fR]
[\fB-a\fR | \fB-W\fR \fINodeNameFile\fR | [\fB-w\fR
\fINodeName\fR [,\fINodeName\fR...]]
[\fB-n\fR
\fINodeNumber\fR[,\fINodeNumber\fR...]]]
.SH "Description"
.PP
Use the \fBmmgetstate\fR command to show the state of the GPFS daemon on
one or more nodes.
.SH "Parameters"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
List all nodes in the GPFS cluster. The option does not
display information for nodes that cannot be reached. You may obtain
more information if you specify the \fB-v\fR option.
.RE
.PP
.RS +3
\fB-W \fINodeNameFile\fR
\fR
.RE
.RS +9
List all nodes whose hostnames are listed in the file. The
hostnames must be listed one per line.
.RE
.PP
.RS +3
\fB-w \fINodeName\fR [,\fINodeName\fR...]
\fR
.RE
.RS +9
List all nodes whose hostnames are specified in the list. This list
is combined with the nodes specified on the \fB-n\fR option.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.PP
.RS +3
\fB-n \fINodeNumber\fR[,\fINodeNumber\fR...]
\fR
.RE
.RS +9
List all nodes whose node numbers are specified in the list. This
list is combined with the nodes specified on the \fB-w\fR option.
.RE
.SH "Options"
.PP
.RS +3
\fB-L
\fR
.RE
.RS +9
Additionally display quorum, number of nodes up, and total number of
nodes.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Display intermediate error messages.
.RE
.PP
The GPFS states recognized and displayed by this command are:
.PP
.RS +3
\fBactive
\fR
.RE
.RS +9
GPFS is ready for operations.
.RE
.PP
.RS +3
\fBarbitrating
\fR
.RE
.RS +9
A node is trying to form a quorum with the other available
nodes.
.RE
.PP
.RS +3
\fBdown
\fR
.RE
.RS +9
GPFS daemon is not running on the node.
.RE
.PP
.RS +3
\fBunknown
\fR
.RE
.RS +9
Unknown value. Node cannot be reached or some other error
occurred.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmgetstate\fR command.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on the \fBmmcrcluster\fR or the
\fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To display the quorum, the number of nodes up, and the total number of
nodes for the GPFS cluster, issue:
.sp
.nf
mmgetstate -a -L
.fi
.sp
.sp
The system displays output similar to:
.sp
.nf
 Node number Node name  Quorum  Nodes up  Total nodes GPFS state\ 
----------------------------------------------------------------
       1     k154gn01      3        5          8      active
       2     k154gn02      3        5          8      active
       3     k154gn09      3        5          8      active
       4     k154gn10      3        5          8      active
       5     k155gn01      3        5          8      active
       6     k154gn02      3        5          8      down
       7     k154gn09      3        5          8      down
       8     k154gn10      3        5          8      down
.fi
.sp
.sp
The 3 under the Quorum column means that you must have three quorum nodes
up to achieve quorum.
.HP 3
2. This is an example of a cluster using node quorum with tiebreaker
disks. Note the * in the Quorum field, which indicates that tiebreaker
disks are being used:
.sp
.nf
mmgetstate -a -L
.fi
.sp
.sp
.nf
 Node number Node name  Quorum  Nodes up  Total nodes GPFS state\ 
----------------------------------------------------------------
       1     k154gn05     1*        6         6       active
       2     k154gn06     1*        6         6       active
       3     k155gn05     1*        6         6       active
       4     k155gn06     1*        6         6       active
       5     k155gn07     1*        6         6       active
       6     k155gn08     1*        6         6       active
.fi
.sp
.RE
.SH "See also"
.PP
mmchconfig Command
.PP
mmcrcluster Command
.PP
mmshutdown Command
.PP
mmstartup Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
3
\fkN ^J4@$        
  AAA             
      ./usr/share/man/man8/mmimportfs.8 .8 3 n         $          $          .TH mmimportfs 11/01/04
mmimportfs Command
.SH "Name"
.PP
\fBmmimportfs\fR - Imports into the cluster one or more file systems
that were created in another GPFS cluster.
.SH "Synopsis"
.PP
\fBmmimportfs\fR {\fIdevice\fR | \fBall\fR} \fB-i\fR
\fIExportFilesysData\fR [\fB-S\fR \fIChangeSpecFile\fR]
.SH "Description"
.PP
The \fBmmimportfs\fR command, in conjunction with the \fBmmexportfs\fR command, can be used to move into the
current GPFS cluster one or more file systems that were created in another
GPFS cluster. The \fBmmimportfs\fR command extracts all relevant
file system and disk information from the \fIExportFilesysData\fR file
specified with the \fB-i\fR parameter. This file must have been
created by the \fBmmexportfs\fR command.
.PP
When \fBall\fR is specified in place of a file system name, any disks
that are not associated with a file system will be imported as well.
.PP
If the file systems being imported were created on nodes that do not belong
to the current GPFS cluster, the \fBmmimportfs\fR command assumes that all
disks have been properly moved and made accessible from nodes in the current
cluster.
.PP
If any node in the cluster, including the node on which you are running the
\fBmmimportfs\fR command, does not have access to one or more disks, use
the \fB-S\fR option to assign NSD servers to those disks.
.PP
The \fBmmimportfs\fR command attempts to preserve any NSD server assignments
that were in effect when the file system was exported.
.PP
If the file system was exported from a cluster created with a version of GPFS
prior to 2.3, it is possible that the disks of the file system are not NSDs.
Such disks will be automatically converted into NSDs by the \fBmmimportfs\fR
command.
.PP
After the \fBmmimportfs\fR command completes, use \fBmmlsnsd\fR to display the NSD server names that are
assigned to each of the disks in the imported file system. Use \fBmmchnsd\fR to change the current NSD server
assignments as needed.
.PP
After the \fBmmimportfs\fR command completes, use \fBmmlsdisk\fR to display the failure groups to which
each disk belongs. Use \fBmmchdisk\fR to make
adjustments if necessary.
.PP
If you are importing file systems into a cluster that already contains GPFS
file systems it is possible to encounter name conflicts. You must
resolve such conflicts before the \fBmmimportfs\fR command can
succeed. You can use the \fBmmchfs\fR command
to change the device name and mount point of an existing file system.
If there are disk name conflicts, use the \fBmmcrnsd\fR command to define new disks and specify
unique names (rather than let the command generate names). Then replace
the conflicting disks using \fBmmrpldisk\fR and
remove them from the cluster using \fBmmdelnsd\fR.
.SH "Results"
.PP
Upon successful completion of the \fBmmimportfs\fR command, all
configuration information pertaining to the file systems being imported is
added to configuration data of the current GPFS cluster.
.SH "Parameters"
.PP
.RS +3
\fB\fIdevice\fR | all
\fR
.RE
.RS +9
The device name of the file system to be imported. File system
names need not be fully-qualified. \fBfs0\fR is as acceptable as
\fB/dev/fs0\fR. Specify \fBall\fR to import all GPFS file
systems, as well as all disks that do not belong to a file system yet.
This must be the first parameter.
.RE
.PP
.RS +3
\fB-i \fIExportFilesysData\fR
\fR
.RE
.RS +9
The path name of the file containing the file system information.
This file must have previously been created with the \fBmmexportfs\fR command.
.RE
.PP
.RS +3
\fB-S \fIChangeSpecFile\fR
\fR
.RE
.RS +9
The path name of an optional file containing disk descriptors, one per
line, in the format: 
.sp
.nf
DiskName:PrimaryServer:BackupServer:
.fi
.sp
.PP
.RS +3
\fBDiskName
\fR
.RE
.RS +9
The name of a disk from the file system being imported.
.RE
.PP
.RS +3
\fBPrimaryServer
\fR
.RE
.RS +9
The name of the primary NSD server node you want to assign to the
disk.
.RE
.PP
.RS +3
\fBBackupServer
\fR
.RE
.RS +9
The name of the backup NSD server node you want to assign to the
disk.
.RE
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. You cannot change the name of a disk. You cannot change the
disk usage or failure group assignment with the \fBmmimportfs\fR
command. Use the \fBmmchdisk\fR command for
this purpose.
.sp
.HP 3
2. All disks that do not have descriptors in \fIChangeSpecFile\fR
are assigned the NSD servers that they had at the time the file system was
exported. All disks with NSD servers that are not valid are assumed to
be SAN-attached to all nodes in the cluster. Use the \fBmmchnsd\fR command to assign new or change existing
NSD server nodes.
.RE
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmimportfs\fR command.
.PP
You may issue the \fBmmimportfs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or \fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To import all file systems in the current cluster, enter:
.sp
.nf
mmimportfs all -i /u/admin/exportfile
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
mmimportfs: Processing file system fs1 ...
mmimportfs: Processing disk gpfs2nsd
mmimportfs: Processing disk gpfs3nsd
mmimportfs: Processing disk gpfs4nsd
mmimportfs: Processing file system fs2 ...
mmimportfs: Processing disk gpfs1nsd1
mmimportfs: Processing disk gpfs5nsd
mmimportfs: Processing disks that do not belong to any file system ...
mmimportfs: Processing disk gpfs6nsd
mmimportfs: Processing disk gpfs1001nsd
mmimportfs: Committing the changes ...
mmimportfs: The following file systems were successfully imported:
        fs1
        fs2
mmimportfs: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.SH "See also"
.PP
mmexportfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
fore tk `J4@$          AAA                   ./usr/share/man/man8/mmlscluster.8 8 3 n         $          $          .TH mmlscluster 11/01/04
mmlscluster Command
.SH "Name"
.PP
\fBmmlscluster\fR - Displays the current configuration information
for a GPFS cluster.
.SH "Synopsis"
.PP
\fBmmlscluster\fR
.SH "Description"
.PP
Use the \fBmmlscluster\fR command to display the current configuration
information for a GPFS cluster.
.PP
For the GPFS cluster, the \fBmmlscluster\fR command
displays:
.RS +3
.HP 3
\(bu The cluster name
.HP 3
\(bu The cluster id
.HP 3
\(bu GPFS UID domain
.HP 3
\(bu The remote shell command being used
.HP 3
\(bu The remote file copy command being used
.HP 3
\(bu The primary GPFS cluster configuration server
.HP 3
\(bu The secondary GPFS cluster configuration server
.HP 3
\(bu A list of nodes belonging to the GPFS cluster
.RE
.PP
For each node, the command displays:
.RS +3
.HP 3
\(bu The node number assigned to the node by GPFS
.HP 3
\(bu Short hostname
.HP 3
\(bu Primary network IP address
.HP 3
\(bu Long hostname
.HP 3
\(bu Remarks, such as whether the node is a quorum node or not
.RE
.SH "Parameters"
.PP
NONE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmlscluster\fR
command.
.PP
You may issue the \fBmmlscluster\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To display the current configuration information for the GPFS cluster,
enter:
.sp
.nf
mmlscluster\ 
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
GPFS cluster information
========================
  GPFS cluster name:         cluster1.kgn.ibm.com
  GPFS cluster id:           680681562214606028
  GPFS UID domain:           cluster1.kgn.ibm.com
  Remote shell command:      /usr/bin/rsh
  Remote file copy command:  /usr/bin/rcp
GPFS cluster configuration servers:
-------------------------------------
  Primary server:    k164n06.kgn.ibm.com
  Secondary server:  k164n05.kgn.ibm.com
Node number  Node name IP address     Full node name       Remarks
--------------------------------------------------------------------
       1     k164n04   198.117.68.68  k164n04.kgn.ibm.com  quorum node
       2     k164n05   198.117.68.69  k164n05.kgn.ibm.com  quorum node
       3     k164n06   198.117.68.70  k164n06.kgn.ibm.com\ 
.fi
.sp
.SH "See also"
.PP
mmaddnode Command
.PP
mmchcluster Command
.PP
mmcrcluster Command
.PP
mmdelnode Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
um -k bJ4@$          AAA                   ./usr/share/man/man8/mmlsconfig.8  8 3 n         $          $          .TH mmlsconfig 11/01/04
mmlsconfig Command
.SH "Name"
.PP
\fBmmlsconfig\fR - Displays the current configuration data
for a GPFS cluster.
.SH "Synopsis"
.PP
\fBmmlsconfig\fR
.SH "Description"
.PP
Use the \fBmmlsconfig\fR command to display the current configuration
data for a GPFS cluster.
.PP
Depending on your configuration, additional information that is set by GPFS
may be displayed to assist in problem determination when contacting the  IBM Support Center. If a configuration
parameter is not shown in the output of this command, the default value for
that parameter, as documented in the \fBmmchconfig\fR command, is in
effect.
.SH "Parameters"
.PP
NONE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmlsconfig\fR command.
.PP
You may issue the \fBmmlsconfig\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To display the current configuration data for the GPFS cluster that
you are running on, enter:
.sp
.nf
mmlsconfig
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
Configuration data for cluster cluster1.kgn.ibm.com:
------------------------------------------------------------
clusterName cluster1.kgn.ibm.com
clusterId 680681562214606028
clusterType lc
multinode yes
autoload no
useDiskLease yes
cipherList AUTHONLY
maxFeatureLevelAllowed 800
File systems in cluster cluster1.kgn.ibm.com:
-----------------------------------------------------
/dev/fs1
/dev/fs2
.fi
.sp
.SH "See also"
.PP
mmchconfig Command
.PP
mmchcluster Command
.PP
mmcrcluster Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
.kgn.k= dJ4@$          AAA                   ./usr/share/man/man8/mmlsdisk.8          $          $          .TH mmlsdisk 11/01/04
mmlsdisk Command
.SH "Name"
.PP
\fBmmlsdisk\fR - Displays the current configuration and state of
the disks in a file system.
.SH "Synopsis"
.PP
\fBmmlsdisk\fR \fIDevice\fR [\fB-d\fR
\fB"\fR\fIDiskName\fR[;\fIDiskName\fR...]\fB"\fR]
[\fB-e\fR] [\fB-L\fR]
.SH "Description"
.PP
Use the \fBmmlsdisk\fR command to display the current state of the disks
in the file system.
.PP
The \fBmmlsdisk\fR command may be run against a mounted or unmounted
file system.
.PP
For each disk in the list, the \fBmmlsdisk\fR command displays:
.RS +3
.HP 3
\(bu disk name
.HP 3
\(bu driver type
.HP 3
\(bu sector size
.HP 3
\(bu failure group
.HP 3
\(bu whether it holds metadata
.HP 3
\(bu whether it holds data
.HP 3
\(bu current status:
.sp
.RS +3
\fBready
\fR
.RE
.RS +9
Normal status
.RE
.sp
.RS +3
\fBsuspended
\fR
.RE
.RS +9
Indicates that data is to be migrated off this disk
.RE
.sp
.RS +3
\fBbeing emptied
\fR
.RE
.RS +9
Transitional status in effect while a disk deletion is pending
.RE
.sp
.RS +3
\fBreplacing
\fR
.RE
.RS +9
Transitional status in effect for old disk while replacement is pending
.RE
.sp
.RS +3
\fBreplacement
\fR
.RE
.RS +9
Transitional status in effect for new disk while replacement is pending
.RE
.HP 3
\(bu availability:
.sp
.RS +3
\fBup
\fR
.RE
.RS +9
Disk is available to GPFS for normal \fBread\fR and \fBwrite\fR
operations
.RE
.sp
.RS +3
\fBdown
\fR
.RE
.RS +9
No \fBread\fR and \fBwrite\fR operations can be performed on
this disk
.RE
.sp
.RS +3
\fBrecovering
\fR
.RE
.RS +9
An intermediate state for disks coming up, during which GPFS verifies and
corrects data. \fBread\fR operations can be performed while a disk
is in this state but \fBwrite\fR operations cannot.
.RE
.sp
.RS +3
\fBunrecovered
\fR
.RE
.RS +9
The disk was not successfully brought up.
.RE
.HP 3
\(bu disk id
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to which the disks belong. File
system names need not be fully-qualified. \fBfs0\fR is as acceptable
as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB-d "\fIDiskName\fR[;\fIDiskName\fR...]"
\fR
.RE
.RS +9
The name of the disks for which you want to display current configuration
and state information. When you enter multiple \fIDiskName\fRs, you
must separate them with semicolons and enclose the list in quotation marks.
.sp
.nf
gpfs3nsd;gpfs4nsd;gpfs5nsd
.fi
.sp
.RE
.SH "Options"
.PP
.RS +3
\fB-e
\fR
.RE
.RS +9
Display all of the disks in the file system that do not have an
availability of \fBup\fR and a status of \fBready\fR. If all
disks in the file system are \fBup\fR and \fBready\fR, the message
displayed is:
.sp
.nf
6027-623 All disks up and ready\ 
.fi
.sp
.RE
.PP
.RS +3
\fB-L
\fR
.RE
.RS +9
Displays an extended list of the disk parameters, including the disk id
field and the \fBremarks\fR field. The \fBremarks\fR column shows
the current file system descriptor quorum assignments, and displays the
excluded disks. The \fBremarks\fR field contains \fBdesc\fR for
all disks assigned as the file system descriptor holders and \fBexcl\fR for
all excluded disks.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
If you are a root user:
.RS +3
.HP 3
1. You may issue the \fBmmlsdisk\fR command from any node in the GPFS
cluster.
.HP 3
2. When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
a. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
b. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.RE
.PP
If you are a non-root user, you may specify only file systems that
belong to the same cluster as the node on which the \fBmmlsdisk\fR command
was issued.
.SH "Examples"
.PP
.RS +3
.HP 3
1. To display the current state of \fBgpfs2nsd\fR, enter:
.sp
.nf
mmlsdisk /dev/fs0 -d gpfs2nsd
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
disk         driver   sector failure holds    holds
name         type       size   group metadata data  status        availability
------------ -------- ------ ------- -------- ----- ------------- ------------
gpfs2nsd     nsd         512    4002 yes      yes   ready         up
.fi
.sp
.HP 3
2. To display the current states of \fBgpfs2nsd\fR,
\fBgpfs3nsd\fR, and \fBgpfs4nsd\fR, and display their respective disk
ids and the descriptor quorum assignment, enter:
.sp
.nf
mmlsdisk /dev/fs0 -d "gpfs2nsd;gpfs3nsd;gpfs4nsd" -L
.fi
.sp
.sp
The system displays information similar to:
.sp
.nf
disk      driver sector failure holds    holds
name      type     size   group metadata data  status availability disk id remarks\ 
--------  -----  ------ ------- -------- ----- ------ ------------ ------- ---------
gpfs2nsd  nsd     512       1   yes      yes   ready         up      1     desc
gpfs3nsd  nsd     512       1   yes      yes   ready         up      2
gpfs4nsd  nsd     512       2   yes      yes   ready         up      3     desc
Number of quorum disks: 3
Read quorum value:      2
Write quorum value:     2
.fi
.sp
.RE
.SH "See also"
.PP
mmadddisk Command
.PP
mmchdisk Command
.PP
mmdeldisk Command
.PP
mmrpldisk Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP

k fJ4@$        \  !A!A!A             \      ./usr/share/man/man8/mmlsfs.8 8          $          $          .TH mmlsfs 11/01/04
mmlsfs Command
.SH "Name"
.PP
\fBmmlsfs\fR - Displays file system attributes.
.SH "Synopsis"
.PP
\fBmmlsfs\fR \fIDevice\fR [\fB-A\fR]
[\fB-a\fR] [\fB-B\fR] [\fB-D\fR]
[\fB-d\fR] [\fB-E\fR] [\fB-F\fR]
[\fB-f\fR] [\fB-I\fR] [\fB-i\fR]
[\fB-j\fR] [\fB-k\fR] [\fB-M\fR]
[\fB-m\fR] [\fB-n\fR] [\fB-o\fR]
[\fB-Q\fR] [\fB-R\fR] [\fB-r\fR]
[\fB-S\fR] [\fB-s\fR] [\fB-u\fR]
[\fB-V\fR]  [\fB-z\fR]
.SH "Description"
.PP
Use the \fBmmlsfs\fR command to list the attributes of a file
system.
.PP
Depending on your configuration, additional information that is set by GPFS
may be displayed to assist in problem determination when contacting the  IBM Support Center.
.PP
\fBResults\fR
.PP
If you do not specify any options, all attributes of the file system are
displayed. When you specify options, only those attributes specified
are listed, in the order issued in the command. Some parameters are
preset for optimum performance and, although they display in the
\fBmmlsfs\fR command output, you cannot change them.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system for which the attributes are
listed. File system names need not be fully-qualified.
\fBfs0\fR is as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.RS +3
\fB-A
\fR
.RE
.RS +9
Automatically mount the file system when the GPFS daemon starts
.RE
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Estimated average file size, in bytes
.RE
.PP
.RS +3
\fB-B
\fR
.RE
.RS +9
Size of data block, in bytes
.RE
.PP
.RS +3
\fB-D
\fR
.RE
.RS +9
The type of file locking semantics that are in effect (\fBnfs4\fR or
\fBposix\fR)
.RE
.PP
.RS +3
\fB-d
\fR
.RE
.RS +9
Names of all of the disks in the file system
.RE
.PP
.RS +3
\fB-E
\fR
.RE
.RS +9
Exact \fBmtime\fR values reported
.RE
.PP
.RS +3
\fB-F
\fR
.RE
.RS +9
Maximum number of files currently supported
.RE
.PP
.RS +3
\fB-f
\fR
.RE
.RS +9
Minimum fragment size, in bytes
.RE
.PP
.RS +3
\fB-I
\fR
.RE
.RS +9
Indirect block size, in bytes
.RE
.PP
.RS +3
\fB-i
\fR
.RE
.RS +9
Inode size, in bytes
.RE
.PP
.RS +3
\fB-j
\fR
.RE
.RS +9
Block allocation type
.RE
.PP
.RS +3
\fB-k
\fR
.RE
.RS +9
Type of authorization supported by the file system
.RE
.PP
.RS +3
\fB-M
\fR
.RE
.RS +9
Maximum number of metadata replicas
.RE
.PP
.RS +3
\fB-m
\fR
.RE
.RS +9
Default number of metadata replicas
.RE
.PP
.RS +3
\fB-n
\fR
.RE
.RS +9
Estimated number of nodes for mounting the file system
.RE
.PP
.RS +3
\fB-o
\fR
.RE
.RS +9
Additional mount options
.RE
.PP
.RS +3
\fB-Q
\fR
.RE
.RS +9
Which quotas are currently enforced on the file system
.RE
.PP
.RS +3
\fB-R
\fR
.RE
.RS +9
Maximum number of data replicas
.RE
.PP
.RS +3
\fB-r
\fR
.RE
.RS +9
Default number of data replicas
.RE
.PP
.RS +3
\fB-s
\fR
.RE
.RS +9
Stripe method
.RE
.PP
.RS +3
\fB-S
\fR
.RE
.RS +9
Whether the updating of \fBatime\fR is suppressed for the \fBgpfs_stat()\fR, \fBgpfs_fstat()\fR, \fBstat()\fR, and \fBfstat()\fR
calls
.RE
.PP
.RS +3
\fB-u
\fR
.RE
.RS +9
Whether support for large LUNs is enabled
.RE
.PP
.RS +3
\fB-V
\fR
.RE
.RS +9
Current format version of the file system
.RE
.PP
.RS +3
\fB-z
\fR
.RE
.RS +9
DMAPI is enabled for this file system
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
If you are a root user:
.RS +3
.HP 3
1. You may issue the \fBmmlsfs\fR command from any node in the GPFS
cluster.
.HP 3
2. When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
a. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
b. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.RE
.PP
If you are a non-root user, you may specify only file systems that
belong to the same cluster as the node on which the \fBmmlsfs\fR command
was issued.
.SH "Examples"
.PP
If you issue the \fBmmlsfs\fR command with no options for the file
system \fBgpfssa\fR:
.sp
.nf
mmlsfs gpfssa
.fi
.sp
.PP
The system displays information similar to this. Output appears in
the order the options were specified in the command.
.sp
.nf
flag value          description
---- -------------- -------------------------------------------------
 -s  roundRobin     Stripe method
 -f  8192           Minimum fragment size in bytes
 -i  512            Inode size in bytes
 -I  16384          Indirect block size in bytes
 -m  1              Default number of metadata replicas
 -M  1              Maximum number of metadata replicas
 -r  1              Default number of data replicas
 -R  1              Maximum number of data replicas
 -j  cluster        Block allocation type
 -D  posix          File locking semantics in effect
 -k  posix          ACL semantics in effect
 -a  1048576        Estimated average file size
 -n  32             Estimated number of nodes that will mount file
                    system
 -B  262144         Block size
 -Q  none           Quotas enforced
     none           Default quotas enabled
 -F  33792          Maximum number of inodes
 -V  9.00           File system version. Highest supported version:
                    9.00
 -u  yes            Support for large LUNs?
 -z  no             Is DMAPI enabled?
 -d  gpfs2nsd;gpfsnsd;gpfs4nsd;gpfs5nsd
                    Disks in file system
 -A  no             Automatic mount option
 -E  yes            Exact mtime default mount option
 -S  no             Suppress atime default mount option
 -o  none           Additional mount options
.fi
.sp
.SH "See also"
.PP
mmcrfs Command
.PP
mmchfs Command
.PP
mmdelfs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ed
.k갞 hJ4@$          %A%A%A                   ./usr/share/man/man8/mmlsmgr.8           $          $          .TH mmlsmgr 11/01/04
mmlsmgr Command
.SH "Name"
.PP
\fBmmlsmgr\fR - Displays which node is the file system manager for
the specified file systems.
.SH "Synopsis"
.PP
\fBmmlsmgr\fR [\fIDevice\fR [\fIDevice\fR
\&.\&.\&.]]
.SH "Description"
.PP
Use the \fBmmlsmgr\fR command to display which node is the file system
manager for the file system.
.PP
If you do not provide a \fIDevice\fR operand, file system
managers for all file systems within the current cluster for which a file
system manager has been appointed are displayed.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device names of the file systems for which the file system manager
information is displayed. 
.PP
If more than one file system is listed, the names must be delimited by a
space. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.PP
If no file system is specified, information about all file systems is
displayed.
.RE
.SH "Options"
.PP
NONE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
If you are a root user:
.RS +3
.HP 3
1. You may issue the \fBmmlsmgr\fR command from any node in the GPFS
cluster.
.HP 3
2. When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
a. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
b. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.RE
.PP
If you are a non-root user, you may specify only file systems that
belong to the same cluster as the node on which the \fBmmlsmgr\fR command
was issued.
.SH "Examples"
.RS +3
.HP 3
1. To display the file system manager node information for all the file
systems, enter:
.sp
.nf
mmlsmgr\ 
.fi
.sp
The system displays information similar to:
.sp
.nf
file system      manager node [from 199.114.68.69 (k156gn02)]
---------------- ------------------
fs1              199.114.68.70 (k154gn01)\ 
fs2              199.114.68.71 (k154gn02)\ 
fs3              199.114.68.72 (kolt2g_r1b42)
.fi
.sp
.sp
The output shows the device name of the file system and the file system
manager's node number and name, in parentheses, as they are recorded in
the GPFS cluster data.
.HP 3
2. To display the file system manager information for file systems
\fBgpfs2\fR and \fBgpfs3\fR, enter:
.sp
.nf
mmlsmgr gpfs2 gpfs3
.fi
.sp
The system displays information similar to:
.sp
.nf
file system      manager node [from 199.114.68.69 (k156gn02)]
---------------- ------------------
gpfs2              199.114.68.70 (k154gn02)\ 
gpfs3              199.114.68.72 (kolt2g_r1b42)
.fi
.RE
.SH "See also"
.PP
mmchmgr Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
/bin/sk jJ4@$          mBmBmB                   ./usr/share/man/man8/mmlsnsd.8           $          $          .TH mmlsnsd 04/21/05
mmlsnsd Command
.SH "Name"
.PP
\fBmmlsnsd\fR - Displays the current Network Shared Disk (NSD) information in the GPFS
cluster.
.SH "Synopsis"
.PP
\fBmmlsnsd\fR [\fB\fI-a\fR\fR | \fB-F\fR | \fB-f\fR
\fIDevice\fR | \fB-d\fR
"\fIDiskName\fR[;\fIDiskName\fR...]" ]
[\fB-L\fR | \fB-m\fR | \fB-M\fR] [\fB-v\fR]
.SH "Description"
.PP
Use the \fBmmlsnsd\fR command to display the current information for the
NSDs belonging to the GPFS cluster. The default is to display
information for all NSDs defined to the cluster (\fB-a\fR).
Otherwise, you may choose to display the information for a particular file
system (\fB-f\fR) or for all disks that do not belong to any file system
(\fB-F\fR).
.SH "Parameters"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Display information for all of the NSDs belonging to the GPFS
cluster. This is the default.
.RE
.PP
.RS +3
\fB-f \fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system for which you want NSD information
displayed. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.RE
.PP
.RS +3
\fB-F
\fR
.RE
.RS +9
Display the NSDs that do \fInot\fR belong to any file system in the
GPFS cluster.
.RE
.PP
.RS +3
\fB-d \fIDiskName\fR[;\fIDiskName\fR...]
\fR
.RE
.RS +9
.PP
The name of the NSDs for which you want information displayed. When
you enter multiple \fIDiskName\fRs, you must separate them with semicolons
and enclose the entire string of disk names in quotation marks:
.sp
.nf
"gpfs3nsd;gpfs4nsd;gpfs5nsd"
.fi
.sp
.RE
.SH "Options"
.PP
.RS +3
\fB-L
\fR
.RE
.RS +9
Display the information in extended format.
.RE
.PP
.RS +3
\fB-m
\fR
.RE
.RS +9
Map the NSD name to its disk device name in \fB/dev\fR on the
local node and, if applicable, on the primary and backup NSD server
nodes.
.RE
.PP
.RS +3
\fB-M
\fR
.RE
.RS +9
Map the NSD names to its disk device name in \fB/dev\fR on all
nodes.
.PP
This is a slow operation and its usage is suggested for problem
determination only.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Specifies that the output should contain error information, where
available.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to issue the \fBmmlsnsd\fR command.
.PP
You may issue the \fBmmlsnsd\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To display the default information for all of the NSDs belonging to the
cluster, enter:
.sp
.nf
mmlsnsd
.fi
.sp
The system displays information similar to:
.sp
.nf
File system Disk name Primary node       Backup node
----------------------------------------------------
gpfsa       gpfs16nsd k145n06            k145n07
gpfsa       gpfs17nsd k145n06            k145n07
gpfsa       gpfs18nsd k145n06            k145n07
gpfsa       gpfs19nsd k145n06            k145n07
gpfsa       gpfs20nsd k145n06            k145n07
gpfs1       gpfs1nsd (directly attached)
gpfs1       gpfs2nsd (directly attached)
gpfs1       gpfs3nsd (directly attached)
gpfs1       gpfs4nsd (directly attached)
\ 
.fi
.sp
.HP 3
2. To display all of the NSDs attached to the node from which the command is
issued, enter:
.sp
.nf
mmlsnsd -m
.fi
.sp
The system displays information similar to:
.sp
.nf
Disk name NSD volume ID     Device    Node name Remarks
------------------------------------------------------------
gpfs16nsd 097284213AA842EC  /dev/sdb  k145n06   primary node\ 
gpfs16nsd 097284213AA842EC  /dev/sdb  k145n07   backup node\ 
gpfs17nsd 097284213AA842EF  /dev/sdc  k145n06   primary node
gpfs17nsd 097284213AA842EF  /dev/sdc  k145n07   backup node\ 
gpfs18nsd 097284213AA842F2  /dev/sdd  k145n06   primary node
gpfs18nsd 097284213AA842F2  /dev/sdd  k145n07   backup node\ 
gpfs19nsd 097284213AA842F5  /dev/sde  k145n06   primary node
gpfs19nsd 097284213AA842F5  /dev/sde  k145n07   backup node\ 
gpfs20nsd 097284213AA842F7  /dev/sdf  k145n06   primary node
gpfs20nsd 097284213AA842F7  /dev/sdf  k145n07   backup node\ 
gpfs1nsd  0972841D3AA8420A  -         k145n06   (not found)
                                            directly attached
gpfs2nsd  0972841D3AA8420B  -         k145n06 (not found)
                                            directly attached
gpfs3nsd  0972841D3AA8420C  -         k145n06 (not found)
                                            directly attached
gpfs4nsd  0972841D3AA8420D  -         k145n06 (not found)
                                            directly attached
.fi
.sp
.HP 3
3. To display all of the NSDs in the GPFS cluster in extended format,
enter:
.sp
.nf
mmlsnsd -L
.fi
.sp
The system displays information similar to:
.sp
.nf
File system Disk name NSD volume ID   Primary node  Backup node
---------------------------------------------------------------
gpfsa      gpfs16nsd  097284213AA842EC  k145n06        k145n07
gpfsa      gpfs17nsd  097284213AA842EF  k145n06        k145n07
gpfsa      gpfs18nsd  097284213AA842F2  k145n06        k145n07
gpfsa      gpfs19nsd  097284213AA842F5  k145n06        k145n07
gpfsa      gpfs20nsd  097284213AA842F7  k145n06        k145n07
gpfs1      gpfs1nsd   0972841D3AA8420A  (directly attached)
gpfs1      gpfs2nsd   0972841D3AA8420B  (directly attached)
gpfs1      gpfs3nsd   0972841D3AA8420C  (directly attached)
gpfs1      gpfs4nsd   0972841D3AA8420D  (directly attached)
.fi
.sp
.RE
.SH "See also"
.PP
mmcrnsd Command
.PP
mmdelnsd Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ed fork lJ4@$        &  )A)A)A             &      ./usr/share/man/man8/mmpmon.8            $          $          .TH mmpmon 11/01/04
mmpmon Command
.SH "Name"
.PP
\fBmmpmon\fR - Manages performance monitoring and displays
performance information.
.SH "Synopsis"
.PP
\fBmmpmon\fR [\fB-i\fR \fIcommandFile\fR]
[\fB-d\fR \fIintegerDelayValue\fR] [\fB-p\fR]
[\fB-r\fR \fIintegerRepeatValue\fR] [\fB-s\fR]
[\fB-t\fR \fIintegerTimeoutValue\fR]
.SH "Description"
.PP
Before attempting to use \fBmmpmon\fR, IBM suggests that you review this
command entry, then read the entire chapter, \fIMonitoring GPFS I/O performance with the mmpmon command\fR
in \fIGPFS: Administration and Programming Reference\fR.
.PP
Use the \fBmmpmon\fR command to manage GPFS performance monitoring
functions and display performance monitoring data. The \fBmmpmon\fR
command reads requests from an input file or standard input (stdin), and
writes responses to standard output (stdout). Error messages go to
standard error (stderr). Prompts, if not suppressed, go to
stderr.
.PP
When running \fBmmpmon\fR in such a way that it continually reads
input from a pipe (the driving script or application never intends to send an
end-of-file to \fBmmpmon\fR), set the \fB-r\fR option value to 1 (or use
the default value of 1) to prevent \fBmmpmon\fR from caching the input
records. This avoids unnecessary memory consumption.
.SH "Results"
.PP
The performance monitoring request is sent to the GPFS daemon running on
the same node that is running the \fBmmpmon\fR command.
.PP
All results from the request are written to stdout.
.PP
There are two output formats:
.RS +3
.HP 3
\(bu Human readable, intended for direct viewing.
.sp
In this format, the results are keywords that describe the value presented,
followed by the value. For example: 
.sp
.nf
disks: 2
.fi
.sp
.HP 3
\(bu Machine readable, an easily parsed format intended for further analysis by
scripts or applications.
.sp
In this format, the results are strings with values presented as
keyword/value pairs. The keywords are delimited by underscores (_) and
blanks to make them easier to locate.
.RE
.SH "Parameters"
.PP
.RS +3
\fB-i \fIcommandFile\fR
\fR
.RE
.RS +9
The input file contains \fBmmpmon\fR command requests, one per
line. Use of the \fB-i\fR flag implies use of the \fB-s\fR
flag. For interactive use, just omit the \fB-i\fR flag. In
this case, the input is then read from stdin, allowing \fBmmpmon\fR to take
keyboard input or output piped from a user script or application
program. 
.PP
Leading blanks in the input file are ignored. A line beginning with
a pound sign (#) is treated as a comment. Leading blanks in a line
whose first non-blank character is a pound sign (#) are ignored.
.PP
This table describes the \fBmmpmon\fR requests. 
.br
.PP
\fBTable 19. Input requests to the mmpmon command\fR
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Request~Description
fs_io_s~Display I/O statistics per mounted file system
io_s~Display I/O statistics for the entire node
reset~Reset statistics to zero
rhist nr~Change the request histogram facility request 
~size and latency ranges
rhist off~Disable the request histogram facility.
~This is the default.
rhist on~Enable the request histogram facility
rhist p~Display the request histogram facility pattern
rhist reset~Reset the request histogram facility data
~to zero
rhist s~Display the request histogram facility 
~statistics values
ver~Display mmpmon version
.TE
.sp
.fi
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-d \fIintegerDelayValue\fR
\fR
.RE
.RS +9
Specifies a number of milliseconds to sleep after one invocation of all
the requests in the input file. The default value is 1000. This
value must be an integer greater than or equal to 500 and less than or equal
to 8000000.
.PP
The input file is processed as follows: The first request is
processed, it is sent to the GPFS daemon, the responses for this request are
received and processed, the results for this request are displayed, and then
the next request is processed and so forth. When all requests from
the input file have been processed once, the \fBmmpmon\fR command sleeps
for the specified number of milliseconds. When this time elapses,
\fBmmpmon\fR wakes up and processes the input file again, depending on the
value of the \fB-r\fR flag.
.RE
.PP
.RS +3
\fB-p
\fR
.RE
.RS +9
Indicates to generate output that can be parsed by a script or
program. If this option is not specified, human readable output is
produced.
.RE
.PP
.RS +3
\fB-r \fIintegerRepeatValue\fR
\fR
.RE
.RS +9
Specifies the number of times to run all the requests in the input
file. The default value is one. Specify an integer between zero
and 8000000. Zero means to run forever, in which case processing
continues until it is interrupted. This feature is used, for example,
by a driving script or application program that repeatedly reads the result
from a pipe.
.RE
.PP
.RS +3
\fB-s
\fR
.RE
.RS +9
Indicates to suppress the prompt on input. Use of the \fB-i\fR
flag implies use of the \fB-s\fR flag. For use in a pipe or with
redirected input (<), the \fB-s\fR flag is preferred. If not
suppressed, the prompts go to standard error (stderr).
.RE
.PP
.RS +3
\fB-t \fIintegerTimeoutValue\fR
\fR
.RE
.RS +9
Specifies a number of seconds to wait for responses from the GPFS daemon
before considering the connection to have failed. 
.PP
The default value is 60. This value must be an integer greater than
or equal to 1 and less than or equal to 8000000.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
Various errors (insufficient memory, input file not found, incorrect
option, and so forth).
.RE
.PP
.RS +3
\fB3
\fR
.RE
.RS +9
Either no commands were entered interactively, or there were no
\fBmmpmon\fR commands in the input file. The input file was empty,
or consisted of all blanks or comments.
.RE
.PP
.RS +3
\fB4
\fR
.RE
.RS +9
\fBmmpmon\fR terminated due to a request that was not valid.
.RE
.PP
.RS +3
\fB5
\fR
.RE
.RS +9
An internal error has occurred.
.RE
.PP
.RS +3
\fB111
\fR
.RE
.RS +9
An internal error has occurred. A message will follow.
.RE
.SH "Restrictions"
.PP
.RS +3
.HP 3
1. Up to five instances of \fBmmpmon\fR may be run on a given node
concurrently. However, concurrent users may interfere with each
other.
.HP 3
2. Do not alter the input file while \fBmmpmon\fR is running.
.HP 3
3. The input file must contain valid input requests, one per line.
When an incorrect request is detected by \fBmmpmon\fR, it issues an error
message and terminates. Input requests that appear in the input file
before the first incorrect request are processed by \fBmmpmon\fR.
.RE
.SH "Security"
.PP
The \fBmmpmon\fR command must be run by a user with root authority, on
the node for which statistics are desired.
.SH "Examples"
.RS +3
.HP 3
1. Assume that \fBinfile\fR contains these requests:
.sp
.nf
ver
io_s
fs_io_s
rhist off
.fi
.sp
and this command is issued:
.sp
.nf
mmpmon -i infile -r 10 -d 5000
.fi
.sp
The output (sent to stdout) is similar to this:
.sp
.nf
mmpmon node 192.168.1.8 name node1 version 2.3.0
mmpmon node 192.168.1.8 name node1 io_s OK
timestamp:      1083350358/935524
bytes read:              0
bytes written:           0
opens:                   0
closes:                  0
reads:                   0
writes:                  0
readdir:                 0
inode updates:           0
mmpmon node 192.168.1.8 name node1 fs_io_s status 1
no file systems mounted
mmpmon node 192.168.1.8 name node1 rhist off OK
.fi
.sp
.sp
The requests in the input file are run 10 times, with a delay of 5000
milliseconds (5 seconds) between invocations.
.HP 3
2. Here is the previous example with the \fB-p\fR flag:
.sp
.nf
mmpmon -i infile -p -r 10 -d 5000
.fi
.sp
The output (sent to stdout) is similar to this:
.sp
.nf
_ver_ _n_ 192.168.1.8 _nn_ node1 _v_ 2 _lv_ 3 _vt_ 0
_io_s_ _n_ 192.168.1.8 _nn_ node1 _rc_ 0 _t_ 1084195701 _tu_ 350714 _br_ 0 _bw_ 0 _oc_ 0
     _cc_ 0 _rdc_ 0 _wc_ 0 _dir_ 0 _iu_ 0
_fs_io_s_ _n_ 192.168.1.8 _nn_ node1 _rc_ 1 _t_ 1084195701 _tu_ 364489 _cl_ - _fs_ -_rhist_\ 
     _n_ 192.168.1.8 _nn_ node1 _req_ off _rc_ 0 _t_ 1084195701 _tu_ 378217
.fi
.sp
.HP 3
3. This is an example of \fBfs_io_s\fR with a mounted file system:
.sp
.nf
mmpmon node 198.168.1.8 name node1 fs_io_s OK
cluster: node1.localdomain
filesystem: gpfs1
disks: 1
timestamp: 1093352136/799285
bytes read: 52428800
bytes written: 87031808
opens: 6
closes: 4
reads: 51
writes: 83
readdir: 0
inode updates: 11
mmpmon node 198.168.1.8 name node1 fs_io_s OK
cluster: node1.localdomain
filesystem: gpfs2
disks: 2
timestamp: 1093352136/799285
bytes read: 87031808
bytes written: 52428800
opens: 4
closes: 3
reads: 12834
writes: 50
readdir: 0
inode updates: 9
.fi
.sp
.HP 3
4. Here is the previous example with the \fB-p\fR flag:
.sp
.nf
_fs_io_s_ _n_ 198.168.1.8 _nn_ node1 _rc_ 0 _t_ 1093352061 _tu_ 93867 _cl_ node1.localdomain
 _fs_ gpfs1 _d_ 1 _br_ 52428800 _bw_ 87031808 _oc_ 6 _cc_ 4 _rdc_ 51 _wc_ 83 _dir_ 0 _iu_ 10
_fs_io_s_ _n_ 198.168.1.8 _nn_ node1 _rc_ 0 _t_ 1093352061 _tu_ 93867 _cl_ node1.localdomain
  _fs_ gpfs2 _d_ 2 _br_ 87031808 _bw_ 52428800 _oc_ 4 _cc_ 3 _rdc_ 12834 _wc_ 50 _dir_ 0 _iu_ 8
.fi
.sp
This output consists of two strings.
.HP 3
5. This is an example of \fBio_s\fR with a mounted file system:
.sp
.nf
mmpmon node 198.168.1.8 name node1 io_s OK
timestamp: 1093351951/587570
bytes read: 139460608
bytes written: 139460608
opens: 10
closes: 7
reads: 12885
writes: 133
readdir: 0
inode updates: 14
.fi
.sp
.HP 3
6. Here is the previous example with the \fB-p\fR flag:
.sp
.nf
_io_s_ _n_ 198.168.1.8 _nn_ node1 _rc_ 0 _t_ 1093351982 _tu_ 356420 _br_ 139460608\ 
 _bw_ 139460608 _oc_ 10 _cc_ 7 _rdc_ 0 _wc_ 133 _dir_ 0 _iu_ 14
.fi
.sp
This output consists of one string.
.PP
For several more examples, see \fIMonitoring GPFS I/O performance with the mmpmon command\fR
in \fIGPFS: Administration and Programming Reference\fR.
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
B4
kB nJ4@$          ,A,A,A                   ./usr/share/man/man8/mmquotaoff.8  8 3 n         $          $          .TH mmquotaoff 11/01/04
mmquotaoff Command
.SH "Name"
.PP
\fBmmquotaoff\fR - Deactivates quota limit checking.
.SH "Synopsis"
.PP
\fBmmquotaoff\fR [\fB-u\fR | \fB-g\fR]
[\fB-v\fR] {\fIDevice\fR[ \fIDevice\fR
\&.\&.\&. ] | \fB-a\fR}
.SH "Description"
.PP
The \fBmmquotaoff\fR command disables quota limit checking by
GPFS.
.PP
If neither the \fB-u\fR nor the \fB-g\fR option is specified, the
\fBmmquotaoff\fR command deactivates quota limit checking for both users
and groups.
.PP
If the \fB-a\fR option is not specified, \fIDevice\fR must be the
last parameter entered.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR[ \fIDevice\fR ... ]
\fR
.RE
.RS +9
The device name of the file system to have quota limit checking
deactivated.
.PP
If more than one file system is listed, the names must be delimited by a
space. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Deactivates quota limit checking for all GPFS file systems in the
cluster. When used in combination with the \fB-g\fR option, only
group quota limit checking is deactivated. When used in combination
with the \fB-u\fR option, only user quota limit checking is
deactivated.
.RE
.PP
.RS +3
\fB-g
\fR
.RE
.RS +9
Specifies that only group quota limit checking is to be
deactivated.
.RE
.PP
.RS +3
\fB-u
\fR
.RE
.RS +9
Specifies that only user quota limit checking is to be deactivated.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Prints a message for each file system in which quotas are
deactivated.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmquotaoff\fR command.
.PP
GPFS must be running on the node from which the \fBmmquotaoff\fR command
is issued.
.SH "Examples"
.RS +3
.HP 3
1. To deactivate user quota limit checking on file system \fBfs0\fR,
enter:
.sp
.nf
mmquotaoff -u fs0
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsfs fs0 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- -------------------------------------------
 -Q  group          Quotas enforced
.fi
.sp
.HP 3
2. To deactivate group quota limit checking on all file systems, enter:
.sp
.nf
mmquotaoff -g -a
.fi
.sp
To confirm the change, individually for each file system, enter:
.sp
.nf
mmlsfs fs2 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- -------------------------------------------
 -Q  user           Quotas enforced
.fi
.sp
.HP 3
3. To deactivate all quota limit checking on fs0, enter:
.sp
.nf
mmquotaoff fs0
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsfs fs0 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- -----------------------------------------
 -Q  none           Quotas enforced
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefedquota Command
.PP
mmdefquotaoff Command
.PP
mmdefquotaon Command
.PP
mmedquota Command
.PP
mmlsquota Command
.PP
mmquotaon Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
e sysk
 pJ4@$        \  AAA             \      ./usr/share/man/man8/mmquotaon.8   8 3 n         $          $          .TH mmquotaon 11/01/04
mmquotaon Command
.SH "Name"
.PP
\fBmmquotaon\fR - Activates quota limit checking.
.SH "Synopsis"
.PP
\fBmmquotaon\fR [\fB-u\fR | \fB-g\fR]
[\fB-v\fR] {\fIDevice\fR[ \fIDevice\fR
\&.\&.\&. ] | \fB-a\fR}
.SH "Description"
.PP
The \fBmmquotaon\fR command enables quota limit checking by GPFS.
.PP
If neither the \fB-u\fR nor the \fB-g\fR option is specified, the
\fBmmquotaon\fR command activates quota limit checking for both users and
groups.
.PP
If the \fB-a\fR option is not used, \fIDevice\fR must be the last
parameter specified.
.PP
After quota limit checking has been activated by issuing the
\fBmmquotaon\fR command, issue the \fBmmcheckquota\fR command to count inode and space
usage.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR[ \fIDevice\fR ... ]
\fR
.RE
.RS +9
The device name of the file system to have quota limit checking
activated.
.PP
If more than one file system is listed, the names must be delimited by a
space. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Activates quota limit checking for all of the GPFS file systems in
the cluster. When used in combination with the \fB-g\fR option, only
group quota limit checking is activated. When used in combination with
the \fB-u\fR option, only user quota limit checking is
activated.
.RE
.PP
.RS +3
\fB-g
\fR
.RE
.RS +9
Specifies that only group quota limit checking is to be activated.
.RE
.PP
.RS +3
\fB-u
\fR
.RE
.RS +9
Specifies that only user quota limit checking is to be activated.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Prints a message for each file system in which quota limit checking is
activated.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmquotaon\fR command.
.PP
GPFS must be running on the node from which the \fBmmquotaon\fR command
is issued.
.SH "Examples"
.RS +3
.HP 3
1. To activate user quotas on file system \fBfs0\fR, enter:
.sp
.nf
mmquotaon -u fs0
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsfs fs0 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- --------------------------------------------
 -Q  user           Quotas enforced
.fi
.sp
.HP 3
2. To activate group quota limit checking on all file systems, enter:
.sp
.nf
mmquotaon -g -a
.fi
.sp
To confirm the change, individually for each file system, enter:
.sp
.nf
mmlsfs  fs1 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- ---------------------------------------------
 -Q  group          Quotas enforced
.fi
.sp
.HP 3
3. To activate both user and group quota limit checking on file system
\fBfs2\fR, enter:
.sp
.nf
mmquotaon fs2
.fi
.sp
To confirm the change, enter:
.sp
.nf
mmlsfs  fs2 -Q
.fi
.sp
The system displays information similar to:
.sp
.nf
flag value          description
---- -------------- --------------------------------------------
 -Q  user;group     Quotas enforced
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefedquota Command
.PP
mmdefquotaoff Command
.PP
mmdefquotaon Command
.PP
mmedquota Command
.PP
mmlsquota Command
.PP
mmquotaoff Command
.PP
mmrepquota Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ds) k|c tJ4@$          xHBxHBxHB                   ./usr/share/man/man8/mmremotecluster.8 n         $          $          .TH mmremotecluster 03/08/05
mmremotecluster Command
.SH "Name"
.PP
\fBmmremotecluster\fR - Manages the information about other GPFS
clusters that this cluster can access when mounting remote GPFS file
systems.
.SH "Synopsis"
.PP
\fBmmremotecluster\fR \fBadd \fR \fIremoteClusterName\fR
[\fB-n\fR \fIcontactNodes\fR] [\fB-k\fR
\fIkeyFile\fR]
.PP
Or,
.PP
\fBmmremotecluster\fR \fBupdate\fR \fIremoteClusterName\fR
[\fB-C\fR \fInewClusterName\fR] [\fB-n\fR
\fIcontactNodes\fR] [\fB-k\fR \fIkeyFile\fR]
.PP
Or,
.PP
\fBmmremotecluster\fR \fBdelete {\fR\fIremoteClusterName\fR\fB |
all}\fR
.PP
Or,
.PP
\fBmmremotecluster\fR \fBshow\fR [\fIremoteClusterName\fR\fB
| all\fR]
.SH "Description"
.PP
The \fBmmremotecluster\fR command is used to make remote GPFS clusters
known to the local cluster, and to maintain the attributes associated with
those remote clusters. The keyword appearing after
\fBmmremotecluster\fR determines which action is performed:
.PP
.RS +3
\fBadd
\fR
.RE
.RS +9
Adds a remote GPFS cluster to the set of remote clusters known to the
local cluster.
.RE
.PP
.RS +3
\fBdelete
\fR
.RE
.RS +9
Deletes the information for a remote GPFS cluster.
.RE
.PP
.RS +3
\fBshow
\fR
.RE
.RS +9
Displays information about a remote GPFS cluster.
.RE
.PP
.RS +3
\fBupdate
\fR
.RE
.RS +9
Updates the attributes of a remote GPFS cluster.
.RE
.PP
To be able to mount file systems that belong to some other GPFS cluster,
you must first make the nodes in this cluster aware of the GPFS cluster that
owns those file systems. This is accomplished with the
\fBmmremotecluster add\fR command. The information that the command
requires must be provided to you by the administrator of the remote GPFS
cluster. You will need this information:
.RS +3
.HP 3
\(bu The name of the remote cluster.
.HP 3
\(bu The names or IP addresses of a few nodes that belong to the remote GPFS
cluster.
.HP 3
\(bu The public key file generated by the administrator of the remote cluster
by running the \fBmmauth genkey\fR command for the remote cluster.
.RE
.PP
Since each cluster is managed independently, there is no automatic
coordination and propagation of changes between clusters like there is between
the nodes within a cluster. This means that once a remote cluster is
defined with the \fBmmremotecluster\fR command, the information about that
cluster is automatically propagated across all nodes that belong to this
cluster. But if the administrator of the remote cluster decides to
rename it, or deletes some or all of the contact nodes, or change the public
key file, the information in this cluster becomes obsolete. It is the
responsibility of the administrator of the remote GPFS cluster to notify you
of such changes so that you can update your information using the appropriate
options of the \fBmmremotecluster update\fR command.
.SH "Parameters"
.PP
.RS +3
\fB\fIremoteClusterName\fR
\fR
.RE
.RS +9
Specifies the cluster name associated with the remote cluster that owns
the remote GPFS file system. The value \fBall\fR indicates all
remote clusters defined to this cluster, when using the \fBmmremotecluster
delete\fR or \fBmmremotecluster show \fR commands.
.RE
.PP
.RS +3
\fB-C \fInewClusterName\fR
\fR
.RE
.RS +9
Specifies the new cluster name to be associated with the remote
cluster.
.RE
.PP
.RS +3
\fB-k \fIkeyFile\fR
\fR
.RE
.RS +9
Specifies the name of the public key file provided to you by the
administrator of the remote GPFS cluster.
.RE
.PP
.RS +3
\fB-n \fIcontactNodes\fR
\fR
.RE
.RS +9
A comma-separated list of a few nodes that belong to the remote GPFS
cluster. The nodes can be identified with their node names or IP
addresses as known to the remote GPFS cluster.
.RE
.SH "Options"
.PP
None.
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion. After successful completion of the
\fBmmremotecluster\fR command, the new configuration information is
propagated to all nodes in the cluster.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmremotecluster\fR
command.
.PP
You may issue the \fBmmremotecluster\fR command from any node in the
GPFS cluster.
.SH "Examples"
.RS +3
.HP 3
1. This command adds remote cluster
\fBk164.kgn.ibm.com\fR to the set of remote clusters
known to the local cluster, specifying \fBk164n02\fR and \fBk164n03\fR
as remote contact nodes. File \fBk164.id_rsa.pub\fR is
the name of the public key file provided to you by the administrator of the
remote cluster.
.sp
.nf
mmremotecluster add k164.kgn.ibm.com -n k164n02,k164n03\\
 -k k164.id_rsa.pub
.fi
.sp
The output is similar to this:
.sp
.nf
mmremotecluster: 6027-1371 Propagating the changes to all 
affected nodes. This is an asynchronous process.
.fi
.sp
.HP 3
2. This command shows information for the remote cluster
\fBk164.kgn.ibm.com\fR.
.sp
.nf
mmremotecluster show k164.kgn.ibm.com
.fi
.sp
The output is similar to this:
.sp
.nf
Cluster name:    k164.kgn.ibm.com
Contact nodes:   k164n02,k164n03
SHA digest:      a3917c8282fca7a27d951566940768dcd241902b
File systems:    (none defined)
.fi
.sp
.HP 3
3. This command updates information for the remote cluster
\fBk164.kgn.ibm.com\fR, changing the remote contact
nodes to \fBk164n02\fR and \fBk164n01\fR.
.sp
.nf
mmremotecluster update k164.kgn.ibm.com -n k164n02,k164n01
.fi
.sp
The output is similar to this:
.sp
.nf
mmremotecluster: 6027-1371 Propagating the changes to all 
affected nodes. This is an asynchronous process.
.fi
.sp
.sp
The \fBmmremotecluster show\fR command can then be used to see
the changes.
.sp
.nf
mmremotecluster show k164.kgn.ibm.com
.fi
.sp
.sp
The output is similar to this:
.sp
.nf
Cluster name:    k164.kgn.ibm.com
Contact nodes:   k164n02,k164n01
SHA digest:      a3917c8282fca7a27d951566940768dcd241902b
File systems:    (none defined)
.fi
.sp
.HP 3
4. This command deletes information for remote cluster
\fBk164.kgn.ibm.com\fR from the local cluster.
.sp
.nf
mmremotecluster delete k164.kgn.ibm.com
.fi
.sp
The output is similar to this:
.sp
.nf
mmremotecluster: 6027-1371 Propagating the changes to all 
affected nodes. This is an asynchronous process.
.fi
.sp
.RE
.SH "See also"
.PP
mmauth Command
.PP
mmremotefs Command
.PP
\fIAccessing GPFS file systems from other GPFS clusters\fR in
\fIGPFS: Administration and Programming Reference\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
fBkP vJ4@$          AAA                   ./usr/share/man/man8/mmremotefs.8 er.8 n         $          $          .TH mmremotefs 11/01/04
mmremotefs Command
.SH "Name"
.PP
\fBmmremotefs\fR - Manages the information about GPFS file systems
from other clusters that this cluster can mount.
.SH "Synopsis"
.PP
\fBmmremotefs\fR \fBadd\fR \fIDevice\fR \fB-f\fR
\fIRemoteDevice\fR \fB-C\fR \fIremoteClusterName\fR \fB-T\fR
\fImountPoint\fR [\fB-A {yes | \fR\fB\fIno\fR\fR\fB |
automount}\fR] [\fB-o\fR \fImountOptions\fR]
.PP
Or,
.PP
\fBmmremotefs\fR \fBdelete {\fR\fIDevice\fR\fB | all | \fR
\fB-C\fR \fIremoteClusterName\fR\fB}\fR
.PP
Or,
.PP
\fBmmremotefs\fR \fBshow\fR [\fIDevice\fR\fB | all |
\fR\fB-C\fR \fIremoteClusterName\fR]
.PP
Or,
.PP
\fBmmremotefs\fR \fBupdate\fR \fIDevice\fR [\fB-f\fR
\fIRemoteDevice\fR] [\fB-C\fR \fIremoteClusterName\fR]
[\fB-T\fR \fImountPoint\fR] [\fB-A {yes | \fR\fB\fIno\fR\fR\fB | automount}\fR] [\fB-o\fR
\fImountOptions\fR]
.SH "Description"
.PP
The \fBmmremotefs\fR command is used to make GPFS file systems that
belong to other GPFS clusters known to the nodes in this cluster, and to
maintain the attributes associated with these file systems. The keyword
appearing after \fBmmremotefs\fR determines which action is
performed:
.PP
.RS +3
\fBadd
\fR
.RE
.RS +9
Define a new remote GPFS file system.
.RE
.PP
.RS +3
\fBdelete
\fR
.RE
.RS +9
Delete the information for a remote GPFS file system.
.RE
.PP
.RS +3
\fBshow
\fR
.RE
.RS +9
Display the information associated with a remote GPFS file system.
.RE
.PP
.RS +3
\fBupdate
\fR
.RE
.RS +9
Update the information associated with a remote GPFS file system.
.RE
.PP
Use the \fBmmremotefs\fR command to make the nodes in this cluster aware
of file systems that belong to other GPFS clusters. The cluster that
owns the given file system must have already been defined with the
\fBmmremotecluster\fR command. The \fBmmremotefs\fR command is
used to assign a local name under which the remote file system will be known
in this cluster, the mount point where the file system is to be mounted in
this cluster, and any local mount options that you may want.
.PP
Once a remote file system has been successfully defined and a local device
name associated with it, you can issue normal commands using that local name,
the same way you would issue them for file systems that are owned by this
cluster.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
Specifies the name by which the remote GPFS file system will be known in
the cluster.
.RE
.PP
.RS +3
\fB-C \fIremoteClusterName\fR
\fR
.RE
.RS +9
Specifies the name of the GPFS cluster that owns the remote GPFS file
system.
.RE
.PP
.RS +3
\fB-f \fIRemoteDevice\fR
\fR
.RE
.RS +9
Specifies the actual name of the remote GPFS file system. This is
the device name of the file system as known to the remote cluster that owns
the file system.
.RE
.SH "Options"
.PP
.RS +3
\fB-A {yes | \fB\fIno\fR\fR | automount}
\fR
.RE
.RS +9
Indicates when the file system is to be mounted:
.PP
.RS +3
\fByes
\fR
.RE
.RS +9
When the GPFS daemon starts.
.RE
.PP
.RS +3
\fBno
\fR
.RE
.RS +9
Manual mount. This is the default.
.RE
.PP
.RS +3
\fBautomount
\fR
.RE
.RS +9
When the file system is first accessed.
.RE
.RE
.PP
.RS +3
\fB-o \fImountOptions\fR
\fR
.RE
.RS +9
Specifies mount options to pass to the \fBmount\fR command when
mounting the file system.
.RE
.PP
.RS +3
\fB-T \fImountPoint\fR
\fR
.RE
.RS +9
Indicates the local mount point for the remote GPFS file system.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion. After successful completion of the
\fBmmremotefs\fR command, the new configuration information is propagated
to all nodes in the cluster.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmremotefs\fR command.
.PP
You may issue the \fBmmremotefs\fR command from any node in the GPFS
cluster.
.SH "Examples"
.PP
This command adds remote file system \fBgpfsn\fR, owned by remote
cluster \fBk164.kgn.ibm.com\fR, to the local cluster,
assigning \fBrgpfsn\fR as the local name for the file system, and
\fB/gpfs/rgpfsn\fR as the local mount point. 
.sp
.nf
mmremotefs add rgpfsn -f gpfsn -C k164.kgn.ibm.com -T /gpfs/rgpfsn
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
mmremotefs: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.PP
The \fBmmremotefs show\fR command can be used to see the changes.
.sp
.nf
mmremotefs show rgpfsn
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
Local Name  Remote Name Cluster name  Mount Point Mount Options Automount
rgpfsn      gpfsn      k164n.kgn.ibm.com  /rgpfsn  rw,mtime,noatime    no
.fi
.sp
.SH "See also"
.PP
mmauth Command
.PP
mmremotecluster Command
.PP
\fIAccessing GPFS file systems from other GPFS clusters\fR in
\fIGPFS: Administration and Programming Reference\fR.
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ounk<( xJ4@$          AAA                   ./usr/share/man/man8/mmrepquota.8 er.8 n         $          $          .TH mmrepquota 11/01/04
mmrepquota Command
.SH "Name"
.PP
\fBmmrepquota\fR - Reports file system user and group
quotas.
.SH "Synopsis"
.PP
\fBmmrepquota\fR [\fB-e\fR] [\fB-g\fR]
[\fB-q\fR] [\fB-u\fR] [\fB-n\fR]
[\fB-v\fR] {\fIDevice\fR[ \fIDevice\fR
\&.\&.\&. ] | \fB-a\fR}
.SH "Description"
.PP
The \fBmmrepquota\fR command reports file system usage and quota
information for both users and groups.
.PP
If neither the \fB-g\fR nor the \fB-u\fR option is specified, then
both user and group quotas are listed.
.PP
If the \fB-a\fR option is not specified, \fIDevice\fR must be the
last parameter entered.
.PP
For each file system in the cluster, the \fBmmrepquota\fR command
displays:
.RS +3
.HP 3
1. Block limits:
.RS +3
.HP 3
\(bu quota type (USR or GROUP)
.HP 3
\(bu current usage in KB
.HP 3
\(bu soft limit in KB
.HP 3
\(bu hard limit in KB
.HP 3
\(bu space in doubt
.HP 3
\(bu grace period
.RE
.HP 3
2. File limits:
.RS +3
.HP 3
\(bu current number of files
.HP 3
\(bu soft limit
.HP 3
\(bu hard limit
.HP 3
\(bu files in doubt
.HP 3
\(bu grace period
.RE
.HP 3
3. Entry Type
.sp
.RS +3
\fBdefault on
\fR
.RE
.RS +9
default quotas are enabled for this file system
.RE
.sp
.RS +3
\fBdefault off
\fR
.RE
.RS +9
default quotas are not enabled for this file system
.RE
.sp
.RS +3
\fBe
\fR
.RE
.RS +9
explicit quotas - the quota limits have been explicitly set using
the \fBmmedquota\fR command
.RE
.sp
.RS +3
\fBd
\fR
.RE
.RS +9
default quotas - the quota limits are the default values set using
the \fBmmdefedquota\fR command
.RE
.sp
.RS +3
\fBi
\fR
.RE
.RS +9
initial quotas - default quotas were not enabled when this initial
entry was established. Initial quota limits have a value of zero
indicating no limit.
.RE
.RE
.PP
Because the sum of the in doubt value and the current usage may not exceed
the hard limit, the actual block space and number of files available to the
user or the group may be constrained by the \fIin doubt\fR value. If
the \fIin doubt\fR value approaches a significant percentage of the quota,
run the \fBmmcheckquota\fR command to account for the
lost space and files.
.PP
GPFS quota management takes replication into account when reporting on and
determining if quota limits have been exceeded for both block and file
usage. In a file system that has either type of replication set to a
value of two, the values reported on by both the \fBmmlsquota\fR command and the \fBmmrepquota\fR
command are double the value reported by the \fBls\fR command.
.PP
When issuing the \fBmmrepquota\fR command on a mounted file system,
negative in doubt values may be reported if the quota server processes a
combination of up-to-date and back-level information. This is a
transient situation and may be ignored.
.PP
When a quota management enabled file system is SANergy exported, the block
usage accounting of a file accessed through SANergy includes the blocks
actually used by the file and the extra blocks temporarily allocated (hyper
allocation) by SANergy. Hyper allocation is a SANergy performance
feature and can be tuned using SANergy configuration tools. For more
information, see \fITivoli SANergy: Administrator's Guide\fR at
publib.boulder.ibm.com/tividd/td/SANergy2.2.4.html.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR[ \fIDevice\fR...]
\fR
.RE
.RS +9
The device name of the file system to be listed.
.PP
If more than one file system is listed, the names must be delimited by a
space. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Lists quotas for all file systems in the cluster. A header
line is printed automatically with this option.
.RE
.PP
.RS +3
\fB-e
\fR
.RE
.RS +9
Specifies that the \fBmmrepquota\fR command is to collect updated quota
usage data from all nodes before displaying results. If this option is
not specified, there is the potential to display negative usage values as the
quota server may process a combination of up-to-date and back-level
information.
.RE
.PP
.RS +3
\fB-g
\fR
.RE
.RS +9
List only group quotas.
.RE
.PP
.RS +3
\fB-n
\fR
.RE
.RS +9
Displays a numerical user ID.
.RE
.PP
.RS +3
\fB-q
\fR
.RE
.RS +9
Show whether quota enforcement is active.
.RE
.PP
.RS +3
\fB-u
\fR
.RE
.RS +9
List only user quotas.
.RE
.PP
.RS +3
\fB-v
\fR
.RE
.RS +9
Print a header line.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmrepquota\fR command.
.PP
GPFS must be running on the node from which the \fBmmrepquota\fR command
is issued.
.SH "Examples"
.RS +3
.HP 3
1. To report on user quotas for file system \fBfs2\fR and display a header
line, enter:
.sp
.nf
mmrepquota -u -v fs2
.fi
.sp
The system displays information similar to:
.sp
.nf
*** Report for USR quotas on fs2
                Block Limits        |        File Limits
                           in                            in       entry
Name type KB quota limit doubt grace|files quota limit doubt grace Type
root  USR    8   0    0    0   none |  1     0   0   0  none default
                                                                on
user2 USR 2016 256  512    0  6days |  7    10  20   0   none d
user3 USR  104 256  512    0   none |  1    10  20   0   none d
user4 USR    0 256  512    0   none |  0    10  20   0   none d
user5 USR  368 256  512    0 23hours|  5     4  10   0 23hours d
user6 USR    0 256  512    0   none |  0    10  20   0   none d
user7 USR 1024 1024 5120 4096  none |  1     0   0  19   none e
.fi
.sp
.HP 3
2. To report on quota enforcement for \fBfs2\fR, enter:
.sp
.nf
mmrepquota -q fs2
.fi
.sp
The system displays information similar to:
.sp
.nf
fs2: USR quota is on; default quota is on
fs2: GRP quota is on; default quota is off\ 
.fi
.sp
.RE
.SH "See also"
.PP
mmcheckquota Command
.PP
mmdefedquota Command
.PP
mmdefquotaoff Command
.PP
mmdefquotaon Command
.PP
mmedquota Command
.PP
mmlsquota Command
.PP
mmquotaoff Command
.PP
mmquotaon Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
quokX zJ4@$          
A
A
A                   ./usr/share/man/man8/mmrestorefs.8 r.8 n         $          $          .TH mmrestorefs 11/01/04
mmrestorefs Command
.SH "Name"
.PP
\fBmmrestorefs\fR - Restores a file system from a GPFS
snapshot.
.SH "Synopsis"
.PP
\fBmmrestorefs\fR \fIDevice\fR \fIDirectory\fR
[\fB-c\fR]
.SH "Description"
.PP
Use the \fBmmrestorefs\fR command to restore user data and attribute
files in a file system using those of the specified snapshot.
.PP
Prior to issuing the \fBmmrestorefs\fR command, you must unmount
the file system from all nodes in the cluster. The file system may not
be remounted until the \fBmmrestorefs\fR command has successfully
completed, unless you have specified the \fB-c\fR option to force the
restore to continue even in the event errors are encountered. Automatic
quota activation upon mounting the file system is \fInot\fR restored by
the \fBmmrestorefs\fR command. You must issue the \fBmmchfs -Q yes\fR command to restore automatic quota
activation.
.PP
Snapshots are not affected by the \fBmmrestorefs\fR command.
Consequently:
.RS +3
.HP 3
1. A failure while restoring one snapshot may possibly be recovered by
restoring a different snapshot.
.HP 3
2. Restoring a file system with a snapshot other than the latest snapshot
taken, causes broken links to snapshots taken after the restored point in
time.
.RE
.PP
Because snapshots are not copies of the entire file system, they should not
be used as protection against media failures. For protection against
media failures, see \fIGeneral Parallel
File System: Concepts, Planning and Installation Guide\fR and
search on \fIrecoverability considerations\fR.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to be restored from the
snapshot. File system names need not be fully-qualified.
\fBfs0\fR is just as acceptable as \fB/dev/fs0\fR.
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB\fIDirectory\fR
\fR
.RE
.RS +9
The snapshot with which to restore the file system.
.RE
.SH "Options"
.PP
.RS +3
\fB-c
\fR
.RE
.RS +9
Continue to restore the file system in the event errors occur.
.PP
Upon completion of the \fBmmrestorefs -c\fR command, the file system is
inconsistent, but can be mounted to recover data from the snapshot. If
necessary, the command may be issued to recover as much data as
possible. The \fBmmfsck\fR command may be run on an inconsistent
file system.
.PP
After the \fBmmrestorefs -c\fR command has been issued, use the \fBmmfsck\fR command to clean up the files or
directories that could not be restored.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmrestorefs\fR
command.
.PP
You may issue the \fBmmrestorefs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
We have a directory structure similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
.fi
.sp
.PP
If the directory \fBuserA\fR is then deleted, we would have:
.sp
.nf
/fs1/file1
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
.fi
.sp
.PP
The directory \fBuserB\fR is then created using the inode originally
assigned to \fBuserA\fR. We take another snapshot:
.sp
.nf
mmcrsnapshot fs1 snap2
.fi
.sp
.PP
The output is similar to this:
.sp
.nf
Writing dirty data to disk
Quiescing all file system operations
Writing dirty data to disk again
Creating snapshot.
Resuming operations.
.fi
.sp
.PP
After the command is issued, the directory structure would appear similar
to:
.sp
.nf
/fs1/file1
/fs1/userB/file2b
/fs1/userB/file3b
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
/fs1/.snapshots/snap2/file1
/fs1/.snapshots/snap2/userB/file2b
/fs1/.snapshots/snap2/userB/file3b
.fi
.sp
.PP
If the file system is then to be restored from \fBsnap1\fR:
.sp
.nf
mmrestorefs fs1 snap1
.fi
.sp
.PP
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.snapshots/snap1/file1
/fs1/.snapshots/snap1/userA/file2
/fs1/.snapshots/snap1/userA/file3
/fs1/.snapshots/snap2/file1
/fs1/.snapshots/snap2/userB/file2b
/fs1/.snapshots/snap2/userB/file3b
.fi
.sp
.SH "See also"
.PP
mmcrsnapshot Command
.PP
mmdelsnapshot Command
.PP
mmlssnapshot Command
.PP
mmsnapdir Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
nk |J4@$         &  AAA              &      ./usr/share/man/man8/mmrestripefs.8 .8 n         $          $          .TH mmrestripefs 11/01/04
mmrestripefs Command
.SH "Name"
.PP
\fBmmrestripefs\fR - Rebalances or restores the replication factor of
all files in a file system.
.SH "Synopsis"
.PP
\fBmmrestripefs\fR \fIDevice\fR {\fB-m\fR | \fB-r \fR |
\fB-b\fR} [\fB-N\fR { \fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR ... ]
}]
.SH "Description"
.PP
Use the \fBmmrestripefs\fR command to rebalance or restore the
replication factor of all files in a file system. The
\fBmmrestripefs\fR command moves existing file system data between
different disks in the file system based on changes to the disk state made by
the \fBmmchdisk\fR, \fBmmadddisk\fR, and \fBmmdeldisk\fR commands.
.PP
The \fBmmrestripefs\fR command attempts to restore the metadata or data
replication factor of any file in the file system.
.PP
You must specify one of the three options (\fB-b\fR, \fB-m\fR, or
\fB-r\fR) to indicate how much file system data to move. You can
issue this command against a mounted or unmounted file system.
.PP
If you do not use replication, the \fB-m\fR and \fB-r\fR options are
equivalent. Their behavior differs only on replicated files.
After a successful replicate (\fB-r\fR option), all suspended disks are
empty. A migrate operation, using the \fB-m\fR option, leaves data
on a suspended disk as long as at least one other replica of the data remains
on a disk that is not suspended. Restriping a file system includes
replicating it. The \fB-b\fR option performs all the operations of
the \fB-m\fR and \fB-r\fR options.
.PP
Consider the necessity of restriping and the current demands on the
system. New data which is added to the file system is correctly
striped. Restriping a large file system requires a large number of
insert and delete operations and may affect system performance. Plan to
perform this task when system demand is low.
.PP
When using SANergy, consider these points:
.RS +3
.HP 3
\(bu If the \fBmmrestripefs\fR command is issued on a file that is locked by
SANergy, the command waits until it is unlocked before proceeding.
.HP 3
\(bu I/O operations from SANergy clients must terminate before using the
\fBmmrestripefs\fR command. If not, the client applications receive
an error.
.RE
.PP
\fBDetermining how long mmrestripefs takes to complete\fR
.PP
To determine how long the \fBmmrestripefs\fR command will take to
complete, consider these points:
.RS +3
.HP 3
1. How much data is to be moved by issuing the \fBdf -k\fR command.
.HP 3
2. How many GPFS client nodes there are to do the work.
.HP 3
3. If used, how much virtual shared disk server or Network Shared Disk (NSD)
server bandwidth is available for I/O.
.HP 3
4. If you have added new disks to a file system, after the disks have been
added determine how much free space is on each of the new disks by issuing the
\fBmmdf \fR\fIDevice\fR \fB-q\fR command.
.RE
.PP
The restriping of a file system is done by having one thread on each node
in the cluster work on a subset of files. Consequently, the more GPFS
client nodes performing work for the restripe, the faster the
\fBmmrestripefs\fR command will complete. The nodes that should
participate in the restripe are specified on the command using the \fB-N\fR
parameter. Based on raw I/O rates you should be able to estimate the
length of time for the restripe. However, to account for the overhead
of scanning all metadata, that value should be doubled.
.PP
In a cluster utilizing virtual shared disk or NSD servers, until you
saturate the disk servers, the more GPFS client nodes doing work for the
restripe, the faster the \fBmmrestripefs\fR command will complete.
After the disk servers have saturated either the disk or network bandwidth,
the number of GPFS client nodes will not affect command completion
rate. Assuming you have enough nodes to saturate the disk servers, and
have to move all of the data, the time to read and write every block of data
is roughly: 
.sp
.nf
  2 * fileSystemSize / averageDiskserverDataRate
.fi
.sp
.PP
As an upper bound, due to overhead of scanning all of the metadata, this
time should be doubled. If other jobs are heavily loading the virtual
shared disk servers, this time may increase even more.
.RS +3
\fBNote:\fR
.RE
.RS +9
There is no particular reason to stop all other jobs while the
\fBmmrestripefs\fR command is running. The CPU load of the command
is minimal on each node and only the files that are being restriped at any
moment are locked to maintain data integrity.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system to be restriped. File system
names need not be fully-qualified. \fBfs0\fR is as acceptable as
\fB/dev/fs0\fR. 
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB-N { \fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR...]}
\fR
.RE
.RS +9
Specify the nodes that participate in the restripe of the file
system. Valid values are: 
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Indicates that all nodes in the GPFS cluster, whether they have the
file system mounted or not, participate in the restripe. This is the
default when the \fB-N\fR option has not been specified.
.RE
.PP
.RS +3
\fBmount
\fR
.RE
.RS +9
Indicates that only the nodes that have the file system mounted
participate in the restripe of the file system.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR...]
\fR
.RE
.RS +9
A comma-separated list of target nodes that participate in the
restripe.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-b
\fR
.RE
.RS +9
Rebalances all files across all disks that are not suspended, even if they
are stopped. Although blocks are allocated on a stopped disk, they are
not written to a stopped disk, nor are reads allowed from a stopped disk,
until that disk is started and replicated data is copied onto it. The
\fBmmrestripefs\fR command rebalances and restripes the file system.
Use this option to rebalance the file system after adding, changing, or
deleting disks in a file system.
.RS +3
\fBNote:\fR
.RE
.RS +9
Rebalancing of files is an I/O intensive and time consuming
operation, and is important only for file systems with large files that are
mostly invariant. In many cases, normal file update and creation will
rebalance your file system over time, without the cost of the
rebalancing.
.RE
.RE
.PP
.RS +3
\fB-m
\fR
.RE
.RS +9
Migrates all critical data off any suspended disk in this file
system. Critical data is all data that would be lost if currently
suspended disks were removed.
.RE
.PP
.RS +3
\fB-r
\fR
.RE
.RS +9
Migrates all data off suspended disks. It also restores all
replicated files in the file system to their designated degree of replication
when a previous disk failure or removal of a disk has made some replica data
inaccessible. Use this parameter either immediately after a disk
failure to protect replicated data against a subsequent failure, or before
taking a disk offline for maintenance to protect replicated data against
failure of another disk during the maintenance process.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmrestripefs\fR
command.
.PP
You may issue the \fBmmrestripefs\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To move all critical data from any suspended disk in file system
\fBfs0\fR, enter:
.sp
.nf
mmrestripefs fs0 -m
.fi
.sp
The system displays information similar to:
.sp
.nf
GPFS: 6027-560 Scanning file system metadata ...
GPFS: 6027-565 Scanning user file metadata ...
3 % completed
   \ .
   \ .
   \ .
100% completed
GPFS: 6027-552 Scan completed successfully.
.fi
.sp
.HP 3
2. To rebalance all files in file system \fBfs1\fR across all defined,
accessible disks that are not stopped or suspended, enter:
.sp
.nf
mmrestripefs fs1 -b
.fi
.sp
The system displays information similar to:
.sp
.nf
GPFS: 6027-589 Scanning file system metadata, phase 1 ...\ 
  48 % complete on Wed Aug 16 16:47:53 2000
  96 % complete on Wed Aug 16 16:47:56 2000
 100 % complete on Wed Aug 16 16:47:56 2000
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 2 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 3 ...\ 
  98 % complete on Wed Aug 16 16:48:02 2000
 100 % complete on Wed Aug 16 16:48:02 2000
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-565 Scanning user file metadata ...
GPFS: 6027-552 Scan completed successfully.
.fi
.sp
.RE
.SH "See also"
.PP
.PP
mmadddisk Command
.PP
mmchdisk Command
.PP
mmdeldisk Command
.PP
mmrpldisk Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
k ~J4@$        G$  AAA             G$      ./usr/share/man/man8/mmrpldisk.8 .8 .8 n         $          $          .TH mmrpldisk 11/01/04
mmrpldisk Command
.SH "Name"
.PP
\fBmmrpldisk\fR - Replaces the specified disk.
.SH "Synopsis"
.PP
\fBmmrpldisk\fR \fIDevice DiskName\fR {\fIDiskDesc\fR |
\fB-F\fR \fIDescFile\fR} [\fB-v\fR \fB\fIyes\fR\fR |
\fBno\fR] [\fB-N\fR {\fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR...]}]
.SH "Description"
.PP
Use the \fBmmrpldisk\fR command to replace an existing disk in the GPFS
file system with a new one. All data on the old disk is migrated to the
new one.
.PP
To replace disks in a GPFS file system, you must first decide if you
will:
.RS +3
.HP 3
1. Create new disks using the \fBmmcrnsd\fR
command. 
.sp
Use the rewritten disk descriptor file produced by the \fBmmcrnsd\fR command or create a new disk
descriptor. When using the rewritten file, the \fIDisk Usage\fR and
\fIFailure Group\fR specifications remain the same as specified on the
\fBmmcrnsd\fR command.
.HP 3
2. Select disks no longer in use in any file system. Issue the \fBmmlsnsd -F\fR command to display the available
disks.
.RE
.PP
The disk may then be used to replace a disk in the file system using the \fBmmrpldisk\fR command.
.PP
\fBNotes: \fR
.RS +3
.sp
.HP 3
1. You cannot replace a disk when it is the only remaining disk in the file
system.
.sp
.HP 3
2. Under no circumstances should you replace a stopped disk. You need
to start a stopped disk before replacing it. If a disk cannot be
started, you must delete it using the \fBmmdeldisk\fR command. See
the \fIGeneral Parallel File System: Problem
Determination Guide\fR and search for \fIdisk media failure\fR.
.sp
.HP 3
3. The file system need not be unmounted before the \fBmmrpldisk\fR
command can be run.
.sp
.HP 3
4. I/O operations from SANergy clients must terminate before using the
\fBmmrpldisk\fR command. If not, the client applications receive an
error.
.RE
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmrpldisk\fR command, the disk is
replaced in the file system and data is copied to the new disk without
restriping.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system where the disk is to be
replaced. File system names need not be fully-qualified.
\fBfs0\fR is as acceptable as \fB/dev/fs0\fR. 
.PP
This must be the first parameter.
.RE
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
The name of the disk to be replaced, which was previously passed to the \fBmmcrfs\fR, \fBmmadddisk\fR,
or \fBmmrpldisk\fR commands. You can display the entire list of disk
names by issuing the \fBmmlsdisk\fR command.
.RE
.PP
.RS +3
\fB\fIDiskDesc\fR
\fR
.RE
.RS +9
A descriptor for the replacement disk.
.RE
.PP
.RS +3
\fB-F \fIDescFile\fR
\fR
.RE
.RS +9
Specifies a file containing the disk descriptor for the replacement
disk.
.PP
The disk descriptor must be specified in the form (Note that the second and
third fields are reserved):
.sp
.nf
DiskName:::DiskUsage:FailureGroup
.fi
.sp
.PP
.RS +3
\fB\fIDiskName\fR
\fR
.RE
.RS +9
.PP
You must specify the name of the NSD previously created by the \fBmmcrnsd\fR command. For a list of available
disks, issue the \fBmmlsnsd -F\fR command.
.RE
.PP
.RS +3
\fB\fIDisk Usage\fR
\fR
.RE
.RS +9
Specify a disk usage or inherit the disk usage of the disk being
replaced: 
.PP
.RS +3
\fBdataAndMetadata
\fR
.RE
.RS +9
Indicates that the disk contains both data and metadata. This is
the default.
.RE
.PP
.RS +3
\fBdataOnly
\fR
.RE
.RS +9
Indicates that the disk contains data and does not contain
metadata.
.RE
.PP
.RS +3
\fBmetadataOnly
\fR
.RE
.RS +9
Indicates that the disk contains metadata and does not contain
data.
.RE
.PP
.RS +3
\fBdescOnly
\fR
.RE
.RS +9
Indicates that the disk contains no data and no metadata. Such a
disk is used solely to keep a copy of the file system descriptor, and can be
used as a third failure group in certain disaster recovery
configurations. 
.RE
.RE
.PP
.RS +3
\fB\fIFailure Group\fR
\fR
.RE
.RS +9
A number identifying the failure group to which this disk belongs.
You can specify any value from -1 (where -1 indicates that the disk has no
point of failure in common with any other disk) to 4000. If you do not
specify a failure group, the new disk inherits the failure group of the disk
being replaced.
.RE
.RS +3
\fBNote:\fR
.RE
.RS +9
While it is not absolutely necessary to specify the same disk descriptor
parameters for the new disk as the old disk, it is suggested you do so.
If the new disk is equivalent in size as the old disk, and if the
\fIDiskUsage\fR and \fIFailureGroup\fR parameters are the same, the data
and metadata can be completely migrated from the old disk to the new
disk. A disk replacement in this manner allows the file system to
maintain its current data and metadata balance.
.PP
If the new disk has a different size, \fIDiskUsage\fR parameter, or
\fIFailureGroup\fR parameter, the operation may leave the file system
unbalanced and require a restripe. Additionally, a change in size or
the \fIDiskUsage\fR parameter may cause the operation to fail since other
disks in the file system may not have sufficient space to absorb more data or
metadata. In this case you must first use the \fBmmadddisk\fR command to add the new disk, the \fBmmdeldisk\fR command to delete the old disk, and
finally the \fBmmrestripefs\fR command to rebalance
the file system.
.RE
.RE
.PP
.RS +3
\fB-N {\fB\fIall\fR\fR | \fBmount\fR |
\fINodeName\fR[,\fINodeName\fR...]}
\fR
.RE
.RS +9
Specify the nodes that participate in the migration of data from the old
to the new disk. Valid values are: 
.PP
.RS +3
\fBall
\fR
.RE
.RS +9
Indicates that all nodes in the GPFS cluster, whether they have the
file system mounted or not, participate in the migration. This is the
default when the \fB-N\fR option has not been specified.
.RE
.PP
.RS +3
\fBmount
\fR
.RE
.RS +9
Indicates that only the nodes that have the file system mounted
participate in the migration.
.RE
.PP
.RS +3
\fB\fINodeName\fR[,\fINodeName\fR ... ]
\fR
.RE
.RS +9
A comma-separated list of nodes that participate in the migration.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-v {\fB\fIyes\fR\fR | no}
\fR
.RE
.RS +9
Verify that specified disks do not belong to an existing file
system. The default is \fB-v yes\fR. Specify \fB-v no\fR
only when you want to reuse disks that are no longer needed for an existing
file system. If the command is interrupted for any reason, you must use
the \fB-v no\fR option on the next invocation of the command.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmrpldisk\fR command.
.PP
You may issue the \fBmmrpldisk\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To replace disk \fBgpfs10nsd\fR in \fBfs3\fR with a new disk,
\fBgpfs12nsd\fR allowing the disk usage and failure group parameters to
default to the corresponding values of \fBgpfs10nsd\fR, and have only nodes
\fBk145n01, k145n03, and k145n05\fR participate in the migration of the
data, enter:
.sp
.nf
mmrpldisk fs3 gpfs10nsd gpfs12nsd -N k145n01,k145n03,k145n05
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
Replacing  ...
GPFS: 6027-531 The following disks of fs3 will be formatted on\ 
node k145n03  gpfs12nsd: size 4390912 KB
Extending Allocation Map
GPFS: 6027-1503 Completed adding disks to file system fs3.
GPFS: 6027-589 Scanning file system metadata, phase 1 ...\ 
  77 % complete on Wed Jul 12 17:33:58 2000
 100 % complete on Wed Jul 12 17:33:59 2000
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 2 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-589 Scanning file system metadata, phase 3 ...\ 
GPFS: 6027-552 Scan completed successfully.
GPFS: 6027-565 Scanning user file metadata ...
   1 % complete on Wed Jul 12 17:34:12 2000
 100 % complete on Wed Jul 12 17:34:15 2000
GPFS: 6027-552 Scan completed successfully.
Done
mmrpldisk: 6027-1371 Propagating the changes to all affected nodes.
This is an asynchronous process.
.fi
.sp
.SH "See also"
.PP
mmadddisk Command
.PP
mmchdisk Command
.PP
mmcrnsd Command
.PP
mmlsdisk Command
.PP
mmlsnsd Command
.PP
mmrestripefs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
 kV J4@$          AAA                   ./usr/share/man/man8/mmsanrepairfs.8 8 n         $          $          .TH mmsanrepairfs 11/01/04
mmsanrepairfs Command
.SH "Name"
.PP
\fBmmsanrepairfs\fR - Repair a file system under the control of
SANergy.
.SH "Synopsis"
.PP
\fBmmsanrepairfs\fR \fIDevice\fR [\fB-n\fR |
\fB-f\fR]
.SH "Description"
.PP
Use the \fBmmsanrepairfs\fR command to remove leftover hyper-allocated
blocks caused by SANergy client failure or SANergy protocol application
failure. This command can be run on a mounted or unmounted file
system.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The device name of the file system. File system names need not be
fully-qualified. \fBfs0\fR is as acceptable as
\fB/dev/fs0\fR.
.RE
.SH "Options"
.PP
.RS +3
\fB\fB-f\fR
\fR
.RE
.RS +9
Specifies to force unlock SANergy locked files after an existing SANergy
lease has expired for those files.
.RE
.PP
.RS +3
\fB\fB-n\fR
\fR
.RE
.RS +9
Specifies to query the current SANergy hyper-allocated blocks state on the
file system. Does not perform the actual removal of the data
blocks.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmsanrepairfs\fR
command.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on the \fBmmcrcluster\fR or the
\fBmmchcluster\fR command, you must ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. Issue this command to remove the hyper-allocated blocks from the file
system named \fBgpfsfc\fR:
.sp
.nf
mmsanrepairfs gpfsfc\ 
.fi
.sp
Output is similar to:
.sp
.nf
"gpfsfc" number of disks: 6, number of inodes: 25920,
      blockSize 16384
Warning: "gpfsfc" is mounted on 1 node(s) and in use on 2 nodes
Scanning user file metadata...
  Repairing hyper allocated inode 21888...repaired
  Repairing hyper allocated inode 21891...repaired
  Repairing hyper allocated inode 21893...repaired
  Repairing hyper allocated inode 21897...repaired
  Repairing hyper allocated inode 21900...repaired
  Total number of sanergy hyper allocated files 35,
     inconsistent 0, repaired 35
.fi
.sp
.HP 3
2. Issue this command to unlock SANergy-locked files in the file system named
\fBgpfsfc\fR:
.sp
.nf
mmsanrepairfs gpfsfc -f
.fi
.sp
Output is similar to:
.sp
.nf
"gpfsfc" number of disks: 6, number of inodes: 25920,
         blockSize 16384
Warning: "gpfsfc" is mounted on 1 node(s) and in use on 2 nodes
Scanning user file metadata...
  18432: sanergy unlocked
    71 % complete on Thu Jul 10 16:52:01 2003
Total number of sanergy hyper allocated files 0, inconsistent 0,\ 
           repaired 0
.fi
.sp
.RE
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
ODEk8H J4@$        d  AAA             d      ./usr/share/man/man8/mmshutdown.8 .8 8 n         $          $          .TH mmshutdown 11/01/04
mmshutdown Command
.SH "Name"
.PP
\fBmmshutdown\fR - Unmounts all GPFS file systems and stops GPFS
on one or more nodes.
.SH "Synopsis"
.PP
\fBmmshutdown\fR [\fB-t\fR \fIunmountTimeout\fR ] [
\fB-a\fR | \fB-W\fR \fINodeFilename\fR | [ \fB-w\fR
\fINodeName\fR[,\fINodeName\fR...]]
[\fB-n\fR
\fINodeNumber\fR[,\fINodeNumber\fR...]]]
.SH "Description"
.PP
Use the \fBmmshutdown\fR command to stop the GPFS daemons on one or more
nodes. If no operand is specified, GPFS is stopped only on the node
from which the command was issued.
.PP
The \fBmmshutdown\fR command first attempts to unmount all GPFS file
systems. If the unmount does not complete within the specified
\fItimeout\fR period, the GPFS daemons shut down anyway.
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmshutdown\fR command, these tasks
are completed: 
.RS +3
.HP 3
\(bu GPFS file systems are unmounted.
.HP 3
\(bu GPFS daemons are stopped.
.RE
.SH "Parameters"
.PP
.RS +3
\fB\fB-a\fR
\fR
.RE
.RS +9
Stop GPFS on all nodes in a GPFS cluster.
.RE
.PP
.RS +3
\fB-n \fINodeNumber\fR
\fR
.RE
.RS +9
A comma-separated list of node numbers on which to stop GPFS. This
list is combined with the nodes specified on the \fB-w\fR option.
.RE
.PP
.RS +3
\fB\fB-W\fR \fINodeFilename\fR
\fR
.RE
.RS +9
Stop GPFS on all nodes whose hostnames are listed in the file. The
hostnames must be listed one per line.
.RE
.PP
.RS +3
\fB-w \fINodeName\fR[,\fINodeName\fR...]
\fR
.RE
.RS +9
A comma-separated list of hostnames on which to stop GPFS. This
list is combined with the nodes specified on the \fB-n\fR option.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.SH "Options"
.PP
.RS +3
\fB-t \fIunmountTimeout\fR
\fR
.RE
.RS +9
The maximum amount of time, in seconds, that the unmount command is given
to complete. The default timeout period is equal to: 
.PP
\fB60 + 3 * \fInumber of nodes\fR\fR
.PP
If the unmount does not complete within the specified amount of time, the
command times out and the GPFS daemons shut down.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmshutdown\fR command.
.PP
You may issue the \fBmmshutdown\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To \fBstop\fR GPFS on all nodes in the GPFS cluster, enter: 
.sp
.nf
mmshutdown -a
.fi
.sp
The system displays information similar to:
.sp
.nf
Thu Aug 12 13:10:40 EDT 2004: 6027-1341 mmshutdown: Starting
                              force unmount of GPFS file systems.
k164n05.kgn.ibm.com:  forced unmount of /fs1
k164n04.kgn.ibm.com:  forced unmount of /fs1
k164n06.kgn.ibm.com:  forced unmount of /fs1
Thu Aug 12 13:10:45 EDT 2004: 6027-1344 mmshutdown: Shutting
                              down GPFS daemons
k164n04.kgn.ibm.com:  Shutting down!
k164n06.kgn.ibm.com:  Shutting down!
k164n05.kgn.ibm.com:  Shutting down!
k164n04.kgn.ibm.com:  'shutdown' command about to kill process
                      49682
k164n05.kgn.ibm.com:  'shutdown' command about to kill process
                      28194
k164n06.kgn.ibm.com:  'shutdown' command about to kill process
\ 
Thu Aug 12 13:10:54 EDT 2004: 6027-1345 mmshutdown: Finished
.fi
.sp
.HP 3
2. To stop GPFS on only node \fBk164n04\fR enter:
.sp
.nf
mmshutdown -w k164n04
.fi
.sp
The system displays information similar to:
.sp
.nf
mmshutdown -w k164n04
Thu Aug 12 13:12:06 EDT 2004: 6027-1341 mmshutdown: Starting
                              force unmount of GPFS file systems
k164n04:  forced unmount of /fs1
Thu Aug 12 13:12:11 EDT 2004: 6027-1344 mmshutdown: Shutting
                              down GPFS daemons
k164n04:  Shutting down!
k164n04:  'shutdown' command about to kill process 65036
Thu Aug 12 13:12:20 EDT 2004: 6027-1345 mmshutdown: Finished
.fi
.sp
.HP 3
3. To stop GPFS on all nodes in the GPFS cluster, allowing not more than
three minutes for unmounting of the file systems, enter:
.sp
.nf
mmshutdown  -a -t 180
.fi
.sp
The system displays information similar to:
.sp
.nf
Thu Aug 12 15:33:00 EDT 2004: 6027-1341 mmshutdown: Starting
                      force unmount of GPFS file systems
k164n05.kgn.ibm.com:  forced unmount of /fs1
k164n04.kgn.ibm.com:  forced unmount of /fs1
k164n06.kgn.ibm.com:  forced unmount of /fs1
Thu Aug 12 15:33:05 EDT 2004: 6027-1344 mmshutdown: Shutting
                    down GPFS daemons
k164n06.kgn.ibm.com:  Shutting down!
k164n05.kgn.ibm.com:  Shutting down!
k164n04.kgn.ibm.com:  Shutting down!
k164n06.kgn.ibm.com:  'shutdown' command about to kill process
                      17364
k164n05.kgn.ibm.com:  'shutdown' command about to kill process
                      29824
k164n04.kgn.ibm.com:  'shutdown' command about to kill process
                      62802
Thu Aug 12 15:33:13 EDT 2004: 6027-1345 mmshutdown: Finished
.fi
.sp
.RE
.SH "See also"
.PP
mmlscluster Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
.PP
formkT "J4@$          6xHB6xHB6xHB                   ./usr/share/man/man8/mmsnapdir.8  .8 8 n         $          $          .TH mmsnapdir 02/01/05
mmsnapdir Command
.SH "Name"
.PP
\fBmmsnapdir\fR - Creates and deletes symbolic links to snapshots
of a GPFS file system, and changes the name of the snapshots
subdirectory.
.SH "Synopsis"
.PP
\fBmmsnapdir\fR \fIDevice\fR {[\fB-r\fR | \fB-a\fR]
[\fB-s\fR \fISnapDirName\fR]}
.PP
Or,
.PP
\fBmmsnapdir\fR \fIDevice\fR [\fB-q\fR]
.SH "Description"
.PP
Use the \fBmmsnapdir\fR command to create or delete symbolic
links for the snapshots of a GPFS file system, or to change the name of the
snapshots subdirectory.
.PP
Snapshots appear in a hidden subdirectory at the root. The
default name is \fB.snapshots\fR, and is located under the root
directory of the file system. If you prefer to link directly to the
snapshot from each subdirectory rather than traversing through the root
directory, you may create a symbolic link by issuing the \fBmmsnapdir\fR
command with the \fB-a\fR flag (see Example 1). The \fB-a\fR flag of the \fBmmsnapdir\fR
command creates a directory which contains a link into the directory of each
snapshot that exists for the file system. Because GPFS snapshots are
read-only and may be deleted by the \fBmmdelsnapshot\fR command, a standard POSIX compliant
\fBlink\fR does not work.
.PP
If the \fBmmsnapdir\fR command is issued while another snapshot command
is running, the \fBmmsnapdir\fR command waits for that command to
complete.
.PP
Valid links are created only for file systems. Links created for a
snapshot produce an empty directory.
.SH "Parameters"
.PP
.RS +3
\fB\fIDevice\fR
\fR
.RE
.RS +9
The fully-qualified name of the link directory to be created. 
.PP
The path name must be qualified by the parent directory whose snapshots are
being linked to. The name of the link directory must be unique within
that parent directory.
.PP
This must be the first parameter.
.RE
.SH "Options"
.PP
.PP
.RS +3
\fB-a 
\fR
.RE
.RS +9
Adds a snapshots subdirectory to all subdirectories in the file
system.
.RE
.PP
.RS +3
\fB-q
\fR
.RE
.RS +9
Displays current settings, if issued without any other flags.
.RE
.PP
.RS +3
\fB-r
\fR
.RE
.RS +9
Changes back to the default snapshot behavior, in which the
snapshots directory is located under the root directory, not under every
subdirectory of the file system.
.RE
.PP
.RS +3
\fB-s
\fR
.RE
.RS +9
Changes the name of the snapshots subdirectory.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmsnapdir\fR command.
.PP
You may issue the \fBmmsnapdir\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.RS +3
.HP 3
1. To create a link directory called \fB.link\fR
for the file system \fBfs1\fR, enter: 
.sp
.nf
mmsnapdir  fs1 -s .link
.fi
.sp
.sp
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.link/snap1/file1
/fs1/.link/snap1/userA/file2
/fs1/.link/snap1/userA/file3
.fi
.sp
.HP 3
2. Issuing:
.sp
.nf
mmsnapdir fs1 -a
.fi
.sp
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/userA/.link/snap1/file2                 -> hidden directory
/fs1/userA/.link/snap1/file3                 -> hidden directory
/fs1/.link/snap1/file1
/fs1/.link/snap1/userA/file2
/fs1/.link/snap1/userA/file3
.fi
.sp
.HP 3
3. Issuing:
.sp
.nf
mmsnapdir fs1 -r
.fi
.sp
After the command has been issued, the directory structure would appear
similar to:
.sp
.nf
/fs1/file1
/fs1/userA/file2
/fs1/userA/file3
/fs1/.link/snap1/file1
/fs1/.link/snap1/userA/file2
/fs1/.link/snap1/userA/file3
.fi
.sp
.HP 3
4. Issuing:
.sp
.nf
mmsnapdir fs1 -q
.fi
.sp
The output is similar to this:
.sp
.nf
Snapshot directory for "fs1" is ".link" (root directory only)
.fi
.sp
.RE
.SH "See also"
.PP
mmcrsnapshot Command
.PP
mmdelsnapshot Command
.PP
mmlssnapshot Command
.PP
mmrestorefs Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR
me ofk
L J4@$          %A%A%A                   ./usr/share/man/man8/mmstartup.8  .8 8 n         $          $          .TH mmstartup 11/01/04
mmstartup Command
.SH "Name"
.PP
\fBmmstartup\fR - Starts the GPFS subsystem on one or more
nodes.
.SH "Synopsis"
.PP
\fBmmstartup\fR [\fB-a\fR | \fB-W\fR \fINodeFilename\fR |
[ \fB-w\fR
\fINodeName\fR[,\fINodeName\fR...]]
[\fB-n\fR
\fINodeNumber\fR[,\fINodeNumber\fR...]]]
[\fB-E\fR \fIenvVarString\fR]
.SH "Description"
.PP
Use the \fBmmstartup\fR command to start the GPFS daemons on one or more
nodes. If no operand is specified, GPFS is started only on the node
from which the command was issued.
.PP
\fBResults\fR
.PP
Upon successful completion of the \fBmmstartup\fR command, the GPFS
subsystem is started on the specified nodes. 
.RS +3
\fBNote:\fR
.RE
.RS +9
The actual start of the GPFS daemons on the nodes also depends on:
.RS +3
.HP 3
\(bu The availability of all required software for your environment
.HP 3
\(bu The availability of required hardware for your environment
.HP 3
\(bu Quorum requirements
.RE
.PP
For the actual requirements for your operating environment, see the \fIGeneral Parallel File System: Concepts, Planning, and
Installation Guide\fR and search for \fIinstalling GPFS\fR.
.RE
.SH "Parameters"
.PP
.RS +3
\fB-a
\fR
.RE
.RS +9
Start GPFS on all nodes in a GPFS cluster.
.RE
.PP
.RS +3
\fB-n \fINodeNumber\fR
\fR
.RE
.RS +9
A comma-separated list of node numbers on which to start GPFS. This
list is combined with the nodes specified on the \fB-w\fR option.
.RE
.PP
.RS +3
\fB-w \fINodeName\fR[,\fINodeName\fR...]
\fR
.RE
.RS +9
A comma-separated list of hostnames on which to start GPFS. This
list is combined with the nodes specified on the \fB-n\fR option.
.PP
The hostname or IP address must refer to the communications adapter
over which the GPFS daemons communicate. Alias interfaces are not
allowed. Use the original address or a name that is resolved by the
\fBhost\fR command to that original address. You may specify a node
using any of these forms:
.br
.sp
.RS +0.1i
.nf
.TS
tab(~);
 l l.
Format~Example
\fBShort hostname\fR~k145n01
\fBLong hostname\fR~k145n01.kgn.ibm.com
\fBIP address\fR~9.119.19.102
.TE
.sp
.fi
.RE
.RE
.PP
.RS +3
\fB-W \fINodeFilename\fR
\fR
.RE
.RS +9
Start GPFS on all nodes whose hostnames are listed in the file. The
hostnames must be listed one per line.
.RE
.SH "Options"
.PP
.RS +3
\fB\fB-E\fR \fIenvVarString\fR
\fR
.RE
.RS +9
Blank-separated string that specifies the name and value (name=value) for
one or more environment variables to be passed to the GPFS daemon.
Enclose this string in quotes.
.RE
.SH "Exit status"
.PP
.PP
.RS +3
\fB0
\fR
.RE
.RS +9
Successful completion.
.RE
.PP
.RS +3
\fB1
\fR
.RE
.RS +9
A failure has occurred.
.RE
.SH "Security"
.PP
You must have root authority to run the \fBmmstartup\fR command.
.PP
You may issue the \fBmmstartup\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To start GPFS on all nodes in the GPFS cluster, enter: 
.sp
.nf
mmstartup -a
.fi
.sp
.PP
The system displays information similar to:
.sp
.nf
Thu Aug 12 13:22:40 EDT 2004: 6027-1642 mmstartup: Starting GPFS ...
.fi
.sp
.SH "See also"
.PP
mmchfs Command
.PP
mmcrfs Command
.PP
mmlscluster Command
.SH "Location"
.PP
\fB/usr/lpp/mmfs/bin\fR

kB,ndfR command.
.PP
You may issue the \fBmmstartup\fR command from any node in the GPFS
cluster.
.PP
When using the \fBrcp\fR and \fBrsh\fR commands for remote
communication, a properly configured \fB.rhosts\fR file must exist
in the root user's home directory on each node in the GPFS
cluster. If you have designated the use of a different remote
communication program on either the \fBmmcrcluster\fR
or the \fBmmchcluster\fR command, you must
ensure:
.RS +3
.HP 3
1. Proper authorization is granted to all nodes in the GPFS cluster.
.HP 3
2. The nodes in the GPFS cluster can communicate without the use of a
password, and without any extraneous messages.
.RE
.SH "Examples"
.PP
To start GPFS on all nodes in the GPFS cluster, enter: 
.sp
.nf
mmstartup -a
.fi
.sp
.PP
The system