/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"

/*
 * Physical superblock buffer manipulations. Shared with libxfs in userspace.
 */

/*
 * Reference counting access wrappers to the perag structures.
 * Because we never free per-ag structures, the only thing we
 * have to protect against is changes to the tree structure itself.
 */
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}

/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	int			tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					(void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}

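/*
 * Drop a reference obtained via xfs_perag_get() or xfs_perag_get_tag().
 * Per-ag structures are never freed, so this only decrements the
 * reference count and traces the new value.
 */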
void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	int	ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}

/*
 * Check the validity of the superblock we have just read in.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp,
	bool		check_inprogress,
	bool		check_version)
{
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		xfs_warn(mp, "bad magic number");
		return -EWRONGFS;
	}

	if (!xfs_sb_good_version(sbp)) {
		xfs_warn(mp, "bad version");
		return -EWRONGFS;
	}

	/*
	 * Version 5 superblock feature mask validation. Reject combinations the
	 * kernel cannot support up front before checking anything else. For
	 * write validation, we don't need to check feature masks.
	 */
	if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
		if (xfs_sb_has_compat_feature(sbp,
					XFS_SB_FEAT_COMPAT_UNKNOWN)) {
			xfs_warn(mp,
"Superblock has unknown compatible features (0x%x) enabled.\n"
"Using a more recent kernel is recommended.",
				(sbp->sb_features_compat &
						XFS_SB_FEAT_COMPAT_UNKNOWN));
		}

		if (xfs_sb_has_ro_compat_feature(sbp,
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
			xfs_alert(mp,
"Superblock has unknown read-only compatible features (0x%x) enabled.",
				(sbp->sb_features_ro_compat &
						XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
			if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
				xfs_warn(mp,
"Attempted to mount read-only compatible filesystem read-write.\n"
"Filesystem can only be safely mounted read only.");
				return -EINVAL;
			}
		}
		if (xfs_sb_has_incompat_feature(sbp,
					XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
			xfs_warn(mp,
"Superblock has unknown incompatible features (0x%x) enabled.\n"
"Filesystem can not be safely mounted by this kernel.",
				(sbp->sb_features_incompat &
						XFS_SB_FEAT_INCOMPAT_UNKNOWN));
			return -EINVAL;
		}
	}

	if (xfs_sb_version_has_pquotino(sbp)) {
		if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
			xfs_notice(mp,
			   "Version 5 superblock has XFS_OQUOTA bits set.");
			return -EFSCORRUPTED;
		}
	} else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
				XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
			xfs_notice(mp,
"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
			return -EFSCORRUPTED;
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an external log; "
		"specify logdev on the mount command line.");
		return -EINVAL;
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		xfs_warn(mp,
		"filesystem is marked as having an internal log; "
		"do not specify logdev on the mount command line.");
		return -EINVAL;
	}

	/*
	 * More sanity checking.  Most of these were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0					||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
	    sbp->sb_sectsize != (1 << sbp->sb_sectlog)			||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_blocksize != (1 << sbp->sb_blocklog)		||
	    sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
	    sbp->sb_inodesize != (1 << sbp->sb_inodelog)		||
	    sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE			||
	    sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
	    (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */)	||
	    sbp->sb_dblocks == 0					||
	    sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp)			||
	    sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp)			||
	    sbp->sb_shared_vn != 0)) {
		xfs_notice(mp, "SB sanity check failed");
		return -EFSCORRUPTED;
	}

	/*
	 * Until this is fixed, only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				sbp->sb_blocksize, PAGE_SIZE);
		return -ENOSYS;
	}

	/*
	 * Currently only very few inode sizes are supported.
	 */
	switch (sbp->sb_inodesize) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		xfs_warn(mp, "inode size of %d bytes not supported",
				sbp->sb_inodesize);
		return -ENOSYS;
	}

	if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
	    xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		return -EFBIG;
	}

	if (check_inprogress && sbp->sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		return -EFSCORRUPTED;
	}
	return 0;
}

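/*
 * Convert the on-disk quota state into its in-core representation:
 * normalise unset quota inodes to NULLFSINO and, for superblocks that
 * predate a separate project quota inode, translate the combined
 * XFS_OQUOTA_* flags and the shared group/project quota inode field
 * into their in-core equivalents.
 */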
void
xfs_sb_quota_from_disk(struct xfs_sb *sbp)
{
	/*
	 * Older mkfs doesn't initialize quota inodes to NULLFSINO. This
	 * leaves two different in-core values that mark a quota inode as
	 * invalid: 0 and NULLFSINO. Normalise them to the single value
	 * NULLFSINO.
	 *
	 * Note that this change affects only the in-core values. These
	 * values are not written back to disk unless some quota information
	 * is written to the disk. Even in that case, the sb_pquotino field
	 * is not written to disk unless the superblock supports pquotino.
	 */
	if (sbp->sb_uquotino == 0)
		sbp->sb_uquotino = NULLFSINO;
	if (sbp->sb_gquotino == 0)
		sbp->sb_gquotino = NULLFSINO;
	if (sbp->sb_pquotino == 0)
		sbp->sb_pquotino = NULLFSINO;

	/*
	 * We only need to do these manipulations if we are working
	 * with an older version of the on-disk superblock.
	 */
	if (xfs_sb_version_has_pquotino(sbp))
		return;

	if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
		sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
					XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD;
	if (sbp->sb_qflags & XFS_OQUOTA_CHKD)
		sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
					XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
	sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);

	if (sbp->sb_qflags & XFS_PQUOTA_ACCT) {
		/*
		 * In older versions of the superblock, the on-disk format
		 * only has sb_gquotino, while the in-core superblock has
		 * both sb_gquotino and sb_pquotino. However, only one of
		 * them can be in use at any point in time. So, if PQUOTA
		 * is set in the on-disk superblock, copy sb_gquotino over
		 * to sb_pquotino.
		 */
		sbp->sb_pquotino = sbp->sb_gquotino;
		sbp->sb_gquotino = NULLFSINO;
	}
}

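/*
 * Convert an on-disk superblock into the in-core format. Conversion of the
 * quota flags is optional because xfs_mount_validate_sb() needs to check
 * the raw on-disk flag state before it is rewritten.
 */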
static void
__xfs_sb_from_disk(
	struct xfs_sb	*to,
	xfs_dsb_t	*from,
	bool		convert_xquota)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
	to->sb_rextents = be64_to_cpu(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = be64_to_cpu(from->sb_logstart);
	to->sb_rootino = be64_to_cpu(from->sb_rootino);
	to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
	to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
	to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
	to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
	to->sb_agcount = be32_to_cpu(from->sb_agcount);
	to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
	to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
	to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
	to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = be64_to_cpu(from->sb_icount);
	to->sb_ifree = be64_to_cpu(from->sb_ifree);
	to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
	to->sb_frextents = be64_to_cpu(from->sb_frextents);
	to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
	to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
	to->sb_qflags = be16_to_cpu(from->sb_qflags);
	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
	to->sb_unit = be32_to_cpu(from->sb_unit);
	to->sb_width = be32_to_cpu(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
	to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
	to->sb_features2 = be32_to_cpu(from->sb_features2);
	to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
	to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
	to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
	to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
	to->sb_features_log_incompat =
				be32_to_cpu(from->sb_features_log_incompat);
	/* crc is only used on disk, not in memory; just init to 0 here. */
	to->sb_crc = 0;
	to->sb_pad = 0;
	to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
	to->sb_lsn = be64_to_cpu(from->sb_lsn);
	/* Convert on-disk flags to in-memory flags? */
	if (convert_xquota)
		xfs_sb_quota_from_disk(to);
}

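/* Convert an on-disk superblock to in-core format, including quota flags. */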
void
xfs_sb_from_disk(
	struct xfs_sb	*to,
	xfs_dsb_t	*from)
{
	__xfs_sb_from_disk(to, from, true);
}

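/*
 * Convert the in-core quota state to its on-disk representation. For
 * superblocks without a separate project quota inode, fold the in-core
 * per-type enforcement/check flags back into the legacy XFS_OQUOTA_* flags
 * and store whichever of the group or project quota inodes is active in
 * the shared on-disk sb_gquotino field.
 */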
static void
xfs_sb_quota_to_disk(
	struct xfs_dsb	*to,
	struct xfs_sb	*from)
{
	__uint16_t	qflags = from->sb_qflags;

	to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
	if (xfs_sb_version_has_pquotino(from)) {
		to->sb_qflags = cpu_to_be16(from->sb_qflags);
		to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
		to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
		return;
	}

	/*
	 * The in-core version of sb_qflags does not have the XFS_OQUOTA_*
	 * flags, whereas the on-disk version does.  So, convert the incore
	 * XFS_{PG}QUOTA_* flags to on-disk XFS_OQUOTA_* flags.
	 */
	qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
			XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);

	if (from->sb_qflags &
			(XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
		qflags |= XFS_OQUOTA_ENFD;
	if (from->sb_qflags &
			(XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
		qflags |= XFS_OQUOTA_CHKD;
	to->sb_qflags = cpu_to_be16(qflags);

	/*
	 * GQUOTINO and PQUOTINO cannot be used together in versions
	 * of superblock that do not have pquotino. from->sb_qflags
	 * tells us which quota is active and should be copied to
	 * disk. If neither is active, we should NULL the inode.
	 *
	 * In all cases, the separate pquotino must remain 0 because
	 * it is beyond the "end" of the valid non-pquotino superblock.
	 */
	if (from->sb_qflags & XFS_GQUOTA_ACCT)
		to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
	else if (from->sb_qflags & XFS_PQUOTA_ACCT)
		to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
	else {
		/*
		 * We can't rely on just the fields being logged to tell us
		 * that it is safe to write NULLFSINO - we should only do that
		 * if quotas are not actually enabled. Hence only write
		 * NULLFSINO if both in-core quota inodes are NULL.
		 */
		if (from->sb_gquotino == NULLFSINO &&
		    from->sb_pquotino == NULLFSINO)
			to->sb_gquotino = cpu_to_be64(NULLFSINO);
	}

	to->sb_pquotino = 0;
}

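/*
 * Convert the in-core superblock to its on-disk format. The version 5
 * feature masks, padding and LSN are only written for CRC-enabled
 * superblocks.
 */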
void
xfs_sb_to_disk(
	struct xfs_dsb	*to,
	struct xfs_sb	*from)
{
	xfs_sb_quota_to_disk(to, from);

	to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
	to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
	to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
	to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
	to->sb_rextents = cpu_to_be64(from->sb_rextents);
	memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
	to->sb_logstart = cpu_to_be64(from->sb_logstart);
	to->sb_rootino = cpu_to_be64(from->sb_rootino);
	to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
	to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
	to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
	to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
	to->sb_agcount = cpu_to_be32(from->sb_agcount);
	to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
	to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
	to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
	to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
	to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
	to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
	memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
	to->sb_blocklog = from->sb_blocklog;
	to->sb_sectlog = from->sb_sectlog;
	to->sb_inodelog = from->sb_inodelog;
	to->sb_inopblog = from->sb_inopblog;
	to->sb_agblklog = from->sb_agblklog;
	to->sb_rextslog = from->sb_rextslog;
	to->sb_inprogress = from->sb_inprogress;
	to->sb_imax_pct = from->sb_imax_pct;
	to->sb_icount = cpu_to_be64(from->sb_icount);
	to->sb_ifree = cpu_to_be64(from->sb_ifree);
	to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
	to->sb_frextents = cpu_to_be64(from->sb_frextents);

	to->sb_flags = from->sb_flags;
	to->sb_shared_vn = from->sb_shared_vn;
	to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
	to->sb_unit = cpu_to_be32(from->sb_unit);
	to->sb_width = cpu_to_be32(from->sb_width);
	to->sb_dirblklog = from->sb_dirblklog;
	to->sb_logsectlog = from->sb_logsectlog;
	to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
	to->sb_logsunit = cpu_to_be32(from->sb_logsunit);

	/*
	 * We need to ensure that bad_features2 always matches features2.
	 * Hence we enforce that here rather than having to remember to do it
	 * everywhere else that updates features2.
	 */
	from->sb_bad_features2 = from->sb_features2;
	to->sb_features2 = cpu_to_be32(from->sb_features2);
	to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);

	if (xfs_sb_version_hascrc(from)) {
		to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
		to->sb_features_ro_compat =
				cpu_to_be32(from->sb_features_ro_compat);
		to->sb_features_incompat =
				cpu_to_be32(from->sb_features_incompat);
		to->sb_features_log_incompat =
				cpu_to_be32(from->sb_features_log_incompat);
		to->sb_pad = 0;
		to->sb_lsn = cpu_to_be64(from->sb_lsn);
	}
}

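/*
 * Verification work shared by the read and write verifiers: convert the
 * on-disk superblock into a temporary in-core copy and run the full
 * validity checks on it.
 */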
static int
xfs_sb_verify(
	struct xfs_buf	*bp,
	bool		check_version)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_sb	sb;

	/*
	 * Use the call variant which doesn't convert quota flags from disk
	 * format, because xfs_mount_validate_sb checks the on-disk flags.
	 */
	__xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);

	/*
	 * Only check the in progress field for the primary superblock as
	 * mkfs.xfs doesn't clear it from secondary superblocks.
	 */
	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
				     check_version);
}

/*
 * If the superblock has the CRC feature bit set or the CRC field is non-zero,
 * check that the CRC is valid.  We check the CRC field is non-zero because a
 * single bit error could clear the feature bit and unused parts of the
 * superblock are supposed to be zero. Hence a non-zero crc field indicates
 * that we've potentially lost a feature bit and we should check it anyway.
 *
 * However, past bugs (i.e. in growfs) left non-zeroed regions beyond the
 * last field in V4 secondary superblocks.  So for secondary superblocks,
 * we are more forgiving, and ignore CRC failures if the primary doesn't
 * indicate that the fs version is V5.
 */
static void
xfs_sb_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);
	int		error;

	/*
	 * open code the version check to avoid needing to convert the entire
	 * superblock from disk order just to check the version number
	 */
	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
	    (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
						XFS_SB_VERSION_5) ||
	     dsb->sb_crc != 0)) {

		if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF)) {
			/* Only fail bad secondaries on a known V5 filesystem */
			if (bp->b_bn == XFS_SB_DADDR ||
			    xfs_sb_version_hascrc(&mp->m_sb)) {
				error = -EFSBADCRC;
				goto out_error;
			}
		}
	}
	error = xfs_sb_verify(bp, true);

out_error:
	if (error) {
		xfs_buf_ioerror(bp, error);
		if (error == -EFSCORRUPTED || error == -EFSBADCRC)
			xfs_verifier_error(bp);
	}
}

/*
 * We may be probed for a filesystem match, so we may not want to emit
 * messages when the superblock buffer is not actually an XFS superblock.
 * If we find an XFS superblock, then run a normal, noisy mount because we are
 * really going to mount it and want to know about errors.
 */
static void
xfs_sb_quiet_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_dsb	*dsb = XFS_BUF_TO_SBP(bp);

	if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
		/* XFS filesystem, verify noisily! */
		xfs_sb_read_verify(bp);
		return;
	}
	/* quietly fail */
	xfs_buf_ioerror(bp, -EWRONGFS);
}

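/*
 * Write verifier: re-run the superblock sanity checks and, on CRC-enabled
 * filesystems, stamp the LSN of the last modification and recalculate the
 * CRC before the buffer is written to disk.
 */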
static void
xfs_sb_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	int			error;

	error = xfs_sb_verify(bp, false);
	if (error) {
		xfs_buf_ioerror(bp, error);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
}

const struct xfs_buf_ops xfs_sb_buf_ops = {
	.verify_read = xfs_sb_read_verify,
	.verify_write = xfs_sb_write_verify,
};

const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
	.verify_read = xfs_sb_quiet_read_verify,
	.verify_write = xfs_sb_write_verify,
};

/*
 * xfs_sb_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure.
 */
void
xfs_sb_mount_common(
	struct xfs_mount *mp,
	struct xfs_sb	*sbp)
{
	mp->m_agfrotor = mp->m_agirotor = 0;
	spin_lock_init(&mp->m_agirotor_lock);
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;

	mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
	mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;

	mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
	mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;

	mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
	mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
	mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
	mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_perag_t	*pag;
	xfs_sb_t	*sbp = &mp->m_sb;
	uint64_t	ifree = 0;
	uint64_t	ialloc = 0;
	uint64_t	bfree = 0;
	uint64_t	bfreelst = 0;
	uint64_t	btree = 0;
	int		error;

	for (index = 0; index < agcount; index++) {
		/*
		 * read the agf, then the agi. This gets us
		 * all the information we need and populates the
		 * per-ag structures for us.
		 */
		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
		if (error)
			return error;

		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = xfs_perag_get(mp, index);
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = bfree + bfreelst + btree;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);

	return 0;
}

/*
 * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
 * into the superblock buffer to be logged.  It does not provide the higher
 * level of locking that is needed to protect the in-core superblock from
 * concurrent access.
 */
void
xfs_log_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp = xfs_trans_getsb(tp, mp, 0);

	mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
	mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
	mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
}

/*
 * xfs_sync_sb
 *
 * Sync the superblock to disk.
 *
 * Note that the caller is responsible for checking the frozen state of the
 * filesystem. This procedure uses the non-blocking transaction allocator and
 * thus will allow modifications to a frozen fs. This is required because this
 * code can be called during the process of freezing where use of the
 * high-level allocator would deadlock.
 */
int
xfs_sync_sb(
	struct xfs_mount	*mp,
	bool			wait)
{
	struct xfs_trans	*tp;
	int			error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_log_sb(tp);
	if (wait)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}