ntfs: remove (un)?likely() from IS_ERR() conditions

"likely(!IS_ERR(x))" is excessive. IS_ERR() already uses
unlikely() internally.
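
For reference, the branch hint already lives inside IS_ERR() itself. Below is a simplified, self-contained sketch of the relevant definitions (modeled on include/linux/err.h and the unlikely() macro from include/linux/compiler.h; not the verbatim kernel source):

	/* Simplified sketch of the kernel helpers, not the verbatim source. */
	#include <stdbool.h>

	/* Branch-prediction hint: the condition is expected to be false. */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	#define MAX_ERRNO	4095

	/* The unlikely() hint is already part of the error-pointer test. */
	#define IS_ERR_VALUE(x) \
		unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

	static inline bool IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}

Wrapping a call such as IS_ERR(page) in likely() or unlikely() therefore adds no prediction information the compiler does not already have. Where a compound condition still benefits from a hint, the patch keeps unlikely() only around the non-IS_ERR() part, e.g. IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0).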

Link: http://lkml.kernel.org/r/20190829165025.15750-11-efremov@linux.com
Signed-off-by: Denis Efremov <efremov@linux.com>
Cc: Anton Altaparmakov <anton@tuxera.com>
Cc: Joe Perches <joe@perches.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Denis Efremov, 2019-09-25 16:49:43 -07:00 (committed by Linus Torvalds)
parent 7b0b692594
commit cc22c800e1
4 changed files with 9 additions and 9 deletions

fs/ntfs/mft.c

@@ -71,7 +71,7 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
}
/* Read, map, and pin the page. */
page = ntfs_map_page(mft_vi->i_mapping, index);
-	if (likely(!IS_ERR(page))) {
+	if (!IS_ERR(page)) {
/* Catch multi sector transfer fixup errors. */
if (likely(ntfs_is_mft_recordp((le32*)(page_address(page) +
ofs)))) {
@@ -154,7 +154,7 @@ MFT_RECORD *map_mft_record(ntfs_inode *ni)
mutex_lock(&ni->mrec_lock);
m = map_mft_record_page(ni);
-	if (likely(!IS_ERR(m)))
+	if (!IS_ERR(m))
return m;
mutex_unlock(&ni->mrec_lock);
@@ -271,7 +271,7 @@ MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
m = map_mft_record(ni);
/* map_mft_record() has incremented this on success. */
atomic_dec(&ni->count);
-	if (likely(!IS_ERR(m))) {
+	if (!IS_ERR(m)) {
/* Verify the sequence number. */
if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
ntfs_debug("Done 1.");
@@ -1303,7 +1303,7 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mftbmp_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mftbmp_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
-	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+	if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mftbmp_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft bitmap attribute.");
@@ -1734,7 +1734,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
read_unlock_irqrestore(&mft_ni->size_lock, flags);
rl = ntfs_attr_find_vcn_nolock(mft_ni,
(ll - 1) >> vol->cluster_size_bits, NULL);
-	if (unlikely(IS_ERR(rl) || !rl->length || rl->lcn < 0)) {
+	if (IS_ERR(rl) || unlikely(!rl->length || rl->lcn < 0)) {
up_write(&mft_ni->runlist.lock);
ntfs_error(vol->sb, "Failed to determine last allocated "
"cluster of mft data attribute.");
@@ -1776,7 +1776,7 @@ static int ntfs_mft_data_extend_allocation_nolock(ntfs_volume *vol)
do {
rl2 = ntfs_cluster_alloc(vol, old_last_vcn, nr, lcn, MFT_ZONE,
true);
-		if (likely(!IS_ERR(rl2)))
+		if (!IS_ERR(rl2))
break;
if (PTR_ERR(rl2) != -ENOSPC || nr == min_nr) {
ntfs_error(vol->sb, "Failed to allocate the minimal "

fs/ntfs/namei.c

@@ -115,7 +115,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
dent_ino = MREF(mref);
ntfs_debug("Found inode 0x%lx. Calling ntfs_iget.", dent_ino);
dent_inode = ntfs_iget(vol->sb, dent_ino);
-		if (likely(!IS_ERR(dent_inode))) {
+		if (!IS_ERR(dent_inode)) {
/* Consistency check. */
if (is_bad_inode(dent_inode) || MSEQNO(mref) ==
NTFS_I(dent_inode)->seq_no ||

fs/ntfs/runlist.c

@@ -951,7 +951,7 @@ mpa_err:
}
/* Now combine the new and old runlists checking for overlaps. */
old_rl = ntfs_runlists_merge(old_rl, rl);
-	if (likely(!IS_ERR(old_rl)))
+	if (!IS_ERR(old_rl))
return old_rl;
ntfs_free(rl);
ntfs_error(vol->sb, "Failed to merge runlists.");

fs/ntfs/super.c

@@ -1475,7 +1475,7 @@ not_enabled:
kfree(name);
/* Get the inode. */
tmp_ino = ntfs_iget(vol->sb, MREF(mref));
-	if (unlikely(IS_ERR(tmp_ino) || is_bad_inode(tmp_ino))) {
+	if (IS_ERR(tmp_ino) || unlikely(is_bad_inode(tmp_ino))) {
if (!IS_ERR(tmp_ino))
iput(tmp_ino);
ntfs_error(vol->sb, "Failed to load $UsnJrnl.");