Message-ID: <202108200334.1MORkGXI-lkp@intel.com>
Date: Fri, 20 Aug 2021 03:46:42 +0800
From: kernel test robot <lkp@...el.com>
To: David Howells <dhowells@...hat.com>
Cc: kbuild-all@...ts.01.org, linux-kernel@...r.kernel.org
Subject: [dhowells-fs:netfs-folio-regions 22/28] fs/netfs/write_back.c:763:17: warning: enumeration value 'NETFS_REGION_IS_PENDING' not handled in switch
tree: https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git netfs-folio-regions
head: 215a4ee495a95cc73256ed76f91cb78bcabd6b8e
commit: 1e4f2a70cbedd75d80440425727faa78e2dccb93 [22/28] netfs: Cut out region to be written from dirty regions
config: alpha-randconfig-r011-20210819 (attached as .config)
compiler: alpha-linux-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/commit/?id=1e4f2a70cbedd75d80440425727faa78e2dccb93
        git remote add dhowells-fs https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git
        git fetch --no-tags dhowells-fs netfs-folio-regions
        git checkout 1e4f2a70cbedd75d80440425727faa78e2dccb93
        # save the attached .config to the linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross ARCH=alpha
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@...el.com>
All warnings (new ones prefixed by >>):
   fs/netfs/write_back.c: In function 'netfs_flush_dirty':
>> fs/netfs/write_back.c:763:17: warning: enumeration value 'NETFS_REGION_IS_PENDING' not handled in switch [-Wswitch]
     763 |                 switch (READ_ONCE(r->state)) {
         |                 ^~~~~~
>> fs/netfs/write_back.c:763:17: warning: enumeration value 'NETFS_REGION_IS_RESERVED' not handled in switch [-Wswitch]
>> fs/netfs/write_back.c:763:17: warning: enumeration value 'NETFS_REGION_IS_COMPLETE' not handled in switch [-Wswitch]
vim +/NETFS_REGION_IS_PENDING +763 fs/netfs/write_back.c
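For context, the three unhandled values together with the three cases the
function does handle imply a region state enum along these lines (the enum's
name, ordering and completeness here are assumptions inferred only from the
diagnostics and the code below, not taken from the tree):

    enum netfs_region_state {
            NETFS_REGION_IS_PENDING,        /* reported unhandled */
            NETFS_REGION_IS_RESERVED,       /* reported unhandled */
            NETFS_REGION_IS_ACTIVE,         /* handled at line 763 */
            NETFS_REGION_IS_DIRTY,          /* handled at line 763 */
            NETFS_REGION_IS_FLUSHING,       /* handled at line 763 */
            NETFS_REGION_IS_COMPLETE,       /* reported unhandled */
    };

Because the switch at line 763 has no default label, gcc's -Wswitch flags
each enumerator that lacks a case.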
   623
   624  /*
   625   * Flush some of the dirty queue, transforming a part of a sequence of dirty
   626   * regions into a block we can flush.
   627   *
   628   * A number of things constrain us:
   629   *  - The region we write out should not be undergoing modification
   630   *  - We may need to expand or split the region for a number of reasons:
   631   *    - Filesystem storage block/object size
   632   *    - Filesystem RPC size (wsize)
   633   *    - Cache block size
   634   *    - Cache DIO block size
   635   *    - Crypto/compression block size
   636   */
   637  static int netfs_flush_dirty(struct address_space *mapping,
   638                               struct writeback_control *wbc,
   639                               struct netfs_range *requested,
   640                               loff_t *next)
   641  {
   642          struct netfs_i_context *ctx = netfs_i_context(mapping->host);
   643          struct netfs_dirty_region *spares[2] = {};
   644          struct netfs_dirty_region *head = NULL, *tail = NULL, *r, *q;
   645          struct netfs_range block;
   646          unsigned long long dirty_start, dirty_to, active_from, limit;
   647          unsigned int wsize = ctx->wsize;
   648          unsigned int min_bsize = 1U << ctx->min_bshift;
   649          int ret;
   650
   651          kenter("%llx-%llx", requested->start, requested->end);
   652
   653          BUG_ON(!wsize);
   654
   655          /* For the moment, place certain restrictions when content crypto is in
   656           * use so that we don't write a partial block and corrupt part of the
   657           * file into unreadability.
   658           */
   659          if (ctx->crypto_bshift) {
   660                  /* If object storage is in use, we don't want a crypto block to
   661                   * be split across multiple objects.
   662                   */
   663                  if (ctx->obj_bshift &&
   664                      ctx->crypto_bshift > ctx->obj_bshift) {
   665                          pr_err_ratelimited("Crypto blocksize (2^%u) > objsize (2^%u)\n",
   666                                             ctx->crypto_bshift, ctx->obj_bshift);
   667                          return -EIO;
   668                  }
   669
   670                  /* We must be able to write a crypto block in its entirety in a
   671                   * single RPC call if we're going to do the write atomically.
   672                   */
   673                  if ((1U << ctx->crypto_bshift) > wsize) {
   674                          pr_err_ratelimited("Crypto blocksize (2^%u) > wsize (%u)\n",
   675                                             ctx->crypto_bshift, wsize);
   676                          return -EIO;
   677                  }
   678          }
   679
   680          /* Round the requested region out to the minimum block size (eg. for
   681           * crypto purposes).
   682           */
   683          requested->start = round_down(requested->start, min_bsize);
   684          requested->end   = round_up  (requested->end, min_bsize);
   685
   686  retry:
   687          ret = 0;
   688
   689          spin_lock(&ctx->lock);
   690
   691          /* Find the first dirty region that overlaps the requested flush region */
   692          list_for_each_entry(r, &ctx->dirty_regions, dirty_link) {
   693                  kdebug("query D=%x", r->debug_id);
   694                  if (r->dirty.end <= requested->start ||
   695                      r->dirty.end == r->dirty.start)
   696                          continue;
   697                  if (READ_ONCE(r->state) == NETFS_REGION_IS_FLUSHING)
   698                          continue;
   699                  if (r->dirty.start >= requested->end)
   700                          goto out;
   701                  head = r;
   702                  break;
   703          }
   704
   705          if (!head || head->dirty.start >= requested->end)
   706                  goto out;
   707
   708          /* Determine where we're going to start and the limits on where we
   709           * might end.
   710           */
   711          dirty_start = round_down(head->dirty.start, min_bsize);
   712          kdebug("dirty D=%x start %llx", head->debug_id, dirty_start);
   713
   714          if (ctx->obj_bshift) {
   715                  /* Handle object storage - we limit the write to one object,
   716                   * but we round down the start if there's more dirty data that
   717                   * way.
   718                   */
   719                  unsigned long long obj_start;
   720                  unsigned long long obj_size = 1ULL << ctx->obj_bshift;
   721                  unsigned long long obj_end;
   722
   723                  obj_start = max(requested->start, dirty_start);
   724                  obj_start = round_down(obj_start, obj_size);
   725                  obj_end = obj_start + obj_size;
   726                  kdebug("object %llx-%llx", obj_start, obj_end);
   727
   728                  block.start = max(dirty_start, obj_start);
   729                  limit = min(requested->end, obj_end);
   730                  kdebug("limit %llx", limit);
   731                  if (limit - block.start > wsize) {
   732                          kdebug("size %llx", limit - block.start);
   733                          block.start = max(block.start, requested->start);
   734                          limit = min(requested->end,
   735                                      block.start + round_down(wsize, min_bsize));
   736                  }
   737                  kdebug("object %llx-%llx", block.start, limit);
   738          } else if (min_bsize > 1) {
   739                  /* There's a block size (cache DIO, crypto). */
   740                  block.start = max(dirty_start, requested->start);
   741                  if (wsize > min_bsize) {
   742                          /* A single write can encompass several blocks. */
   743                          limit = block.start + round_down(wsize, min_bsize);
   744                          limit = min(limit, requested->end);
   745                  } else {
   746                          /* The block will need several writes to send it. */
   747                          limit = block.start + min_bsize;
   748                  }
   749                  kdebug("block %llx-%llx", block.start, limit);
   750          } else {
   751                  /* No blocking factors and no object division. */
   752                  block.start = max(dirty_start, requested->start);
   753                  limit = min(block.start + wsize, requested->end);
   754                  kdebug("plain %llx-%llx", block.start, limit);
   755          }
   756
   757          /* Determine the subset of dirty regions that are going to contribute. */
   758          r = head;
   759          list_for_each_entry_from(r, &ctx->dirty_regions, dirty_link) {
   760                  kdebug("- maybe D=%x", r->debug_id);
   761                  if (r->dirty.start >= limit)
   762                          break;
 > 763                  switch (READ_ONCE(r->state)) {
   764                  case NETFS_REGION_IS_DIRTY:
   765                          tail = r;
   766                          continue;
   767                  case NETFS_REGION_IS_FLUSHING:
   768                          limit = round_down(r->dirty.start, min_bsize);
   769                          goto determined_tail;
   770                  case NETFS_REGION_IS_ACTIVE:
   771                          /* We can break off part of a region undergoing active
   772                           * modification, but assume, for now, that we don't
   773                           * want to include anything that will change under us
   774                           * or that's only partially uptodate - especially if
   775                           * we're going to be encrypting or compressing from it.
   776                           */
   777                          dirty_to = READ_ONCE(r->dirty.end);
   778                          active_from = round_down(dirty_to, min_bsize);
   779                          kdebug("active D=%x from %llx", r->debug_id, active_from);
   780                          if (active_from > limit) {
   781                                  kdebug(" - >limit");
   782                                  tail = r;
   783                                  goto determined_tail;
   784                          }
   785
   786                          limit = active_from;
   787                          if (r->dirty.start < limit) {
   788                                  kdebug(" - reduce limit");
   789                                  tail = r;
   790                                  goto determined_tail;
   791                          }
   792
   793                          if (limit == block.start || r == head)
   794                                  goto wait_for_active_region;
   795
   796                          if (limit == r->dirty.start) {
   797                                  kdebug("- active contig");
   798                                  goto determined_tail;
   799                          }
   800
   801                          /* We may need to rewind the subset we're collecting. */
   802                          q = r;
   803                          list_for_each_entry_continue_reverse(q, &ctx->dirty_regions,
   804                                                               dirty_link) {
   805                                  kdebug(" - rewind D=%x", q->debug_id);
   806                                  tail = q;
   807                                  if (q->dirty.start < limit)
   808                                          goto determined_tail;
   809                                  if (q == head) {
   810                                          kdebug("over rewound");
   811                                          ret = -EAGAIN;
   812                                          goto out;
   813                                  }
   814                          }
   815                          goto wait_for_active_region;
   816                  }
   817          }
   818
   819  determined_tail:
   820          if (!tail) {
   821                  kdebug("netfs: no tail\n");
   822                  ret = -EAGAIN;
   823                  goto out;
   824          }
   825          dirty_to = round_up(tail->dirty.end, min_bsize);
   826          kdebug("dto %llx", dirty_to);
   827          block.end = min(dirty_to, limit);
   828          kdebug("block %llx-%llx", block.start, block.end);
   829
   830          /* If the leading and/or trailing edges of the selected regions overlap
   831           * the ends of the block, we will need to split those blocks.
   832           */
   833          if ((dirty_start < block.start && !spares[0]) ||
   834              (tail->dirty.end > block.end && !spares[1])) {
   835                  spin_unlock(&ctx->lock);
   836                  kdebug("need spares");
   837                  goto need_spares;
   838          }
   839
   840          if (dirty_start < block.start) {
   841                  kdebug("eject front");
   842                  netfs_split_off_front(ctx, head, &spares[0], block.start);
   843          }
   844
   845          if (tail->dirty.end > block.end) {
   846                  kdebug("eject back");
   847                  r = netfs_split_off_front(ctx, tail, &spares[1], block.end);
   848                  if (head == tail)
   849                          head = r;
   850                  tail = r;
   851          }
   852
   853          /* Flip all the regions to flushing */
   854          r = head;
   855          kdebug("mark from D=%x", r->debug_id);
   856          list_for_each_entry_from(r, &ctx->dirty_regions, dirty_link) {
   857                  kdebug("- flush D=%x", r->debug_id);
   858                  set_bit(NETFS_REGION_FLUSH_Q, &r->flags);
   859                  smp_store_release(&r->state, NETFS_REGION_IS_FLUSHING);
   860                  trace_netfs_dirty(ctx, r, NULL, netfs_dirty_trace_flushing);
   861                  wake_up_var(&r->state);
   862                  list_move_tail(&r->flush_link, &ctx->flush_queue);
   863                  if (r == tail)
   864                          break;
   865          }
   866
   867          requested->start = block.end;
   868  out:
   869          spin_unlock(&ctx->lock);
   870
   871  out_unlocked:
   872          netfs_free_dirty_region(ctx, spares[0]);
   873          netfs_free_dirty_region(ctx, spares[1]);
   874          kleave(" = %d", ret);
   875          return ret;
   876
   877  wait_for_active_region:
   878          /* We have to wait for an active region to progress */
   879          kdebug("- wait for active %x", r->debug_id);
   880          set_bit(NETFS_REGION_FLUSH_Q, &r->flags);
   881
   882          if (wbc->sync_mode == WB_SYNC_NONE) {
   883                  ret = -EBUSY;
   884                  goto out;
   885          }
   886
   887          netfs_get_dirty_region(ctx, r, netfs_region_trace_get_wait_active);
   888          spin_unlock(&ctx->lock);
   889
   890          wait_var_event(&r->state, (READ_ONCE(r->state) != NETFS_REGION_IS_ACTIVE ||
   891                                     READ_ONCE(r->dirty.end) != dirty_to));
   892          netfs_put_dirty_region(ctx, r, netfs_region_trace_put_wait_active);
   893
   894  need_spares:
   895          ret = -ENOMEM;
   896          spares[0] = netfs_alloc_dirty_region();
   897          if (!spares[0])
   898                  goto out_unlocked;
   899          spares[1] = netfs_alloc_dirty_region();
   900          if (!spares[1])
   901                  goto out_unlocked;
   902          goto retry;
   903  }
   904
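If the warning is judged worth fixing rather than suppressing, one approach
(sketched here; whether these states can genuinely be skipped is a design
decision for the patch author) is to list the remaining states explicitly
rather than add a default label, so that -Wswitch keeps protecting against
future additions to the enum:

                case NETFS_REGION_IS_PENDING:
                case NETFS_REGION_IS_RESERVED:
                case NETFS_REGION_IS_COMPLETE:
                        /* Assumption: these regions hold no flushable data,
                         * so they don't contribute to the block being
                         * assembled and the scan moves on.
                         */
                        continue;

Appending these labels to the switch at line 763 would silence all three
warnings while preserving the diagnostic for any enumerator added later.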
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org