Message-ID: <201704130449.OPGovLKq%fengguang.wu@intel.com>
Date:   Thu, 13 Apr 2017 04:19:22 +0800
From:   kbuild test robot <lkp@...el.com>
To:     Paolo Valente <paolo.valente@...aro.org>
Cc:     kbuild-all@...org, Jens Axboe <axboe@...nel.dk>,
        Tejun Heo <tj@...nel.org>,
        Fabio Checconi <fchecconi@...il.com>,
        Arianna Avanzini <avanzini.arianna@...il.com>,
        linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
        ulf.hansson@...aro.org, linus.walleij@...aro.org,
        broonie@...nel.org, Paolo Valente <paolo.valente@...aro.org>
Subject: Re: [PATCH V3 01/16] block, bfq: introduce the BFQ-v0 I/O scheduler
 as an extra scheduler

Hi Paolo,

[auto build test ERROR on block/for-next]
[also build test ERROR on v4.11-rc6 next-20170412]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Paolo-Valente/Introduce-the-BFQ-I-O-scheduler/20170412-021320
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
config: blackfin-allyesconfig (attached as .config)
compiler: bfin-uclinux-gcc (GCC) 6.2.0
reproduce:
        wget https://raw.githubusercontent.com/01org/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=blackfin 
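
If rebuilding the whole allyesconfig tree is too slow, building only the failing object should be enough to reproduce the errors below (a hedged variant of the steps above: kbuild accepts an object path as the make target, and this assumes the attached .config has already been copied into the build tree):

        # rebuild just the file that fails
        make.cross ARCH=blackfin block/bfq-iosched.o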

Note: the linux-review/Paolo-Valente/Introduce-the-BFQ-I-O-scheduler/20170412-021320 HEAD 36eb6533f8b6705991185201f75e98880cd223f7 builds fine.
      It only hurts bisectability.

All errors/warnings (new ones prefixed by >>):

   block/bfq-iosched.c: In function 'bfq_update_peak_rate':
>> block/bfq-iosched.c:2674:6: error: 'delta_usecs' undeclared (first use in this function)
     if (delta_usecs < 1000) {
         ^~~~~~~~~~~
   block/bfq-iosched.c:2674:6: note: each undeclared identifier is reported only once for each function it appears in
>> block/bfq-iosched.c:2739:22: error: invalid storage class for function 'bfq_smallest_from_now'
    static unsigned long bfq_smallest_from_now(void)
                         ^~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:2739:1: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
    static unsigned long bfq_smallest_from_now(void)
    ^~~~~~
>> block/bfq-iosched.c:2774:13: error: invalid storage class for function 'bfq_bfqq_expire'
    static void bfq_bfqq_expire(struct bfq_data *bfqd,
                ^~~~~~~~~~~~~~~
>> block/bfq-iosched.c:2823:13: error: invalid storage class for function 'bfq_bfqq_budget_timeout'
    static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
                ^~~~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:2839:13: error: invalid storage class for function 'bfq_may_expire_for_budg_timeout'
    static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
                ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:2858:13: error: invalid storage class for function 'bfq_bfqq_may_idle'
    static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
                ^~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:2901:13: error: invalid storage class for function 'bfq_bfqq_must_idle'
    static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
                ^~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:2913:26: error: invalid storage class for function 'bfq_select_queue'
    static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
                             ^~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3012:24: error: invalid storage class for function 'bfq_dispatch_rq_from_bfqq'
    static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
                           ^~~~~~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3044:13: error: invalid storage class for function 'bfq_has_work'
    static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
                ^~~~~~~~~~~~
>> block/bfq-iosched.c:3056:24: error: invalid storage class for function '__bfq_dispatch_request'
    static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
                           ^~~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3141:24: error: invalid storage class for function 'bfq_dispatch_request'
    static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
                           ^~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3160:13: error: invalid storage class for function 'bfq_put_queue'
    static void bfq_put_queue(struct bfq_queue *bfqq)
                ^~~~~~~~~~~~~
>> block/bfq-iosched.c:3173:13: error: invalid storage class for function 'bfq_exit_bfqq'
    static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
                ^~~~~~~~~~~~~
>> block/bfq-iosched.c:3185:13: error: invalid storage class for function 'bfq_exit_icq_bfqq'
    static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
                ^~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3203:13: error: invalid storage class for function 'bfq_exit_icq'
    static void bfq_exit_icq(struct io_cq *icq)
                ^~~~~~~~~~~~
>> block/bfq-iosched.c:3216:1: error: invalid storage class for function 'bfq_set_next_ioprio_data'
    bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
    ^~~~~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3262:13: error: invalid storage class for function 'bfq_check_ioprio_change'
    static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
                ^~~~~~~~~~~~~~~~~~~~~~~
>> block/bfq-iosched.c:3290:13: error: invalid storage class for function 'bfq_init_bfqq'
    static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                ^~~~~~~~~~~~~

vim +/delta_usecs +2674 block/bfq-iosched.c

  2668		else
  2669			delta = ktime_get();
  2670		delta = ktime_sub(delta, bfqd->last_budget_start);
  2671		usecs = ktime_to_us(delta);
  2672	
  2673		/* don't use too short time intervals */
> 2674		if (delta_usecs < 1000) {
  2675			return false;
  2676	
  2677		/*
  2678		 * Calculate the bandwidth for the last slice.  We use a 64 bit
  2679		 * value to store the peak rate, in sectors per usec in fixed
  2680		 * point math.  We do so to have enough precision in the estimate
  2681		 * and to avoid overflows.
  2682		 */
  2683		bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
  2684		do_div(bw, (unsigned long)usecs);
  2685	
  2686		timeout = jiffies_to_msecs(bfqd->bfq_timeout);
  2687	
  2688		/*
  2689		 * Use only long (> 20ms) intervals to filter out spikes for
  2690		 * the peak rate estimation.
  2691		 */
  2692		if (usecs > 20000) {
  2693			if (bw > bfqd->peak_rate) {
  2694				bfqd->peak_rate = bw;
  2695				update = 1;
  2696				bfq_log(bfqd, "new peak_rate=%llu", bw);
  2697			}
  2698	
  2699			update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
  2700	
  2701			if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
  2702				bfqd->peak_rate_samples++;
  2703	
  2704			if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
  2705			    update && bfqd->bfq_user_max_budget == 0) {
  2706				bfqd->bfq_max_budget =
  2707					bfq_calc_max_budget(bfqd->peak_rate,
  2708							    timeout);
  2709				bfq_log(bfqd, "new max_budget=%d",
  2710					bfqd->bfq_max_budget);
  2711			}
  2712		}
  2713	
  2714		/*
  2715		 * A process is considered ``slow'' (i.e., seeky, so that we
  2716		 * cannot treat it fairly in the service domain, as it would
  2717		 * slow down too much the other processes) if, when a slice
  2718		 * ends for whatever reason, it has received service at a
  2719		 * rate that would not be high enough to complete the budget
  2720		 * before the budget timeout expiration.
  2721		 */
  2722		expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
  2723	
  2724		/*
  2725		 * Caveat: processes doing IO in the slower disk zones will
  2726		 * tend to be slow(er) even if not seeky. And the estimated
  2727		 * peak rate will actually be an average over the disk
  2728		 * surface. Hence, to not be too harsh with unlucky processes,
  2729		 * we keep a budget/3 margin of safety before declaring a
  2730		 * process slow.
  2731		 */
  2732		return expected > (4 * bfqq->entity.budget) / 3;
  2733	}
  2734	
  2735	/*
  2736	 * Return the farthest past time instant according to jiffies
  2737	 * macros.
  2738	 */
> 2739	static unsigned long bfq_smallest_from_now(void)
  2740	{
  2741		return jiffies - MAX_JIFFY_OFFSET;
  2742	}
  2743	
  2744	/**
  2745	 * bfq_bfqq_expire - expire a queue.
  2746	 * @bfqd: device owning the queue.
  2747	 * @bfqq: the queue to expire.
  2748	 * @compensate: if true, compensate for the time spent idling.
  2749	 * @reason: the reason causing the expiration.
  2750	 *
  2751	 *
  2752	 * If the process associated with the queue is slow (i.e., seeky), or
  2753	 * in case of budget timeout, or, finally, if it is async, we
  2754	 * artificially charge it an entire budget (independently of the
  2755	 * actual service it received). As a consequence, the queue will get
  2756	 * higher timestamps than the correct ones upon reactivation, and
  2757	 * hence it will be rescheduled as if it had received more service
  2758	 * than what it actually received. In the end, this class of processes
  2759	 * will receive less service in proportion to how slowly they consume
  2760	 * their budgets (and hence how seriously they tend to lower the
  2761	 * throughput).
  2762	 *
  2763	 * In contrast, when a queue expires because it has been idling for
  2764	 * too much or because it exhausted its budget, we do not touch the
  2765	 * amount of service it has received. Hence when the queue will be
  2766	 * reactivated and its timestamps updated, the latter will be in sync
  2767	 * with the actual service received by the queue until expiration.
  2768	 *
  2769	 * Charging a full budget to the first type of queues and the exact
  2770	 * service to the others has the effect of using the WF2Q+ policy to
  2771	 * schedule the former on a timeslice basis, without violating the
  2772	 * service domain guarantees of the latter.
  2773	 */
> 2774	static void bfq_bfqq_expire(struct bfq_data *bfqd,
  2775				    struct bfq_queue *bfqq,
  2776				    bool compensate,
  2777				    enum bfqq_expiration reason)
  2778	{
  2779		bool slow;
  2780		int ref;
  2781	
  2782		/*
  2783		 * Update device peak rate for autotuning and check whether the
  2784		 * process is slow (see bfq_update_peak_rate).
  2785		 */
  2786		slow = bfq_update_peak_rate(bfqd, bfqq, compensate);
  2787	
  2788		/*
  2789		 * As above explained, 'punish' slow (i.e., seeky), timed-out
  2790		 * and async queues, to favor sequential sync workloads.
  2791		 */
  2792		if (slow || reason == BFQQE_BUDGET_TIMEOUT)
  2793			bfq_bfqq_charge_full_budget(bfqq);
  2794	
  2795		if (reason == BFQQE_TOO_IDLE &&
  2796		    bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
  2797			bfq_clear_bfqq_IO_bound(bfqq);
  2798	
  2799		bfq_log_bfqq(bfqd, bfqq,
  2800			"expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
  2801			slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
  2802	
  2803		/*
  2804		 * Increase, decrease or leave budget unchanged according to
  2805		 * reason.
  2806		 */
  2807		__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
  2808		ref = bfqq->ref;
  2809		__bfq_bfqq_expire(bfqd, bfqq);
  2810	
  2811		/* mark bfqq as waiting a request only if a bic still points to it */
  2812		if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
  2813		    reason != BFQQE_BUDGET_TIMEOUT &&
  2814		    reason != BFQQE_BUDGET_EXHAUSTED)
  2815			bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
  2816	}
  2817	
  2818	/*
  2819	 * Budget timeout is not implemented through a dedicated timer, but
  2820	 * just checked on request arrivals and completions, as well as on
  2821	 * idle timer expirations.
  2822	 */
> 2823	static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
  2824	{
  2825		if (bfq_bfqq_budget_new(bfqq) ||
  2826		    time_is_after_jiffies(bfqq->budget_timeout))
  2827			return false;
  2828		return true;
  2829	}
  2830	
  2831	/*
  2832	 * If we expire a queue that is actively waiting (i.e., with the
  2833	 * device idled) for the arrival of a new request, then we may incur
  2834	 * the timestamp misalignment problem described in the body of the
  2835	 * function __bfq_activate_entity. Hence we return true only if this
  2836	 * condition does not hold, or if the queue is slow enough to deserve
  2837	 * only to be kicked off for preserving a high throughput.
  2838	 */
> 2839	static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
  2840	{
  2841		bfq_log_bfqq(bfqq->bfqd, bfqq,
  2842			"may_budget_timeout: wait_request %d left %d timeout %d",
  2843			bfq_bfqq_wait_request(bfqq),
  2844				bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3,
  2845			bfq_bfqq_budget_timeout(bfqq));
  2846	
  2847		return (!bfq_bfqq_wait_request(bfqq) ||
  2848			bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3)
  2849			&&
  2850			bfq_bfqq_budget_timeout(bfqq);
  2851	}
  2852	
  2853	/*
  2854	 * For a queue that becomes empty, device idling is allowed only if
  2855	 * this function returns true for the queue. And this function returns
  2856	 * true only if idling is beneficial for throughput.
  2857	 */
> 2858	static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
  2859	{
  2860		struct bfq_data *bfqd = bfqq->bfqd;
  2861		bool idling_boosts_thr;

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Download attachment ".config.gz" of type "application/gzip" (44066 bytes)
