
Linux/block/blk-wbt.h

#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
        WBT_TRACKED             = 1,    /* write, tracked for throttling */
        WBT_READ                = 2,    /* read */
        WBT_KSWAPD              = 4,    /* write, from kswapd */

        WBT_NR_BITS             = 3,    /* number of bits */
};

enum {
        WBT_NUM_RWQ             = 2,
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
        WBT_STATE_ON_DEFAULT    = 1,
        WBT_STATE_ON_MANUAL     = 2,
};

/* Drop the wbt flag bits stored above the issue time */
static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
        stat->time &= BLK_STAT_TIME_MASK;
}

/* Extract the wbt flags packed above the issue time */
static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
        return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
}

/* Record the wbt flags for this request in the upper bits of the issue stat */
static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
        stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
}

/* True if this request is a write tracked for throttling (WBT_TRACKED set) */
static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
        return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
}

/* True if this request is a read (WBT_READ set) */
static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
        return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
}

/* Wait queue and inflight count for one class of tracked requests */
struct rq_wait {
        wait_queue_head_t wait;
        atomic_t inflight;
};

struct rq_wb {
        /*
         * Settings that govern how we throttle
         */
        unsigned int wb_background;             /* background writeback */
        unsigned int wb_normal;                 /* normal writeback */
        unsigned int wb_max;                    /* max throughput writeback */
        int scale_step;                         /* current depth scaling step */
        bool scaled_max;                        /* scaled up as far as possible */

        short enable_state;                     /* WBT_STATE_* */

        /*
         * Number of consecutive periods where we don't have enough
         * information to make a firm scale up/down decision.
         */
        unsigned int unknown_cnt;

        u64 win_nsec;                           /* default window size */
        u64 cur_win_nsec;                       /* current window size */

        struct timer_list window_timer;         /* monitoring window timer */

        s64 sync_issue;                         /* issue time of a pending sync request */
        void *sync_cookie;                      /* cookie identifying that request */

        unsigned int wc;                        /* write cache enabled */
        unsigned int queue_depth;               /* device queue depth */

        unsigned long last_issue;               /* last non-throttled issue */
        unsigned long last_comp;                /* last non-throttled comp */
        unsigned long min_lat_nsec;             /* target completion latency, nsec */
        struct request_queue *queue;            /* owning request queue */
        struct rq_wait rq_wait[WBT_NUM_RWQ];    /* per-class waitqueues/inflight */
};

/* Total number of tracked requests currently in flight, across all classes */
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
        unsigned int i, ret = 0;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                ret += atomic_read(&rwb->rq_wait[i].inflight);

        return ret;
}

#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

#else

static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
                                      spinlock_t *lock)
{
        return 0;
}
static inline int wbt_init(struct request_queue *q)
{
        return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
        return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif
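
The tracking helpers above stash the wbt_flags for a request in the bits of blk_issue_stat->time that sit above the issue timestamp; the actual BLK_STAT_SHIFT/BLK_STAT_MASK/BLK_STAT_TIME_MASK layout lives in blk-stat.h and is not shown here. The stand-alone sketch below mimics that round trip in user space, with made-up MOCK_* constants standing in for the real layout and the enum values copied from the header; it only illustrates what wbt_track(), wbt_stat_to_mask() and wbt_clear_state() do, it is not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the blk-stat.h layout: flags live above bit 59 */
#define MOCK_STAT_SHIFT         59
#define MOCK_STAT_TIME_MASK     ((1ULL << MOCK_STAT_SHIFT) - 1)
#define MOCK_STAT_MASK          (~MOCK_STAT_TIME_MASK)

struct mock_issue_stat {
        uint64_t time;          /* issue time in the low bits, wbt flags above */
};

enum wbt_flags {
        WBT_TRACKED     = 1,    /* write, tracked for throttling */
        WBT_READ        = 2,    /* read */
        WBT_KSWAPD      = 4,    /* write, from kswapd */
};

/* Same shape as wbt_track() above */
static void mock_track(struct mock_issue_stat *stat, unsigned int wb_acct)
{
        stat->time |= (uint64_t)wb_acct << MOCK_STAT_SHIFT;
}

/* Same shape as wbt_stat_to_mask() above */
static unsigned int mock_stat_to_mask(const struct mock_issue_stat *stat)
{
        return (stat->time & MOCK_STAT_MASK) >> MOCK_STAT_SHIFT;
}

/* Same shape as wbt_clear_state() above */
static void mock_clear_state(struct mock_issue_stat *stat)
{
        stat->time &= MOCK_STAT_TIME_MASK;
}

int main(void)
{
        struct mock_issue_stat stat = { .time = 123456789ULL };

        /* a throttled write issued by kswapd */
        mock_track(&stat, WBT_TRACKED | WBT_KSWAPD);

        assert(mock_stat_to_mask(&stat) == (WBT_TRACKED | WBT_KSWAPD));
        assert((stat.time & MOCK_STAT_TIME_MASK) == 123456789ULL);

        /* dropping the flags leaves the issue time untouched */
        mock_clear_state(&stat);
        assert(stat.time == 123456789ULL);

        printf("wbt flag round trip ok\n");
        return 0;
}

In the kernel, the flags returned by wbt_wait() are recorded with wbt_track() on the request's issue stat and read back at completion time, which is how wbt_done() can tell whether the request was being throttled and which counters to adjust.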

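struct rq_wait pairs an inflight counter with a wait queue, and struct rq_wb keeps one instance per request class (WBT_NUM_RWQ of them); wbt_inflight() simply sums the counters. The following user-space analogue, with invented mock_* names, C11 atomics in place of atomic_t, and the wait-queue half omitted, sketches just that accounting:

#include <stdatomic.h>
#include <stdio.h>

#define MOCK_NUM_RWQ 2          /* mirrors WBT_NUM_RWQ */

/* counterpart of struct rq_wait, minus the wait queue */
struct mock_rq_wait {
        atomic_uint inflight;
};

struct mock_rq_wb {
        struct mock_rq_wait rq_wait[MOCK_NUM_RWQ];
};

/* same shape as wbt_inflight(): sum the per-class inflight counters */
static unsigned int mock_inflight(struct mock_rq_wb *rwb)
{
        unsigned int i, ret = 0;

        for (i = 0; i < MOCK_NUM_RWQ; i++)
                ret += atomic_load(&rwb->rq_wait[i].inflight);

        return ret;
}

int main(void)
{
        struct mock_rq_wb rwb = { 0 };

        /* pretend two tracked writes and one kswapd write are in flight */
        atomic_fetch_add(&rwb.rq_wait[0].inflight, 2);
        atomic_fetch_add(&rwb.rq_wait[1].inflight, 1);
        printf("inflight: %u\n", mock_inflight(&rwb));  /* prints 3 */

        atomic_fetch_sub(&rwb.rq_wait[0].inflight, 1);
        printf("inflight: %u\n", mock_inflight(&rwb));  /* prints 2 */

        return 0;
}

In the kernel the wait queue half is what lets wbt_wait() put writers to sleep once too many tracked requests are in flight, with completions waking them back up as the inflight counts drop.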