Diffstat (limited to 'drivers/md/dm-cache-policy.h')
-rw-r--r--  drivers/md/dm-cache-policy.h | 187
1 file changed, 54 insertions(+), 133 deletions(-)
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index aa10b1493f34..c05fc3436cef 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -13,183 +13,100 @@
/*----------------------------------------------------------------*/
-/* FIXME: make it clear which methods are optional. Get debug policy to
- * double check this at start.
- */
-
/*
* The cache policy makes the important decisions about which blocks get to
* live on the faster cache device.
- *
- * When the core target has to remap a bio it calls the 'map' method of the
- * policy. This returns an instruction telling the core target what to do.
- *
- * POLICY_HIT:
- * That block is in the cache. Remap to the cache and carry on.
- *
- * POLICY_MISS:
- * This block is on the origin device. Remap and carry on.
- *
- * POLICY_NEW:
- * This block is currently on the origin device, but the policy wants to
- * move it. The core should:
- *
- * - hold any further io to this origin block
- * - copy the origin to the given cache block
- * - release all the held blocks
- * - remap the original block to the cache
- *
- * POLICY_REPLACE:
- * This block is currently on the origin device. The policy wants to
- * move it to the cache, with the added complication that the destination
- * cache block needs a writeback first. The core should:
- *
- * - hold any further io to this origin block
- * - hold any further io to the origin block that's being written back
- * - writeback
- * - copy new block to cache
- * - release held blocks
- * - remap bio to cache and reissue.
- *
- * Should the core run into trouble while processing a POLICY_NEW or
- * POLICY_REPLACE instruction it will roll back the policies mapping using
- * remove_mapping() or force_mapping(). These methods must not fail. This
- * approach avoids having transactional semantics in the policy (ie, the
- * core informing the policy when a migration is complete), and hence makes
- * it easier to write new policies.
- *
- * In general policy methods should never block, except in the case of the
- * map function when can_migrate is set. So be careful to implement using
- * bounded, preallocated memory.
*/
enum policy_operation {
- POLICY_HIT,
- POLICY_MISS,
- POLICY_NEW,
- POLICY_REPLACE
-};
-
-/*
- * When issuing a POLICY_REPLACE the policy needs to make a callback to
- * lock the block being demoted. This doesn't need to occur during a
- * writeback operation since the block remains in the cache.
- */
-struct policy_locker;
-typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);
-
-struct policy_locker {
- policy_lock_fn fn;
+ POLICY_PROMOTE,
+ POLICY_DEMOTE,
+ POLICY_WRITEBACK
};
/*
* This is the instruction passed back to the core target.
*/
-struct policy_result {
+struct policy_work {
enum policy_operation op;
- dm_oblock_t old_oblock; /* POLICY_REPLACE */
- dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
};
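For illustration only: the core target is expected to act on a returned work item by doing the promotion, demotion or writeback itself. A minimal sketch of such a dispatcher follows; struct cache stands in for the core target's private state, and promote_block()/demote_block()/writeback_block() are hypothetical helpers, not part of this header.

/* Hedged sketch: how a consumer might dispatch on a policy_work item. */
static void process_policy_work_sketch(struct cache *cache, struct policy_work *work)
{
	switch (work->op) {
	case POLICY_PROMOTE:
		/* copy work->oblock from the origin into cache block work->cblock */
		promote_block(cache, work->oblock, work->cblock);
		break;

	case POLICY_DEMOTE:
		/* drop the mapping at work->cblock, writing back first if dirty */
		demote_block(cache, work->oblock, work->cblock);
		break;

	case POLICY_WRITEBACK:
		/* clean the dirty block; the mapping stays in the cache */
		writeback_block(cache, work->oblock, work->cblock);
		break;
	}
}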
/*
- * The cache policy object. Just a bunch of methods. It is envisaged that
- * this structure will be embedded in a bigger, policy specific structure
- * (ie. use container_of()).
+ * The cache policy object. It is envisaged that this structure will be
+ * embedded in a bigger, policy-specific structure (i.e. use container_of()).
*/
struct dm_cache_policy {
-
- /*
- * FIXME: make it clear which methods are optional, and which may
- * block.
- */
-
/*
* Destroys this object.
*/
void (*destroy)(struct dm_cache_policy *p);
/*
- * See large comment above.
- *
- * oblock - the origin block we're interested in.
- *
- * can_block - indicates whether the current thread is allowed to
- * block. -EWOULDBLOCK returned if it can't and would.
- *
- * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
- * instructions. If denied and the policy would have
- * returned one of these instructions it should
- * return -EWOULDBLOCK.
+ * Find the location of a block.
*
- * discarded_oblock - indicates whether the whole origin block is
- * in a discarded state (FIXME: better to tell the
- * policy about this sooner, so it can recycle that
- * cache block if it wants.)
- * bio - the bio that triggered this call.
- * result - gets filled in with the instruction.
+ * Must not block.
*
- * May only return 0, or -EWOULDBLOCK (if !can_migrate)
+ * Returns 0 if in cache (cblock will be set), -ENOENT if not, < 0 for
+ * other errors (-EWOULDBLOCK would be typical). data_dir should be
+ * READ or WRITE. fast_copy should be set if migrating this block would
+ * be 'cheap' somehow (eg, discarded data). background_queued will be set
+ * if a migration has just been queued.
*/
- int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
- bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_locker *locker,
- struct policy_result *result);
+ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
+ int data_dir, bool fast_copy, bool *background_queued);
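As a rough usage sketch (not code from this patch), the bio remap path could consult lookup() along these lines. The cache->policy field and the remap_to_cache()/remap_to_origin() helpers are assumptions made for the example.

/* Hedged sketch: remap a bio using lookup(). */
static int map_bio_sketch(struct cache *cache, struct bio *bio, dm_oblock_t oblock)
{
	struct dm_cache_policy *p = cache->policy;	/* assumed field */
	dm_cblock_t cblock;
	bool background_queued = false;
	int r;

	r = p->lookup(p, oblock, &cblock, bio_data_dir(bio), false, &background_queued);
	if (!r)
		remap_to_cache(cache, bio, cblock);	/* hit: cblock is valid */
	else if (r == -ENOENT)
		remap_to_origin(cache, bio);		/* miss: leave it on the origin */
	else
		return r;				/* eg, -EWOULDBLOCK */

	/* background_queued only tells us the policy has queued a migration */
	return 0;
}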
/*
- * Sometimes we want to see if a block is in the cache, without
- * triggering any update of stats. (ie. it's not a real hit).
- *
- * Must not block.
+ * Sometimes the core target can optimise a migration, eg, the
+ * block may be discarded, or the bio may cover an entire block.
+ * To perform the optimisation, though, it needs the migration
+ * immediately, so it knows to do something different with the bio.
*
- * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
- * (-EWOULDBLOCK would be typical).
+ * This method is optional (the policy-internal wrappers will fall
+ * back to using lookup).
*/
- int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
-
- void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
- void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ int (*lookup_with_work)(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t *cblock,
+ int data_dir, bool fast_copy,
+ struct policy_work **work);
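Because lookup_with_work() is optional, a wrapper in the spirit of the policy-internal helpers might fall back to plain lookup(). This is a sketch only, and it assumes the policy accepts a NULL background_queued pointer.

/* Sketch of an optional-method fallback; not necessarily the in-tree wrapper. */
static int lookup_with_work_sketch(struct dm_cache_policy *p,
				   dm_oblock_t oblock, dm_cblock_t *cblock,
				   int data_dir, bool fast_copy,
				   struct policy_work **work)
{
	if (!p->lookup_with_work) {
		*work = NULL;	/* no immediate migration available */
		return p->lookup(p, oblock, cblock, data_dir, fast_copy, NULL);
	}

	return p->lookup_with_work(p, oblock, cblock, data_dir, fast_copy, work);
}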
/*
- * Called when a cache target is first created. Used to load a
- * mapping from the metadata device into the policy.
+ * Retrieves background work. Returns -ENODATA when there's no
+ * background work.
*/
- int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
- dm_cblock_t cblock, uint32_t hint, bool hint_valid);
+ int (*get_background_work)(struct dm_cache_policy *p, bool idle,
+ struct policy_work **result);
/*
- * Gets the hint for a given cblock. Called in a single threaded
- * context. So no locking required.
+ * You must pass in the same work pointer that you were given, not
+ * a copy.
*/
- uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);
+ void (*complete_background_work)(struct dm_cache_policy *p,
+ struct policy_work *work,
+ bool success);
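Putting the two methods above together, a background worker in the core target could drain and complete work items roughly as follows; do_migration() is a hypothetical stand-in for the real copy/writeback machinery.

/* Hedged sketch of a background-work consumer loop. */
static void check_migrations_sketch(struct cache *cache, bool idle)
{
	struct dm_cache_policy *p = cache->policy;	/* assumed field */
	struct policy_work *work;
	int r;

	for (;;) {
		r = p->get_background_work(p, idle, &work);
		if (r == -ENODATA)
			break;				/* nothing queued right now */
		if (r)
			break;				/* unexpected error; try again later */

		r = do_migration(cache, work);		/* hypothetical promote/demote/writeback */

		/* hand back the very same pointer we were given */
		p->complete_background_work(p, work, r == 0);
	}
}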
+
+ void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
+ void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
/*
- * Override functions used on the error paths of the core target.
- * They must succeed.
+ * Called when a cache target is first created. Used to load a
+ * mapping from the metadata device into the policy.
*/
- void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
- void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
- dm_oblock_t new_oblock);
+ int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ dm_cblock_t cblock, bool dirty,
+ uint32_t hint, bool hint_valid);
/*
- * This is called via the invalidate_cblocks message. It is
- * possible the particular cblock has already been removed due to a
- * write io in passthrough mode. In which case this should return
- * -ENODATA.
+ * Drops the mapping, irrespective of whether it's clean or dirty.
+ * Returns -ENODATA if cblock is not mapped.
*/
- int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);
+ int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);
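A possible caller, e.g. one servicing an invalidate-cblocks style message, might walk a range of cache blocks and treat -ENODATA as "already unmapped". The from_cblock()/to_cblock() converters come from the dm-cache block types; the loop itself is only a sketch.

/* Sketch only: invalidate a range of cache blocks [begin, end). */
static int invalidate_range_sketch(struct dm_cache_policy *p,
				   dm_cblock_t begin, dm_cblock_t end)
{
	uint32_t b;

	for (b = from_cblock(begin); b < from_cblock(end); b++) {
		int r = p->invalidate_mapping(p, to_cblock(b));

		if (r && r != -ENODATA)
			return r;	/* -ENODATA: block was not mapped anyway */
	}

	return 0;
}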
/*
- * Provide a dirty block to be written back by the core target. If
- * critical_only is set then the policy should only provide work if
- * it urgently needs it.
- *
- * Returns:
- *
- * 0 and @cblock,@oblock: block to write back provided
- *
- * -ENODATA: no dirty blocks available
+ * Gets the hint for a given cblock. Called in a single-threaded
+ * context, so no locking is required.
*/
- int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock,
- bool critical_only);
+ uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);
/*
* How full is the cache?
@@ -202,6 +119,8 @@ struct dm_cache_policy {
* queue merging has occurred). To stop the policy being fooled by
* these, the core target sends regular tick() calls to the policy.
* The policy should only count an entry as hit once per tick.
+ *
+ * This method is optional.
*/
void (*tick)(struct dm_cache_policy *p, bool can_block);
@@ -213,6 +132,8 @@ struct dm_cache_policy {
int (*set_config_value)(struct dm_cache_policy *p,
const char *key, const char *value);
+ void (*allow_migrations)(struct dm_cache_policy *p, bool allow);
+
/*
* Book keeping ptr for the policy register, not for general use.
*/