When we convert bch_btree_insert() to bch_btree_map_leaf_nodes(), we
won't be passing struct btree_op to bch_btree_insert() anymore — so we
need a different way of returning whether there was a collision (really,
a replace collision). Instead of setting op->insert_collision, have
bch_btree_insert() return -ESRCH on a replace collision and let callers
record the collision state themselves.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+ if (op->insert_collision)
+ return -ESRCH;
+
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
- if (io->s.op.insert_collision)
+ if (io->s.insert_collision)
trace_bcache_gc_copy_collision(&io->w->key);
bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
trace_bcache_gc_copy_collision(&io->w->key);
bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
struct search *s = container_of(cl, struct search, btree);
atomic_t *journal_ref = NULL;
struct bkey *replace_key = s->replace ? &s->replace_key : NULL;
struct search *s = container_of(cl, struct search, btree);
atomic_t *journal_ref = NULL;
struct bkey *replace_key = s->replace ? &s->replace_key : NULL;
/*
* If we're looping, might already be waiting on
/*
* If we're looping, might already be waiting on
s->flush_journal
? &s->cl : NULL);
s->flush_journal
? &s->cl : NULL);
- if (bch_btree_insert(&s->op, s->c, &s->insert_keys,
- journal_ref, replace_key)) {
+ ret = bch_btree_insert(&s->op, s->c, &s->insert_keys,
+ journal_ref, replace_key);
+ if (ret == -ESRCH) {
+ s->insert_collision = true;
+ } else if (ret) {
s->error = -ENOMEM;
s->insert_data_done = true;
}
s->error = -ENOMEM;
s->insert_data_done = true;
}
{
struct search *s = container_of(cl, struct search, cl);
{
struct search *s = container_of(cl, struct search, cl);
- if (s->op.insert_collision)
+ if (s->insert_collision)
bch_mark_cache_miss_collision(s);
if (s->cache_bio) {
bch_mark_cache_miss_collision(s);
if (s->cache_bio) {
unsigned insert_data_done:1;
unsigned replace:1;
unsigned insert_data_done:1;
unsigned replace:1;
+ unsigned insert_collision:1;
unsigned i;
struct btree_op op;
struct keylist keys;
unsigned i;
struct btree_op op;
struct keylist keys;
bch_btree_op_init(&op, -1);
bch_keylist_init(&keys);
bch_btree_op_init(&op, -1);
bch_keylist_init(&keys);
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
- bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);
+ ret = bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);
- if (op.insert_collision)
trace_bcache_writeback_collision(&w->key);
trace_bcache_writeback_collision(&w->key);
- atomic_long_inc(op.insert_collision
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
}
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
}