This source file includes the following definitions:
- update_fastmap_work_fn
- find_anchor_wl_entry
- return_unused_pool_pebs
- anchor_pebs_available
- ubi_wl_get_fm_peb
- ubi_refill_pools
- produce_free_peb
- ubi_wl_get_peb
- get_peb_for_wl
- ubi_ensure_anchor_pebs
- ubi_wl_put_fm_peb
- ubi_is_erase_work
- ubi_fastmap_close
- may_reserve_for_fm
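/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */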
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}
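/**
 * find_anchor_wl_entry - find a wear-leveling entry usable as an anchor PEB.
 * @root: the RB-tree of free wear-leveling entries to search
 *
 * Returns the entry with the lowest erase counter whose PEB number is below
 * %UBI_FM_MAX_START, or %NULL if no such entry exists.
 */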
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}
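/**
 * return_unused_pool_pebs - return unused PEBs of a fastmap pool to the
 * free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */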
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}
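/**
 * anchor_pebs_available - check whether the tree still holds a potential
 * anchor PEB (a PEB with a number below %UBI_FM_MAX_START).
 * @root: the RB-tree of free wear-leveling entries to search
 */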
static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}
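/**
 * ubi_wl_get_fm_peb - grab a free PEB for fastmap usage.
 * @ubi: UBI device description object
 * @anchor: non-zero if the PEB will be used as an anchor PEB by fastmap
 *
 * Returns a free PEB and removes it from the wear-leveling sub-system, or
 * %NULL if none is available. Must be called with ubi->wl_lock held.
 */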
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wear-leveling sub-system no
	 * longer tracks this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
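/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */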
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
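/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution
 * of pending works. This may be needed if, for example, the background
 * thread is disabled. Returns zero in case of success and a negative error
 * code in case of failure.
 */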
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}
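/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */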
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We check the WL pool as well, because at this point the WL pool
	 * can still be refilled synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
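/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */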
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/*
		 * We cannot update the fastmap here because this function
		 * is called in atomic context. Let's fail here and refill or
		 * update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}
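/**
 * ubi_ensure_anchor_pebs - schedule anchor PEB creation.
 * @ubi: UBI device description object
 */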
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}
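/**
 * ubi_wl_put_fm_peb - returns a PEB used by fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */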
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap for the very first
	 * time and are now writing a new one. In this case the WL sub-system
	 * has no idea about this PEB yet.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
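/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */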
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
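/**
 * ubi_fastmap_close - release fastmap resources.
 * @ubi: UBI device description object
 *
 * Returns all unused pool PEBs to the free tree and frees the in-memory
 * fastmap structure.
 */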
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}
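/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against
 */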
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}