Lines matching refs:d (cross-reference hits for the identifier d, the struct aoedev pointer, in the AoE block driver's aoedev.c; each hit shows its source line number and the enclosing function)

19 static void freetgt(struct aoedev *d, struct aoetgt *t);
20 static void skbpoolfree(struct aoedev *d);
139 aoedev_put(struct aoedev *d) in aoedev_put() argument
144 d->ref--; in aoedev_put()
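
The two aoedev_put() hits above are the entire reference drop. A minimal sketch of how that decrement is likely serialized; the use of the driver-wide devlist_lock is an assumption, since the hits show only the decrement itself:

void
aoedev_put(struct aoedev *d)
{
        ulong flags;

        /* assumption: the global devlist_lock guards d->ref, mirroring
         * how aoedev_by_aoeaddr() below hands out references */
        spin_lock_irqsave(&devlist_lock, flags);
        d->ref--;
        spin_unlock_irqrestore(&devlist_lock, flags);
}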
151 struct aoedev *d; in dummy_timer() local
153 d = (struct aoedev *)vp; in dummy_timer()
154 if (d->flags & DEVFL_TKILL) in dummy_timer()
156 d->timer.expires = jiffies + HZ; in dummy_timer()
157 add_timer(&d->timer); in dummy_timer()
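
Reassembled from the dummy_timer() hits, the keep-alive timer simply rearms itself once a second until the device is marked for teardown. The early return on DEVFL_TKILL is inferred from the flag test at line 154; the rest is taken directly from the hits:

static void
dummy_timer(ulong vp)
{
        struct aoedev *d;

        d = (struct aoedev *)vp;
        if (d->flags & DEVFL_TKILL)
                return;                 /* inferred: stop rearming once the device is dying */
        d->timer.expires = jiffies + HZ;
        add_timer(&d->timer);
}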
161 aoe_failip(struct aoedev *d) in aoe_failip() argument
167 aoe_failbuf(d, d->ip.buf); in aoe_failip()
169 rq = d->ip.rq; in aoe_failip()
172 while ((bio = d->ip.nxbio)) { in aoe_failip()
174 d->ip.nxbio = bio->bi_next; in aoe_failip()
179 aoe_end_request(d, rq, 0); in aoe_failip()
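
aoe_failip() disposes of the request that was in the middle of being carved into frames (tracked in d->ip). A hedged reconstruction of the flow: fail the buf being built, mark every not-yet-issued bio as failed, and complete the request once nothing remains outstanding. The bio error flagging and the outstanding-bio count kept in rq->special are assumptions about bookkeeping the hits do not show, and are specific to the old block/bio API this file uses:

static void
aoe_failip(struct aoedev *d)
{
        struct request *rq;
        struct bio *bio;
        unsigned long n;

        aoe_failbuf(d, d->ip.buf);              /* fail the buf currently being built */

        rq = d->ip.rq;
        if (rq == NULL)
                return;

        while ((bio = d->ip.nxbio)) {
                /* assumed: mark the bio failed and count it off the request */
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
                d->ip.nxbio = bio->bi_next;
                n = (unsigned long)rq->special;
                rq->special = (void *)--n;
        }
        if ((unsigned long)rq->special == 0)
                aoe_end_request(d, rq, 0);      /* nothing left in flight: end the request */
}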
191 aoe_failbuf(f->t->d, f->buf); in downdev_frame()
197 aoedev_downdev(struct aoedev *d) in aoedev_downdev() argument
204 d->flags &= ~DEVFL_UP; in aoedev_downdev()
208 head = &d->factive[i]; in aoedev_downdev()
212 head = &d->rexmitq; in aoedev_downdev()
217 tt = d->targets; in aoedev_downdev()
218 te = tt + d->ntargets; in aoedev_downdev()
225 aoe_failip(d); in aoedev_downdev()
228 if (d->blkq) { in aoedev_downdev()
229 while ((rq = blk_peek_request(d->blkq))) { in aoedev_downdev()
231 aoe_end_request(d, rq, 1); in aoedev_downdev()
235 if (d->gd) in aoedev_downdev()
236 set_capacity(d->gd, 0); in aoedev_downdev()
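
Read together, the aoedev_downdev() hits give the takedown order: clear DEVFL_UP, push every frame on the factive[] buckets and the rexmitq through downdev_frame() (the lone hit at line 191), reset each target, abort the in-process request via aoe_failip(), fast-fail whatever the block layer still has queued, and zero the capacity. A sketch of that flow; the list iteration, the per-target aoecmd_wreset()/nout reset, and blk_start_request() are filled in as assumptions:

void
aoedev_downdev(struct aoedev *d)
{
        struct aoetgt *t, **tt, **te;
        struct list_head *head, *pos, *nx;
        struct request *rq;
        int i;

        d->flags &= ~DEVFL_UP;

        /* fail every frame still sitting on the active and retransmit lists */
        for (i = 0; i < NFACTIVE; i++) {
                head = &d->factive[i];
                list_for_each_safe(pos, nx, head)
                        downdev_frame(pos);
        }
        head = &d->rexmitq;
        list_for_each_safe(pos, nx, head)
                downdev_frame(pos);

        /* reset per-target state (assumed helper: aoecmd_wreset) */
        tt = d->targets;
        te = tt + d->ntargets;
        for (; tt < te && (t = *tt); tt++) {
                aoecmd_wreset(t);
                t->nout = 0;
        }

        /* abort the request currently being split into frames */
        aoe_failip(d);

        /* fast-fail all pending I/O (old single-queue block API,
         * per the blk_peek_request hit) */
        if (d->blkq) {
                while ((rq = blk_peek_request(d->blkq))) {
                        blk_start_request(rq);
                        aoe_end_request(d, rq, 1);
                }
        }

        if (d->gd)
                set_capacity(d->gd, 0);
}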
243 user_req(char *s, size_t slen, struct aoedev *d) in user_req() argument
248 if (!d->gd) in user_req()
250 p = kbasename(d->gd->disk_name); in user_req()
251 lim = sizeof(d->gd->disk_name); in user_req()
252 lim -= p - d->gd->disk_name; in user_req()
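
user_req() decides whether a name written to the flush interface refers to this device by comparing it against the basename of the gendisk name, clamped to the space left in disk_name. The final comparison is not among the hits; the strncmp below is an assumption about how the clamped length is used:

static int
user_req(char *s, size_t slen, struct aoedev *d)
{
        const char *p;
        size_t lim;

        if (!d->gd)
                return 0;
        p = kbasename(d->gd->disk_name);
        lim = sizeof(d->gd->disk_name);
        lim -= p - d->gd->disk_name;    /* bytes left after the basename offset */
        if (slen < lim)
                lim = slen;

        /* assumed: report a match when the user string equals the disk basename */
        return !strncmp(s, p, lim);
}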
260 freedev(struct aoedev *d) in freedev() argument
266 spin_lock_irqsave(&d->lock, flags); in freedev()
267 if (d->flags & DEVFL_TKILL in freedev()
268 && !(d->flags & DEVFL_FREEING)) { in freedev()
269 d->flags |= DEVFL_FREEING; in freedev()
272 spin_unlock_irqrestore(&d->lock, flags); in freedev()
276 del_timer_sync(&d->timer); in freedev()
277 if (d->gd) { in freedev()
278 aoedisk_rm_debugfs(d); in freedev()
279 aoedisk_rm_sysfs(d); in freedev()
280 del_gendisk(d->gd); in freedev()
281 put_disk(d->gd); in freedev()
282 blk_cleanup_queue(d->blkq); in freedev()
284 t = d->targets; in freedev()
285 e = t + d->ntargets; in freedev()
287 freetgt(d, *t); in freedev()
288 if (d->bufpool) in freedev()
289 mempool_destroy(d->bufpool); in freedev()
290 skbpoolfree(d); in freedev()
291 minor_free(d->sysminor); in freedev()
293 spin_lock_irqsave(&d->lock, flags); in freedev()
294 d->flags |= DEVFL_FREED; in freedev()
295 spin_unlock_irqrestore(&d->lock, flags); in freedev()
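
freedev() first claims the device under d->lock by setting DEVFL_FREEING (only for devices already marked DEVFL_TKILL), then tears everything down outside the lock: stop the timer, remove debugfs/sysfs and the gendisk, free the targets, destroy the buf mempool and skb pool, release the minor, and finally set DEVFL_FREED so flush() can unlink and free the structure. The early return when another caller is already freeing is inferred rather than shown:

static void
freedev(struct aoedev *d)
{
        struct aoetgt **t, **e;
        int freeing = 0;
        unsigned long flags;

        /* only one caller gets to free a killed device */
        spin_lock_irqsave(&d->lock, flags);
        if (d->flags & DEVFL_TKILL
        && !(d->flags & DEVFL_FREEING)) {
                d->flags |= DEVFL_FREEING;
                freeing = 1;
        }
        spin_unlock_irqrestore(&d->lock, flags);
        if (!freeing)
                return;                 /* inferred: someone else already owns the teardown */

        del_timer_sync(&d->timer);
        if (d->gd) {
                aoedisk_rm_debugfs(d);
                aoedisk_rm_sysfs(d);
                del_gendisk(d->gd);
                put_disk(d->gd);
                blk_cleanup_queue(d->blkq);
        }
        t = d->targets;
        e = t + d->ntargets;
        for (; t < e && *t; t++)
                freetgt(d, *t);
        if (d->bufpool)
                mempool_destroy(d->bufpool);
        skbpoolfree(d);
        minor_free(d->sysminor);

        spin_lock_irqsave(&d->lock, flags);
        d->flags |= DEVFL_FREED;        /* flush() unlinks and frees from here */
        spin_unlock_irqrestore(&d->lock, flags);
}

Splitting DEVFL_FREEING from DEVFL_FREED lets freedev() do the slow work without holding devlist_lock while still giving flush() an unambiguous signal for when the device can be unlinked.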
307 struct aoedev *d, **dd; in flush() local
328 for (d = devlist; d; d = d->next) { in flush()
329 spin_lock(&d->lock); in flush()
333 if (!user_req(buf, cnt, d)) in flush()
335 } else if ((!all && (d->flags & DEVFL_UP)) in flush()
336 || d->flags & skipflags in flush()
337 || d->nopen in flush()
338 || d->ref) in flush()
341 aoedev_downdev(d); in flush()
342 d->flags |= DEVFL_TKILL; in flush()
344 spin_unlock(&d->lock); in flush()
353 for (d = devlist; d; d = d->next) { in flush()
354 spin_lock(&d->lock); in flush()
355 if (d->flags & DEVFL_TKILL in flush()
356 && !(d->flags & DEVFL_FREEING)) { in flush()
357 spin_unlock(&d->lock); in flush()
359 freedev(d); in flush()
362 spin_unlock(&d->lock); in flush()
366 for (dd = &devlist, d = *dd; d; d = *dd) { in flush()
369 spin_lock(&d->lock); in flush()
370 if (d->flags & DEVFL_FREED) { in flush()
371 *dd = d->next; in flush()
372 doomed = d; in flush()
374 dd = &d->next; in flush()
376 spin_unlock(&d->lock); in flush()
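
The flush() hits trace three passes over devlist. Pass one, under the list lock, marks eligible devices DEVFL_TKILL and calls aoedev_downdev() without sleeping. Pass two drops the locks to call freedev(), which may sleep, for anything killed but not yet freeing, and rescans from the top. Pass three unlinks devices that reached DEVFL_FREED and frees them. A condensed sketch of that structure; the devlist_lock name, the restart label, the exiting/specified/all flags from the argument parsing (not shown in the hits), and the final kfree calls are assumptions the hits only imply:

        /* pass one: without sleeping, take down matching devices */
        spin_lock_irqsave(&devlist_lock, flags);
        for (d = devlist; d; d = d->next) {
                spin_lock(&d->lock);
                if (exiting) {
                        ;               /* module exit: take every device down */
                } else if (specified) {
                        if (!user_req(buf, cnt, d))
                                goto cont;
                } else if ((!all && (d->flags & DEVFL_UP))
                || d->flags & skipflags
                || d->nopen
                || d->ref)
                        goto cont;      /* busy, in use, or already being handled */

                aoedev_downdev(d);
                d->flags |= DEVFL_TKILL;
cont:
                spin_unlock(&d->lock);
        }
        spin_unlock_irqrestore(&devlist_lock, flags);

        /* pass two: freedev() may sleep, so drop the locks and rescan */
restart:
        spin_lock_irqsave(&devlist_lock, flags);
        for (d = devlist; d; d = d->next) {
                spin_lock(&d->lock);
                if (d->flags & DEVFL_TKILL
                && !(d->flags & DEVFL_FREEING)) {
                        spin_unlock(&d->lock);
                        spin_unlock_irqrestore(&devlist_lock, flags);
                        freedev(d);
                        goto restart;
                }
                spin_unlock(&d->lock);
        }

        /* pass three (devlist_lock still held): unlink and free finished devices */
        for (dd = &devlist, d = *dd; d; d = *dd) {
                struct aoedev *doomed = NULL;

                spin_lock(&d->lock);
                if (d->flags & DEVFL_FREED) {
                        *dd = d->next;
                        doomed = d;
                } else
                        dd = &d->next;
                spin_unlock(&d->lock);
                if (doomed) {
                        kfree(doomed->targets);
                        kfree(doomed);
                }
        }
        spin_unlock_irqrestore(&devlist_lock, flags);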
421 skbpoolfree(struct aoedev *d) in skbpoolfree() argument
425 skb_queue_walk_safe(&d->skbpool, skb, tmp) in skbpoolfree()
428 __skb_queue_head_init(&d->skbpool); in skbpoolfree()
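
skbpoolfree() drains the device's pool of preallocated skbs with the safe queue iterator (each skb may be freed while walking) and then reinitializes the queue head. The per-skb free call is not in the hits; skbfree() is assumed to be the driver's own helper for returning these buffers:

static void
skbpoolfree(struct aoedev *d)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&d->skbpool, skb, tmp)
                skbfree(skb);           /* assumed driver-local free helper */

        __skb_queue_head_init(&d->skbpool);
}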
435 struct aoedev *d; in aoedev_by_aoeaddr() local
442 for (d=devlist; d; d=d->next) in aoedev_by_aoeaddr()
443 if (d->aoemajor == maj && d->aoeminor == min) { in aoedev_by_aoeaddr()
444 spin_lock(&d->lock); in aoedev_by_aoeaddr()
445 if (d->flags & DEVFL_TKILL) { in aoedev_by_aoeaddr()
446 spin_unlock(&d->lock); in aoedev_by_aoeaddr()
447 d = NULL; in aoedev_by_aoeaddr()
450 d->ref++; in aoedev_by_aoeaddr()
451 spin_unlock(&d->lock); in aoedev_by_aoeaddr()
454 if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0) in aoedev_by_aoeaddr()
456 d = kcalloc(1, sizeof *d, GFP_ATOMIC); in aoedev_by_aoeaddr()
457 if (!d) in aoedev_by_aoeaddr()
459 d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC); in aoedev_by_aoeaddr()
460 if (!d->targets) { in aoedev_by_aoeaddr()
461 kfree(d); in aoedev_by_aoeaddr()
462 d = NULL; in aoedev_by_aoeaddr()
465 d->ntargets = NTARGETS; in aoedev_by_aoeaddr()
466 INIT_WORK(&d->work, aoecmd_sleepwork); in aoedev_by_aoeaddr()
467 spin_lock_init(&d->lock); in aoedev_by_aoeaddr()
468 skb_queue_head_init(&d->skbpool); in aoedev_by_aoeaddr()
469 init_timer(&d->timer); in aoedev_by_aoeaddr()
470 d->timer.data = (ulong) d; in aoedev_by_aoeaddr()
471 d->timer.function = dummy_timer; in aoedev_by_aoeaddr()
472 d->timer.expires = jiffies + HZ; in aoedev_by_aoeaddr()
473 add_timer(&d->timer); in aoedev_by_aoeaddr()
474 d->bufpool = NULL; /* defer to aoeblk_gdalloc */ in aoedev_by_aoeaddr()
475 d->tgt = d->targets; in aoedev_by_aoeaddr()
476 d->ref = 1; in aoedev_by_aoeaddr()
478 INIT_LIST_HEAD(&d->factive[i]); in aoedev_by_aoeaddr()
479 INIT_LIST_HEAD(&d->rexmitq); in aoedev_by_aoeaddr()
480 d->sysminor = sysminor; in aoedev_by_aoeaddr()
481 d->aoemajor = maj; in aoedev_by_aoeaddr()
482 d->aoeminor = min; in aoedev_by_aoeaddr()
483 d->rttavg = RTTAVG_INIT; in aoedev_by_aoeaddr()
484 d->rttdev = RTTDEV_INIT; in aoedev_by_aoeaddr()
485 d->next = devlist; in aoedev_by_aoeaddr()
486 devlist = d; in aoedev_by_aoeaddr()
489 return d; in aoedev_by_aoeaddr()
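
aoedev_by_aoeaddr() is the lookup-or-create path for devlist: scan for a device with the requested AoE major/minor, skip it if it is already marked DEVFL_TKILL, take a reference on a hit, and otherwise, when do_alloc is set and minor_get() can reserve a system minor, allocate and initialize a fresh aoedev and push it onto the list. GFP_ATOMIC is used because the allocation happens under a spinlock. The devlist_lock around the whole function, the goto out exits, and the NFACTIVE loop bound are assumptions; the field initialization follows the hits:

struct aoedev *
aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
{
        struct aoedev *d;
        int i;
        ulong flags;
        ulong sysminor = 0;

        spin_lock_irqsave(&devlist_lock, flags);        /* assumed list lock */

        for (d = devlist; d; d = d->next)
                if (d->aoemajor == maj && d->aoeminor == min) {
                        spin_lock(&d->lock);
                        if (d->flags & DEVFL_TKILL) {
                                spin_unlock(&d->lock);
                                d = NULL;               /* dying device: treat as absent */
                                goto out;
                        }
                        d->ref++;                       /* caller gets a reference */
                        spin_unlock(&d->lock);
                        break;
                }
        if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
                goto out;
        d = kcalloc(1, sizeof *d, GFP_ATOMIC);
        if (!d)
                goto out;
        d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
        if (!d->targets) {
                kfree(d);
                d = NULL;
                goto out;
        }
        d->ntargets = NTARGETS;
        INIT_WORK(&d->work, aoecmd_sleepwork);
        spin_lock_init(&d->lock);
        skb_queue_head_init(&d->skbpool);
        init_timer(&d->timer);                  /* old timer API, as in the hits */
        d->timer.data = (ulong)d;
        d->timer.function = dummy_timer;
        d->timer.expires = jiffies + HZ;
        add_timer(&d->timer);
        d->bufpool = NULL;                      /* defer to aoeblk_gdalloc */
        d->tgt = d->targets;
        d->ref = 1;
        for (i = 0; i < NFACTIVE; i++)          /* assumed bound for factive[] */
                INIT_LIST_HEAD(&d->factive[i]);
        INIT_LIST_HEAD(&d->rexmitq);
        d->sysminor = sysminor;
        d->aoemajor = maj;
        d->aoeminor = min;
        d->rttavg = RTTAVG_INIT;
        d->rttdev = RTTDEV_INIT;
        d->next = devlist;
        devlist = d;
out:
        spin_unlock_irqrestore(&devlist_lock, flags);
        return d;
}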
493 freetgt(struct aoedev *d, struct aoetgt *t) in freetgt() argument