dtc 547 arch/ia64/include/asm/pal.h dtc : 1, /* Fail in data TC */
dtc 729 arch/ia64/include/asm/pal.h #define pmci_tlb_data_translation_cache_fail pme_tlb.dtc

dtc 634 arch/ia64/kernel/mca_drv.c && !(ptci->itr || ptci->dtc || ptci->itc))

dtc 112 drivers/ata/pata_cs5536.c u32 dtc;
dtc 114 drivers/ata/pata_cs5536.c cs5536_read(pdev, DTC, &dtc);
dtc 115 drivers/ata/pata_cs5536.c dtc &= ~(IDE_DRV_MASK << dshift);
dtc 116 drivers/ata/pata_cs5536.c dtc |= tim << dshift;
dtc 117 drivers/ata/pata_cs5536.c cs5536_write(pdev, DTC, dtc);

dtc 110 drivers/dma/dma-jz4780.c uint32_t dtc;
dtc 349 drivers/dma/dma-jz4780.c desc->dtc = len >> jzchan->transfer_shift;
dtc 390 drivers/dma/dma-jz4780.c desc->desc[i].dtc |=
dtc 442 drivers/dma/dma-jz4780.c desc->desc[i].dtc |=
dtc 473 drivers/dma/dma-jz4780.c desc->desc[0].dtc = len >> jzchan->transfer_shift;
dtc 540 drivers/dma/dma-jz4780.c jzchan->desc->desc[jzchan->curr_hwdesc].dtc);
dtc 624 drivers/dma/dma-jz4780.c count += desc->desc[i].dtc & GENMASK(23, 0);
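The drivers/dma/dma-jz4780.c hits above revolve around a 24-bit hardware descriptor field: dtc holds the transfer length expressed in transfer units (len >> transfer_shift), and the hits at lines 540 and 624 read it back, masking GENMASK(23, 0) to recover the remaining count. A minimal standalone sketch of that arithmetic, with made-up values and a plain constant in place of the kernel's GENMASK() macro:

    /*
     * Standalone sketch, not driver code: the 24-bit dtc field as the
     * dma-jz4780.c lines above use it. transfer_shift and the 24-bit
     * width come from those lines; the values below are illustrative.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define JZ_DMA_DTC_MASK ((1u << 24) - 1)    /* GENMASK(23, 0) */

    int main(void)
    {
            uint32_t transfer_shift = 2;    /* e.g. 4-byte transfer units */
            uint32_t len = 4096;            /* transfer length in bytes */

            /* As at dma-jz4780.c:349 - length converted to transfer units. */
            uint32_t dtc = len >> transfer_shift;

            /* As at dma-jz4780.c:624 - remaining count from the low 24 bits,
             * converted back to bytes for reporting. */
            uint32_t residue = (dtc & JZ_DMA_DTC_MASK) << transfer_shift;

            printf("dtc=%u, residue=%u bytes\n", dtc, residue);
            return 0;
    }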
dtc 237 drivers/dma/dmatest.c struct dmatest_chan *dtc;
dtc 239 drivers/dma/dmatest.c list_for_each_entry(dtc, &info->channels, node) {
dtc 242 drivers/dma/dmatest.c list_for_each_entry(thread, &dtc->threads, node) {
dtc 253 drivers/dma/dmatest.c struct dmatest_chan *dtc;
dtc 255 drivers/dma/dmatest.c list_for_each_entry(dtc, &info->channels, node) {
dtc 258 drivers/dma/dmatest.c list_for_each_entry(thread, &dtc->threads, node) {
dtc 913 drivers/dma/dmatest.c static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
dtc 919 drivers/dma/dmatest.c list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
dtc 929 drivers/dma/dmatest.c dmaengine_terminate_sync(dtc->chan);
dtc 931 drivers/dma/dmatest.c kfree(dtc);
dtc 935 drivers/dma/dmatest.c struct dmatest_chan *dtc, enum dma_transaction_type type)
dtc 939 drivers/dma/dmatest.c struct dma_chan *chan = dtc->chan;
dtc 962 drivers/dma/dmatest.c thread->chan = dtc->chan;
dtc 978 drivers/dma/dmatest.c list_add_tail(&thread->node, &dtc->threads);
dtc 988 drivers/dma/dmatest.c struct dmatest_chan *dtc;
dtc 993 drivers/dma/dmatest.c dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
dtc 994 drivers/dma/dmatest.c if (!dtc) {
dtc 999 drivers/dma/dmatest.c dtc->chan = chan;
dtc 1000 drivers/dma/dmatest.c INIT_LIST_HEAD(&dtc->threads);
dtc 1004 drivers/dma/dmatest.c cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
dtc 1011 drivers/dma/dmatest.c cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
dtc 1017 drivers/dma/dmatest.c cnt = dmatest_add_threads(info, dtc, DMA_XOR);
dtc 1021 drivers/dma/dmatest.c cnt = dmatest_add_threads(info, dtc, DMA_PQ);
dtc 1028 drivers/dma/dmatest.c list_add_tail(&dtc->node, &info->channels);
dtc 1098 drivers/dma/dmatest.c struct dmatest_chan *dtc;
dtc 1101 drivers/dma/dmatest.c list_for_each_entry(dtc, &info->channels, node) {
dtc 1105 drivers/dma/dmatest.c list_for_each_entry(thread, &dtc->threads, node) {
dtc 1110 drivers/dma/dmatest.c thread_count, dma_chan_name(dtc->chan));
dtc 1116 drivers/dma/dmatest.c struct dmatest_chan *dtc, *_dtc;
dtc 1119 drivers/dma/dmatest.c list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
dtc 1120 drivers/dma/dmatest.c list_del(&dtc->node);
dtc 1121 drivers/dma/dmatest.c chan = dtc->chan;
dtc 1122 drivers/dma/dmatest.c dmatest_cleanup_channel(dtc);
dtc 1186 drivers/dma/dmatest.c struct dmatest_chan *dtc;
dtc 1201 drivers/dma/dmatest.c list_for_each_entry(dtc, &info->channels, node) {
dtc 1202 drivers/dma/dmatest.c if (strcmp(dma_chan_name(dtc->chan),
dtc 1204 drivers/dma/dmatest.c dtc = list_last_entry(&info->channels,
dtc 1208 drivers/dma/dmatest.c dma_chan_name(dtc->chan),
dtc 1219 drivers/dma/dmatest.c dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
dtc 1221 drivers/dma/dmatest.c if (dtc->chan) {
dtc 1228 drivers/dma/dmatest.c if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
dtc 1231 drivers/dma/dmatest.c strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
dtc 1271 drivers/dma/dmatest.c struct dmatest_chan *dtc;
dtc 1274 drivers/dma/dmatest.c list_for_each_entry(dtc, &info->channels, node) {
dtc 1278 drivers/dma/dmatest.c list_for_each_entry(thread, &dtc->threads, node) {
dtc 1282 drivers/dma/dmatest.c thread_count, dma_chan_name(dtc->chan));
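The drivers/dma/dmatest.c hits trace a two-level bookkeeping pattern: each dmatest_chan is allocated, given an empty threads list (INIT_LIST_HEAD), populated by dmatest_add_threads() once per supported capability, and appended to info->channels; teardown walks the channel list with list_for_each_entry_safe() and frees each channel's threads before the channel itself. A plain-C analogue of that pattern, with singly linked lists standing in for the kernel's struct list_head and all names illustrative:

    /*
     * Userspace sketch of the channel/thread bookkeeping suggested by
     * the dmatest.c lines above; not the module's actual code.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct test_thread {
            int id;
            struct test_thread *next;
    };

    struct test_chan {
            const char *name;
            struct test_thread *threads;
            struct test_chan *next;
    };

    /* Mirrors dmatest_cleanup_channel(): free threads, then the channel. */
    static void cleanup_channel(struct test_chan *c)
    {
            struct test_thread *t = c->threads;

            while (t) {
                    struct test_thread *next = t->next;
                    free(t);
                    t = next;
            }
            free(c);
    }

    int main(void)
    {
            struct test_chan *channels = NULL;
            struct test_chan *c = calloc(1, sizeof(*c));

            if (!c)
                    return 1;
            c->name = "dma0chan0";

            /* Add two worker threads, as dmatest_add_threads() would. */
            for (int i = 0; i < 2; i++) {
                    struct test_thread *t = calloc(1, sizeof(*t));
                    if (!t)
                            return 1;
                    t->id = i;
                    t->next = c->threads;
                    c->threads = t;
            }
            c->next = channels;
            channels = c;

            /* Tear everything down channel by channel, children first. */
            while (channels) {
                    struct test_chan *next = channels->next;
                    printf("cleaning up %s\n", channels->name);
                    cleanup_channel(channels);
                    channels = next;
            }
            return 0;
    }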
dtc 84 drivers/ide/cs5536.c u32 dtc;
dtc 86 drivers/ide/cs5536.c cs5536_read(pdev, DTC, &dtc);
dtc 87 drivers/ide/cs5536.c dtc &= ~(IDE_DRV_MASK << dshift);
dtc 88 drivers/ide/cs5536.c dtc |= tim << dshift;
dtc 89 drivers/ide/cs5536.c cs5536_write(pdev, DTC, dtc);

dtc 2065 drivers/pinctrl/tegra/pinctrl-tegra20.c MUX_PG(dtc, RSVD1, RSVD2, VI, RSVD4, 0x14, 13, 0x84, 26, 0xa0, 22),

dtc 169 mm/page-writeback.c static bool mdtc_valid(struct dirty_throttle_control *dtc)
dtc 171 mm/page-writeback.c return dtc->dom;
dtc 174 mm/page-writeback.c static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
dtc 176 mm/page-writeback.c return dtc->dom;
dtc 223 mm/page-writeback.c static bool mdtc_valid(struct dirty_throttle_control *dtc)
dtc 228 mm/page-writeback.c static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
dtc 393 mm/page-writeback.c static void domain_dirty_limits(struct dirty_throttle_control *dtc)
dtc 395 mm/page-writeback.c const unsigned long available_memory = dtc->avail;
dtc 396 mm/page-writeback.c struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
dtc 443 mm/page-writeback.c dtc->thresh = thresh;
dtc 444 mm/page-writeback.c dtc->bg_thresh = bg_thresh;
dtc 764 mm/page-writeback.c static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
dtc 766 mm/page-writeback.c struct wb_domain *dom = dtc_dom(dtc);
dtc 767 mm/page-writeback.c unsigned long thresh = dtc->thresh;
dtc 775 mm/page-writeback.c fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
dtc 782 mm/page-writeback.c wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
dtc 904 mm/page-writeback.c static void wb_position_ratio(struct dirty_throttle_control *dtc)
dtc 906 mm/page-writeback.c struct bdi_writeback *wb = dtc->wb;
dtc 908 mm/page-writeback.c unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
dtc 909 mm/page-writeback.c unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
dtc 910 mm/page-writeback.c unsigned long wb_thresh = dtc->wb_thresh;
dtc 918 mm/page-writeback.c dtc->pos_ratio = 0;
dtc 920 mm/page-writeback.c if (unlikely(dtc->dirty >= limit))
dtc 929 mm/page-writeback.c pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
dtc 959 mm/page-writeback.c if (dtc->wb_dirty < 8) {
dtc 960 mm/page-writeback.c dtc->pos_ratio = min_t(long long, pos_ratio * 2,
dtc 965 mm/page-writeback.c if (dtc->wb_dirty >= wb_thresh)
dtc 969 mm/page-writeback.c dtc->wb_bg_thresh);
dtc 974 mm/page-writeback.c wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
dtc 998 mm/page-writeback.c dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
dtc 1033 mm/page-writeback.c if (unlikely(wb_thresh > dtc->thresh))
dtc 1034 mm/page-writeback.c wb_thresh = dtc->thresh;
dtc 1042 mm/page-writeback.c wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
dtc 1047 mm/page-writeback.c x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
dtc 1057 mm/page-writeback.c span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
dtc 1060 mm/page-writeback.c if (dtc->wb_dirty < x_intercept - span / 4) {
dtc 1061 mm/page-writeback.c pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
dtc 1072 mm/page-writeback.c if (dtc->wb_dirty < x_intercept) {
dtc 1073 mm/page-writeback.c if (dtc->wb_dirty > x_intercept / 8)
dtc 1075 mm/page-writeback.c dtc->wb_dirty);
dtc 1080 mm/page-writeback.c dtc->pos_ratio = pos_ratio;
dtc 1133 mm/page-writeback.c static void update_dirty_limit(struct dirty_throttle_control *dtc)
dtc 1135 mm/page-writeback.c struct wb_domain *dom = dtc_dom(dtc);
dtc 1136 mm/page-writeback.c unsigned long thresh = dtc->thresh;
dtc 1152 mm/page-writeback.c thresh = max(thresh, dtc->dirty);
dtc 1162 mm/page-writeback.c static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
dtc 1165 mm/page-writeback.c struct wb_domain *dom = dtc_dom(dtc);
dtc 1175 mm/page-writeback.c update_dirty_limit(dtc);
dtc 1187 mm/page-writeback.c static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
dtc 1191 mm/page-writeback.c struct bdi_writeback *wb = dtc->wb;
dtc 1192 mm/page-writeback.c unsigned long dirty = dtc->dirty;
dtc 1193 mm/page-writeback.c unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
dtc 1194 mm/page-writeback.c unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
dtc 1215 mm/page-writeback.c dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
dtc 1304 mm/page-writeback.c dirty = dtc->wb_dirty;
dtc 1305 mm/page-writeback.c if (dtc->wb_dirty < 8)
dtc 1306 mm/page-writeback.c setpoint = dtc->wb_dirty + 1;
dtc 1308 mm/page-writeback.c setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
dtc 1513 mm/page-writeback.c static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
dtc 1515 mm/page-writeback.c struct bdi_writeback *wb = dtc->wb;
dtc 1531 mm/page-writeback.c dtc->wb_thresh = __wb_calc_thresh(dtc);
dtc 1532 mm/page-writeback.c dtc->wb_bg_thresh = dtc->thresh ?
dtc 1533 mm/page-writeback.c div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
dtc 1545 mm/page-writeback.c if (dtc->wb_thresh < 2 * wb_stat_error()) {
dtc 1547 mm/page-writeback.c dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
dtc 1550 mm/page-writeback.c dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
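The mm/page-writeback.c hits all operate on struct dirty_throttle_control. One self-contained piece of the arithmetic is at lines 1531-1533 above: the per-writeback background threshold keeps the same ratio to wb_thresh that the global bg_thresh keeps to thresh, with a guard against a zero divisor. A standalone sketch with made-up page counts, using plain 64-bit division in place of the kernel's div_u64():

    /*
     * Sketch of the wb_bg_thresh scaling at mm/page-writeback.c:1531-1533;
     * the numbers are illustrative, not kernel defaults.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t thresh = 1000;      /* global dirty threshold (pages) */
            uint64_t bg_thresh = 500;    /* global background threshold */
            uint64_t wb_thresh = 200;    /* this writeback's share of thresh */

            /* wb_bg_thresh = wb_thresh * bg_thresh / thresh, guarding /0. */
            uint64_t wb_bg_thresh = thresh ?
                    wb_thresh * bg_thresh / thresh : 0;

            printf("wb_bg_thresh = %llu pages\n",
                   (unsigned long long)wb_bg_thresh);
            return 0;
    }

With these numbers wb_bg_thresh comes out to 100 pages, preserving the 50% bg_thresh/thresh ratio at the per-writeback scale.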