container (identifier cross-reference; entries are grouped by file, each line giving the source line number followed by the text of that line)

arch/s390/include/asm/sysinfo.h
   174  struct topology_container container;

arch/s390/kernel/topology.c
   189  drawer->id = tle->container.id;
   193  book->id = tle->container.id;
   197  socket->id = tle->container.id;

drivers/base/transport_class.c
    90  atc->container.class = &atc->tclass.class;
    91  attribute_container_set_no_classdevs(&atc->container);
    92  error = attribute_container_register(&atc->container);
   111  if (unlikely(attribute_container_unregister(&atc->container)))

drivers/gpu/drm/amd/display/dc/basics/vector.c
    37  vector->container = NULL;
    45  vector->container = kcalloc(capacity, struct_size, GFP_KERNEL);
    46  if (vector->container == NULL)
    64  vector->container = NULL;
    72  vector->container = kcalloc(count, struct_size, GFP_KERNEL);
    74  if (vector->container == NULL)
    83  vector->container + i * struct_size,
   135  kfree(vector->container);
   160  if (vector->container == NULL || index >= vector->count)
   162  return vector->container + (index * vector->struct_size);
   174  vector->container + (index * vector->struct_size),
   175  vector->container + ((index + 1) * vector->struct_size),
   219  insert_address = vector->container + (vector->struct_size * position);
   276  memmove(vec_cloned->container, vector->container,
   294  new_container = krealloc(vector->container,
   298  vector->container = new_container;

drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
   149  return (struct i2c_payload *)p->payloads.container;

drivers/gpu/drm/amd/display/include/vector.h
    30  uint8_t *container;

drivers/net/wireless/ath/ath6kl/htc_mbox.c
   455  struct list_head container;
   461  INIT_LIST_HEAD(&container);
   462  list_add_tail(&packet->list, &container);
   464  htc_tx_complete(endpoint, &container);
  1154  struct list_head discard_q, container;
  1183  INIT_LIST_HEAD(&container);
  1184  list_add_tail(&packet->list, &container);
  1185  htc_tx_complete(endpoint, &container);
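The ath6kl entries above (htc_mbox.c) and below (htc_pipe.c) repeat one idiom: a single completed packet is placed on a stack-allocated list_head named container so the driver can reuse its list-based completion handlers for the one-packet case. Below is a minimal userspace sketch of that idiom, assuming stand-ins for the kernel's list helpers and container_of(); struct pkt and complete_list() are illustrative names, not the driver's.

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's struct list_head and container_of(). */
struct list_head { struct list_head *next, *prev; };

#define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Hypothetical packet type; ath6kl uses struct htc_packet with a 'list' member. */
struct pkt {
	int id;
	struct list_head list;
};

/* A completion path written once, for whole lists of packets. */
static void complete_list(struct list_head *queue)
{
	struct list_head *pos;

	for (pos = queue->next; pos != queue; pos = pos->next) {
		struct pkt *p = container_of(pos, struct pkt, list);
		printf("completed packet %d\n", p->id);
	}
}

int main(void)
{
	struct pkt one = { .id = 42 };
	struct list_head container;	/* temporary one-element "container" list */

	INIT_LIST_HEAD(&container);
	list_add_tail(&one.list, &container);
	complete_list(&container);	/* reuse the list-based handler for one packet */
	return 0;
}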
drivers/net/wireless/ath/ath6kl/htc_pipe.c
    81  struct list_head container;
    84  INIT_LIST_HEAD(&container);
    85  list_add_tail(&packet->list, &container);
    88  do_send_completion(ep, &container);
   934  struct list_head container;
   935  INIT_LIST_HEAD(&container);
   936  list_add_tail(&packet->list, &container);
   939  do_recv_completion(ep, &container);
  1098  struct list_head container;
  1120  INIT_LIST_HEAD(&container);
  1121  list_add_tail(&packet->list, &container);
  1124  do_recv_completion(ep, &container);

drivers/scsi/aacraid/aachba.c
   957  int container;
   964  for (container = 0; container < dev->maximum_num_containers;
   965  container++) {
   967  if (scmd_id(scsicmd) == container) {
   969  dev->fsa_dev[container].identifier,

drivers/scsi/aacraid/commsup.c
  1065  u32 channel, id, lun, container;
  1078  container = channel = id = lun = (u32)-1;
  1090  container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1091  if ((container >> 28)) {
  1092  container = (u32)-1;
  1095  channel = (container >> 24) & 0xF;
  1097  container = (u32)-1;
  1100  id = container & 0xFFFF;
  1102  container = (u32)-1;
  1105  lun = (container >> 16) & 0xFF;
  1106  container = (u32)-1;
  1116  container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1117  if (container >= dev->maximum_num_containers)
  1129  CONTAINER_TO_CHANNEL(container),
  1130  CONTAINER_TO_ID(container),
  1131  CONTAINER_TO_LUN(container));
  1133  dev->fsa_dev[container].config_needed = CHANGE;
  1134  dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
  1135  dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1145  if (container != (u32)-1) {
  1146  if (container >= dev->maximum_num_containers)
  1148  if ((dev->fsa_dev[container].config_waiting_on ==
  1150  time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1151  dev->fsa_dev[container].config_waiting_on = 0;
  1152  } else for (container = 0;
  1153  container < dev->maximum_num_containers; ++container) {
  1154  if ((dev->fsa_dev[container].config_waiting_on ==
  1156  time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1157  dev->fsa_dev[container].config_waiting_on = 0;
  1171  container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1172  if (container >= dev->maximum_num_containers)
  1174  dev->fsa_dev[container].config_needed = ADD;
  1175  dev->fsa_dev[container].config_waiting_on =
  1177  dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1184  container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1185  if (container >= dev->maximum_num_containers)
  1187  dev->fsa_dev[container].config_needed = DELETE;
  1188  dev->fsa_dev[container].config_waiting_on =
  1190  dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1198  container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1199  if (container >= dev->maximum_num_containers)
  1201  if (dev->fsa_dev[container].config_waiting_on &&
  1202  time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1204  dev->fsa_dev[container].config_needed = CHANGE;
  1205  dev->fsa_dev[container].config_waiting_on =
  1207  dev->fsa_dev[container].config_waiting_stamp = jiffies;
  1215  container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
  1216  if ((container >> 28)) {
  1217  container = (u32)-1;
  1220  channel = (container >> 24) & 0xF;
  1222  container = (u32)-1;
  1225  id = container & 0xFFFF;
  1227  container = (u32)-1;
  1230  lun = (container >> 16) & 0xFF;
  1231  container = (u32)-1;
  1260  container = le32_to_cpu(
  1262  if ((container >> 28)) {
  1263  container = (u32)-1;
  1266  channel = (container >> 24) & 0xF;
  1268  container = (u32)-1;
  1271  id = container & 0xFFFF;
  1272  lun = (container >> 16) & 0xFF;
  1273  container = (u32)-1;
  1302  if (container != (u32)-1) {
  1303  if (container >= dev->maximum_num_containers)
  1305  if ((dev->fsa_dev[container].config_waiting_on ==
  1307  time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1308  dev->fsa_dev[container].config_waiting_on = 0;
  1309  } else for (container = 0;
  1310  container < dev->maximum_num_containers; ++container) {
  1311  if ((dev->fsa_dev[container].config_waiting_on ==
  1313  time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
  1314  dev->fsa_dev[container].config_waiting_on = 0;
  1330  for (container = 0;
  1331  container < dev->maximum_num_containers;
  1332  ++container) {
  1337  dev->fsa_dev[container].config_waiting_on =
  1339  dev->fsa_dev[container].config_needed = ADD;
  1340  dev->fsa_dev[container].config_waiting_stamp =
  1347  for (container = 0;
  1348  container < dev->maximum_num_containers;
  1349  ++container) {
  1354  dev->fsa_dev[container].config_waiting_on =
  1356  dev->fsa_dev[container].config_needed = DELETE;
  1357  dev->fsa_dev[container].config_waiting_stamp =
  1364  container = 0;
  1367  for (; container < dev->maximum_num_containers; ++container) {
  1368  if ((dev->fsa_dev[container].config_waiting_on == 0) &&
  1369  (dev->fsa_dev[container].config_needed != NOTHING) &&
  1370  time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
  1372  dev->fsa_dev[container].config_needed;
  1373  dev->fsa_dev[container].config_needed = NOTHING;
  1374  channel = CONTAINER_TO_CHANNEL(container);
  1375  id = CONTAINER_TO_ID(container);
  1376  lun = CONTAINER_TO_LUN(container);
  1403  if (dev->fsa_dev[container].valid == 1)
  1404  dev->fsa_dev[container].valid = 2;
  1405  aac_probe_container(dev, container);
  1436  && (!dev->fsa_dev[container].valid)) {
  1460  container++;
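The aacraid/commsup.c references above show the AIF event handler decoding a packed 32-bit container word taken from the event payload: bits 31-28 must be clear, bits 27-24 carry the channel, bits 23-16 the LUN and bits 15-0 the target id, with the value reset to (u32)-1 whenever a field fails a range check. Here is a small sketch of just the bit decoding; struct aif_addr and decode_aif_container() are made-up names, and the driver additionally range-checks each field against the adapter limits.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative layout matching the shifts and masks used above:
 * bits 31-28 reserved (must be zero), 27-24 channel, 23-16 LUN, 15-0 id.
 */
struct aif_addr {
	uint8_t  channel;
	uint8_t  lun;
	uint16_t id;
};

static int decode_aif_container(uint32_t word, struct aif_addr *out)
{
	if (word >> 28)			/* reserved bits set: not a valid address */
		return -1;

	out->channel = (word >> 24) & 0xF;
	out->lun     = (word >> 16) & 0xFF;
	out->id      = word & 0xFFFF;
	return 0;
}

int main(void)
{
	struct aif_addr a;

	if (decode_aif_container(0x020A0003, &a) == 0)
		printf("channel %u lun %u id %u\n", a.channel, a.lun, a.id);
	return 0;
}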
drivers/vfio/vfio.c
    76  struct vfio_container *container;
   291  static void vfio_container_get(struct vfio_container *container)
   293  kref_get(&container->kref);
   298  struct vfio_container *container;
   299  container = container_of(kref, struct vfio_container, kref);
   301  kfree(container);
   304  static void vfio_container_put(struct vfio_container *container)
   306  kref_put(&container->kref, vfio_container_release);
   993  wait_event(group->container_q, !group->container);
  1004  static long vfio_ioctl_check_extension(struct vfio_container *container,
  1010  down_read(&container->group_lock);
  1012  driver = container->iommu_driver;
  1029  if (!list_empty(&container->group_list) &&
  1030  (container->noiommu !=
  1047  ret = driver->ops->ioctl(container->iommu_data,
  1051  up_read(&container->group_lock);
  1057  static int __vfio_container_attach_groups(struct vfio_container *container,
  1064  list_for_each_entry(group, &container->group_list, container_next) {
  1073  list_for_each_entry_continue_reverse(group, &container->group_list,
  1081  static long vfio_ioctl_set_iommu(struct vfio_container *container,
  1087  down_write(&container->group_lock);
  1097  if (list_empty(&container->group_list) || container->iommu_driver) {
  1098  up_write(&container->group_lock);
  1111  if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
  1137  ret = __vfio_container_attach_groups(container, driver, data);
  1144  container->iommu_driver = driver;
  1145  container->iommu_data = data;
  1150  up_write(&container->group_lock);
  1158  struct vfio_container *container = filep->private_data;
  1163  if (!container)
  1171  ret = vfio_ioctl_check_extension(container, arg);
  1174  ret = vfio_ioctl_set_iommu(container, arg);
  1177  driver = container->iommu_driver;
  1178  data = container->iommu_data;
  1198  struct vfio_container *container;
  1200  container = kzalloc(sizeof(*container), GFP_KERNEL);
  1201  if (!container)
  1204  INIT_LIST_HEAD(&container->group_list);
  1205  init_rwsem(&container->group_lock);
  1206  kref_init(&container->kref);
  1208  filep->private_data = container;
  1215  struct vfio_container *container = filep->private_data;
  1219  vfio_container_put(container);
  1231  struct vfio_container *container = filep->private_data;
  1235  driver = container->iommu_driver;
  1237  ret = driver->ops->read(container->iommu_data,
  1246  struct vfio_container *container = filep->private_data;
  1250  driver = container->iommu_driver;
  1252  ret = driver->ops->write(container->iommu_data,
  1260  struct vfio_container *container = filep->private_data;
  1264  driver = container->iommu_driver;
  1266  ret = driver->ops->mmap(container->iommu_data, vma);
  1289  struct vfio_container *container = group->container;
  1292  down_write(&container->group_lock);
  1294  driver = container->iommu_driver;
  1296  driver->ops->detach_group(container->iommu_data,
  1299  group->container = NULL;
  1304  if (driver && list_empty(&container->group_list)) {
  1305  driver->ops->release(container->iommu_data);
  1307  container->iommu_driver = NULL;
  1308  container->iommu_data = NULL;
  1311  up_write(&container->group_lock);
  1313  vfio_container_put(container);
  1351  struct vfio_container *container;
  1371  container = f.file->private_data;
  1372  WARN_ON(!container); /* fget ensures we don't race vfio_release */
  1374  down_write(&container->group_lock);
  1377  if (!list_empty(&container->group_list) &&
  1378  container->noiommu != group->noiommu) {
  1383  driver = container->iommu_driver;
  1385  ret = driver->ops->attach_group(container->iommu_data,
  1391  group->container = container;
  1392  container->noiommu = group->noiommu;
  1393  list_add(&group->container_next, &container->group_list);
  1396  vfio_container_get(container);
  1400  up_write(&container->group_lock);
  1420  if (!group->container->iommu_driver || !vfio_group_viable(group)) {
  1437  !group->container->iommu_driver || !vfio_group_viable(group))
  1517  if (group->container)
  1590  if (group->container) {
  1780  return vfio_ioctl_check_extension(group->container, arg);
  1916  struct vfio_container *container;
  1935  container = group->container;
  1936  driver = container->iommu_driver;
  1938  ret = driver->ops->pin_pages(container->iommu_data, user_pfn,
  1962  struct vfio_container *container;
  1981  container = group->container;
  1982  driver = container->iommu_driver;
  1984  ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
  2001  struct vfio_container *container;
  2009  container = group->container;
  2010  driver = container->iommu_driver;
  2012  ret = driver->ops->register_notifier(container->iommu_data,
  2025  struct vfio_container *container;
  2033  container = group->container;
  2034  driver = container->iommu_driver;
  2036  ret = driver->ops->unregister_notifier(container->iommu_data,
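The vfio.c references above outline the lifetime of struct vfio_container: it is allocated in the open path (kzalloc plus kref_init), each attached group takes a reference with vfio_container_get(), vfio_container_put() drops one, and vfio_container_release() uses container_of() to get back from the embedded kref to the enclosing structure and free it. A userspace sketch of that embedded-refcount pattern follows, assuming simplified non-atomic stand-ins for kref (the kernel's kref is atomic) and a container reduced to the refcount alone.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins for the kernel's kref and container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0)
		release(k);
}

/* Much-reduced vfio_container: only the embedded refcount survives here;
 * the real one also holds group_list, group_lock, iommu_driver, iommu_data. */
struct vfio_container {
	struct kref kref;
};

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container =
		container_of(kref, struct vfio_container, kref);

	printf("releasing container\n");
	free(container);
}

int main(void)
{
	struct vfio_container *container = calloc(1, sizeof(*container));

	if (!container)
		return 1;

	kref_init(&container->kref);	/* open: refcount starts at 1 */
	kref_get(&container->kref);	/* a group attaches and takes a reference */
	kref_put(&container->kref, vfio_container_release);	/* group detaches */
	kref_put(&container->kref, vfio_container_release);	/* fd released: freed */
	return 0;
}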
drivers/vfio/vfio_iommu_spapr_tce.c
    73  static long tce_iommu_mm_set(struct tce_container *container)
    75  if (container->mm) {
    76  if (container->mm == current->mm)
    81  container->mm = current->mm;
    82  atomic_inc(&container->mm->mm_count);
    87  static long tce_iommu_prereg_free(struct tce_container *container,
    92  ret = mm_iommu_put(container->mm, tcemem->mem);
   102  static long tce_iommu_unregister_pages(struct tce_container *container,
   113  mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
   117  list_for_each_entry(tcemem, &container->prereg_list, next) {
   127  ret = tce_iommu_prereg_free(container, tcemem);
   129  mm_iommu_put(container->mm, mem);
   134  static long tce_iommu_register_pages(struct tce_container *container,
   146  mem = mm_iommu_get(container->mm, vaddr, entries);
   148  list_for_each_entry(tcemem, &container->prereg_list, next) {
   155  ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
   167  list_add(&tcemem->next, &container->prereg_list);
   169  container->enabled = true;
   174  mm_iommu_put(container->mm, mem);
   196  static inline bool tce_groups_attached(struct tce_container *container)
   198  return !list_empty(&container->group_list);
   201  static long tce_iommu_find_table(struct tce_container *container,
   207  struct iommu_table *tbl = container->tables[i];
   224  static int tce_iommu_find_free_table(struct tce_container *container)
   229  if (!container->tables[i])
   236  static int tce_iommu_enable(struct tce_container *container)
   243  if (container->enabled)
   275  if (!tce_groups_attached(container))
   278  tcegrp = list_first_entry(&container->group_list,
   287  ret = tce_iommu_mm_set(container);
   292  ret = account_locked_vm(container->mm, locked, true);
   296  container->locked_pages = locked;
   298  container->enabled = true;
   303  static void tce_iommu_disable(struct tce_container *container)
   305  if (!container->enabled)
   308  container->enabled = false;
   310  BUG_ON(!container->mm);
   311  account_locked_vm(container->mm, container->locked_pages, false);
   316  struct tce_container *container;
   323  container = kzalloc(sizeof(*container), GFP_KERNEL);
   324  if (!container)
   327  mutex_init(&container->lock);
   328  INIT_LIST_HEAD_RCU(&container->group_list);
   329  INIT_LIST_HEAD_RCU(&container->prereg_list);
   331  container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
   333  return container;
   336  static int tce_iommu_clear(struct tce_container *container,
   339  static void tce_iommu_free_table(struct tce_container *container,
   344  struct tce_container *container = iommu_data;
   349  while (tce_groups_attached(container)) {
   350  tcegrp = list_first_entry(&container->group_list,
   360  struct iommu_table *tbl = container->tables[i];
   365  tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
   366  tce_iommu_free_table(container, tbl);
   369  list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
   370  WARN_ON(tce_iommu_prereg_free(container, tcemem));
   372  tce_iommu_disable(container);
   373  if (container->mm)
   374  mmdrop(container->mm);
   375  mutex_destroy(&container->lock);
   377  kfree(container);
   380  static void tce_iommu_unuse_page(struct tce_container *container,
   389  static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
   396  mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
   409  static void tce_iommu_unuse_page_v2(struct tce_container *container,
   420  ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
   431  static int tce_iommu_clear(struct tce_container *container,
   463  ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa,
   471  if (container->v2) {
   472  tce_iommu_unuse_page_v2(container, tbl, entry);
   476  tce_iommu_unuse_page(container, oldhpa);
   499  static long tce_iommu_build(struct tce_container *container,
   515  if (!tce_page_is_contained(container->mm, hpa,
   523  ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
   526  tce_iommu_unuse_page(container, hpa);
   534  tce_iommu_unuse_page(container, hpa);
   540  tce_iommu_clear(container, tbl, entry, i);
   547  static long tce_iommu_build_v2(struct tce_container *container,
   560  ret = tce_iommu_prereg_ua_to_hpa(container,
   565  if (!tce_page_is_contained(container->mm, hpa,
   579  ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i,
   583  tce_iommu_unuse_page_v2(container, tbl, entry + i);
   591  tce_iommu_unuse_page_v2(container, tbl, entry + i);
   599  tce_iommu_clear(container, tbl, entry, i);
   606  static long tce_iommu_create_table(struct tce_container *container,
   621  ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
   634  static void tce_iommu_free_table(struct tce_container *container,
   640  account_locked_vm(container->mm, pages, false);
   643  static long tce_iommu_create_window(struct tce_container *container,
   652  num = tce_iommu_find_free_table(container);
   657  tcegrp = list_first_entry(&container->group_list,
   672  ret = tce_iommu_create_table(container, table_group, num,
   683  list_for_each_entry(tcegrp, &container->group_list, next) {
   691  container->tables[num] = tbl;
   699  list_for_each_entry(tcegrp, &container->group_list, next) {
   703  tce_iommu_free_table(container, tbl);
   708  static long tce_iommu_remove_window(struct tce_container *container,
   716  num = tce_iommu_find_table(container, start_addr, &tbl);
   723  list_for_each_entry(tcegrp, &container->group_list, next) {
   740  tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
   741  tce_iommu_free_table(container, tbl);
   742  container->tables[num] = NULL;
   747  static long tce_iommu_create_default_window(struct tce_container *container)
   754  if (!container->def_window_pending)
   757  if (!tce_groups_attached(container))
   760  tcegrp = list_first_entry(&container->group_list,
   766  ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
   771  container->def_window_pending = false;
   779  struct tce_container *container = iommu_data;
   802  BUG_ON(!container);
   803  if (container->mm && container->mm != current->mm)
   812  if (!tce_groups_attached(container))
   815  tcegrp = list_first_entry(&container->group_list,
   837  container->v2) {
   861  if (!container->enabled)
   876  ret = tce_iommu_create_default_window(container);
   880  num = tce_iommu_find_table(container, param.iova, &tbl);
   905  if (container->v2)
   906  ret = tce_iommu_build_v2(container, tbl,
   912  ret = tce_iommu_build(container, tbl,
   927  if (!container->enabled)
   943  ret = tce_iommu_create_default_window(container);
   947  num = tce_iommu_find_table(container, param.iova, &tbl);
   959  ret = tce_iommu_clear(container, tbl,
   969  if (!container->v2)
   975  ret = tce_iommu_mm_set(container);
   989  mutex_lock(&container->lock);
   990  ret = tce_iommu_register_pages(container, param.vaddr,
   992  mutex_unlock(&container->lock);
   999  if (!container->v2)
  1002  if (!container->mm)
  1018  mutex_lock(&container->lock);
  1019  ret = tce_iommu_unregister_pages(container, param.vaddr,
  1021  mutex_unlock(&container->lock);
  1026  if (container->v2)
  1029  mutex_lock(&container->lock);
  1030  ret = tce_iommu_enable(container);
  1031  mutex_unlock(&container->lock);
  1036  if (container->v2)
  1039  mutex_lock(&container->lock);
  1040  tce_iommu_disable(container);
  1041  mutex_unlock(&container->lock);
  1048  list_for_each_entry(tcegrp, &container->group_list, next) {
  1060  if (!container->v2)
  1063  ret = tce_iommu_mm_set(container);
  1067  if (!tce_groups_attached(container))
  1082  mutex_lock(&container->lock);
  1084  ret = tce_iommu_create_default_window(container);
  1086  ret = tce_iommu_create_window(container,
  1091  mutex_unlock(&container->lock);
  1101  if (!container->v2)
  1104  ret = tce_iommu_mm_set(container);
  1108  if (!tce_groups_attached(container))
  1123  if (container->def_window_pending && !remove.start_addr) {
  1124  container->def_window_pending = false;
  1128  mutex_lock(&container->lock);
  1130  ret = tce_iommu_remove_window(container, remove.start_addr);
  1132  mutex_unlock(&container->lock);
  1141  static void tce_iommu_release_ownership(struct tce_container *container,
  1147  struct iommu_table *tbl = container->tables[i];
  1152  tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
  1156  container->tables[i] = NULL;
  1160  static int tce_iommu_take_ownership(struct tce_container *container,
  1182  container->tables[i] = table_group->tables[i];
  1187  static void tce_iommu_release_ownership_ddw(struct tce_container *container,
  1198  if (container->tables[i])
  1204  static long tce_iommu_take_ownership_ddw(struct tce_container *container,
  1219  struct iommu_table *tbl = container->tables[i];
  1244  struct tce_container *container = iommu_data;
  1248  mutex_lock(&container->lock);
  1258  if (tce_groups_attached(container) && (!table_group->ops ||
  1266  list_for_each_entry(tcegrp, &container->group_list, next) {
  1294  if (container->v2) {
  1298  ret = tce_iommu_take_ownership(container, table_group);
  1300  if (!container->v2) {
  1304  ret = tce_iommu_take_ownership_ddw(container, table_group);
  1305  if (!tce_groups_attached(container) && !container->tables[0])
  1306  container->def_window_pending = true;
  1311  list_add(&tcegrp->next, &container->group_list);
  1319  mutex_unlock(&container->lock);
  1327  struct tce_container *container = iommu_data;
  1332  mutex_lock(&container->lock);
  1334  list_for_each_entry(tcegrp, &container->group_list, next) {
  1354  tce_iommu_release_ownership(container, table_group);
  1356  tce_iommu_release_ownership_ddw(container, table_group);
  1359  mutex_unlock(&container->lock);
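Several of the vfio_iommu_spapr_tce.c references above (tce_iommu_find_free_table(), tce_iommu_find_table(), and the container->tables[] assignments in the create/remove window paths) revolve around a small fixed array of DMA-window slots kept inside the container. A rough userspace sketch of that slot bookkeeping follows; struct dma_window and its field names are invented here, MAX_WINDOWS merely stands in for the driver's IOMMU_TABLE_GROUP_MAX_TABLES bound, and the real slots hold struct iommu_table pointers whose it_offset/it_size are compared against the requested IOVA.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_WINDOWS 2	/* stands in for IOMMU_TABLE_GROUP_MAX_TABLES */

struct dma_window {
	bool		in_use;
	unsigned long	offset;	/* first IOVA covered by this window */
	unsigned long	size;	/* bytes covered */
};

struct tce_container_sketch {
	struct dma_window tables[MAX_WINDOWS];
};

/* Return the first unused slot, or -1 if every window is already populated. */
static int find_free_table(struct tce_container_sketch *c)
{
	for (int i = 0; i < MAX_WINDOWS; i++)
		if (!c->tables[i].in_use)
			return i;
	return -1;
}

/* Return the slot whose window contains @iova, or -1 if none does. */
static int find_table(struct tce_container_sketch *c, unsigned long iova)
{
	for (int i = 0; i < MAX_WINDOWS; i++) {
		struct dma_window *w = &c->tables[i];

		if (w->in_use && iova >= w->offset && iova < w->offset + w->size)
			return i;
	}
	return -1;
}

int main(void)
{
	struct tce_container_sketch c = { 0 };
	int slot = find_free_table(&c);	/* "create window" grabs a free slot */

	c.tables[slot] = (struct dma_window){ .in_use = true, .offset = 0, .size = 1 << 16 };
	printf("window for iova 0x100 is slot %d\n", find_table(&c, 0x100));
	return 0;
}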
include/linux/transport_class.h
    40  struct attribute_container container;
    48  . container = { \

kernel/trace/trace.h
    56  #define __field_desc(type, container, item)
    62  #define __array_desc(type, container, item, size)

kernel/trace/trace_export.c
    45  #define __field_desc(type, container, item) type item;
    51  #define __array_desc(type, container, item, size) type item[size];
    92  #define __field_desc(type, container, item) \
    95  container.item), \
    96  sizeof(field.container.item), \
   115  #define __array_desc(type, container, item, len) \
   119  container.item), \
   120  sizeof(field.container.item), \
   156  #define __field_desc(type, container, item)
   162  #define __array_desc(type, container, item, len)

sound/core/control.c
  1093  unsigned int *container;
  1102  container = vmemdup_user(buf, size);
  1103  if (IS_ERR(container))
  1104  return PTR_ERR(container);
  1108  change = memcmp(ue->tlv_data, container, size) != 0;
  1110  kvfree(container);
  1122  ue->tlv_data = container;
  1468  unsigned int __user *container;
  1485  container = buf->tlv;
  1497  return call_tlv_handler(file, op_flag, kctl, &id, container,
  1501  return read_tlv_buf(kctl, &id, container,
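The trace.h/trace_export.c references above are part of a multi-pass macro trick used for ftrace entry definitions: __field_desc()/__array_desc() are expanded several times with different definitions, once to emit struct members (type item;), once to emit offsetof()/sizeof() metadata for field registration, and once to nothing. The sketch below shows the same technique in plain C; FIELDS(), AS_MEMBER and AS_META are invented names, and it flattens away the extra container argument the kernel versions take for fields living in a nested struct.

#include <stddef.h>
#include <stdio.h>

/*
 * One description of the fields, expanded several times with different
 * per-pass macros.  The field list itself is illustrative.
 */
#define FIELDS(F)				\
	F(unsigned long, ip)			\
	F(unsigned long, parent_ip)

/* Pass 1: expand each entry to a struct member, like "type item;". */
#define AS_MEMBER(type, item)	type item;
struct ftrace_entry_sketch {
	FIELDS(AS_MEMBER)
};

/* Pass 2: expand each entry to a metadata record built with offsetof/sizeof. */
struct field_meta {
	const char *name;
	size_t offset;
	size_t size;
};

#define AS_META(type, item)	{ #item,				\
				  offsetof(struct ftrace_entry_sketch, item), \
				  sizeof(((struct ftrace_entry_sketch *)0)->item) },

static const struct field_meta metadata[] = {
	FIELDS(AS_META)
};

int main(void)
{
	for (size_t i = 0; i < sizeof(metadata) / sizeof(metadata[0]); i++)
		printf("%-10s offset=%zu size=%zu\n",
		       metadata[i].name, metadata[i].offset, metadata[i].size);
	return 0;
}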