qdev               35 drivers/gpu/drm/qxl/qxl_cmd.c static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
qdev              186 drivers/gpu/drm/qxl/qxl_cmd.c qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
qdev              192 drivers/gpu/drm/qxl/qxl_cmd.c 	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
qdev              194 drivers/gpu/drm/qxl/qxl_cmd.c 	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
qdev              198 drivers/gpu/drm/qxl/qxl_cmd.c qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
qdev              204 drivers/gpu/drm/qxl/qxl_cmd.c 	cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
qdev              206 drivers/gpu/drm/qxl/qxl_cmd.c 	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
qdev              209 drivers/gpu/drm/qxl/qxl_cmd.c bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
qdev              211 drivers/gpu/drm/qxl/qxl_cmd.c 	if (!qxl_check_idle(qdev->release_ring)) {
qdev              212 drivers/gpu/drm/qxl/qxl_cmd.c 		schedule_work(&qdev->gc_work);
qdev              214 drivers/gpu/drm/qxl/qxl_cmd.c 			flush_work(&qdev->gc_work);
qdev              220 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_garbage_collect(struct qxl_device *qdev)
qdev              227 drivers/gpu/drm/qxl/qxl_cmd.c 	while (qxl_ring_pop(qdev->release_ring, &id)) {
qdev              230 drivers/gpu/drm/qxl/qxl_cmd.c 			release = qxl_release_from_id_locked(qdev, id);
qdev              234 drivers/gpu/drm/qxl/qxl_cmd.c 			info = qxl_release_map(qdev, release);
qdev              236 drivers/gpu/drm/qxl/qxl_cmd.c 			qxl_release_unmap(qdev, release, info);
qdev              252 drivers/gpu/drm/qxl/qxl_cmd.c 			qxl_release_free(qdev, release);
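
The qxl_cmd.c matches above show the release/ring pattern: a qxl_release is translated into a guest-physical pointer with qxl_bo_physical_address() and pushed onto the command or cursor ring, and the ids that later come back on the release ring are freed by qxl_garbage_collect(). A minimal sketch of the push half follows; the struct qxl_command layout and the cmd.type assignment come from the surrounding driver/device headers, not from the matches themselves.

/* Sketch: push one release onto the command ring (cf. qxl_cmd.c 186-194 above). */
static int example_push_release(struct qxl_device *qdev,
                                struct qxl_release *release,
                                uint32_t type, bool interruptible)
{
        struct qxl_command cmd;

        cmd.type = type;        /* e.g. QXL_CMD_DRAW or QXL_CMD_SURFACE */
        /* translate release bo + offset into a guest-physical address */
        cmd.data = qxl_bo_physical_address(qdev, release->release_bo,
                                           release->release_offset);

        return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
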
qdev              262 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_alloc_bo_reserved(struct qxl_device *qdev,
qdev              270 drivers/gpu/drm/qxl/qxl_cmd.c 	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
qdev              287 drivers/gpu/drm/qxl/qxl_cmd.c static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
qdev              290 drivers/gpu/drm/qxl/qxl_cmd.c 	long addr = qdev->io_base + port;
qdev              293 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_lock(&qdev->async_io_mutex);
qdev              294 drivers/gpu/drm/qxl/qxl_cmd.c 	irq_num = atomic_read(&qdev->irq_received_io_cmd);
qdev              295 drivers/gpu/drm/qxl/qxl_cmd.c 	if (qdev->last_sent_io_cmd > irq_num) {
qdev              297 drivers/gpu/drm/qxl/qxl_cmd.c 			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
qdev              298 drivers/gpu/drm/qxl/qxl_cmd.c 							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
qdev              300 drivers/gpu/drm/qxl/qxl_cmd.c 			ret = wait_event_timeout(qdev->io_cmd_event,
qdev              301 drivers/gpu/drm/qxl/qxl_cmd.c 						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
qdev              305 drivers/gpu/drm/qxl/qxl_cmd.c 		irq_num = atomic_read(&qdev->irq_received_io_cmd);
qdev              308 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->last_sent_io_cmd = irq_num + 1;
qdev              310 drivers/gpu/drm/qxl/qxl_cmd.c 		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
qdev              311 drivers/gpu/drm/qxl/qxl_cmd.c 						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
qdev              313 drivers/gpu/drm/qxl/qxl_cmd.c 		ret = wait_event_timeout(qdev->io_cmd_event,
qdev              314 drivers/gpu/drm/qxl/qxl_cmd.c 					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
qdev              318 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_unlock(&qdev->async_io_mutex);
qdev              322 drivers/gpu/drm/qxl/qxl_cmd.c static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
qdev              327 drivers/gpu/drm/qxl/qxl_cmd.c 	ret = wait_for_io_cmd_user(qdev, val, port, false);
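
wait_for_io_cmd_user() serializes asynchronous io-port commands: under async_io_mutex it first drains any previously issued command, then issues the new one and sleeps on io_cmd_event until the interrupt handler bumps irq_received_io_cmd. A condensed sketch of that wait, assuming the command itself is issued with outb(val, addr) between the two waits (the write is not visible in the matches) and mapping the result to illustrative errno values:

/* Condensed async io-command wait (cf. qxl_cmd.c 287-318 above);
 * the initial drain of a previously issued command is omitted. */
static int example_io_cmd_wait(struct qxl_device *qdev, uint8_t val,
                               long port, bool intr)
{
        long addr = qdev->io_base + port;
        int irq_num;
        long ret;

        mutex_lock(&qdev->async_io_mutex);
        irq_num = atomic_read(&qdev->irq_received_io_cmd);

        outb(val, addr);                        /* assumed: issue the command */
        qdev->last_sent_io_cmd = irq_num + 1;

        if (intr)
                ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
                        atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5 * HZ);
        else
                ret = wait_event_timeout(qdev->io_cmd_event,
                        atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5 * HZ);

        mutex_unlock(&qdev->async_io_mutex);

        if (ret > 0)
                return 0;                       /* completed before the timeout */
        return ret == 0 ? -EBUSY : ret;         /* illustrative timeout mapping */
}
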
qdev              332 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
qdev              353 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_lock(&qdev->update_area_mutex);
qdev              354 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->ram_header->update_area = *area;
qdev              355 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->ram_header->update_surface = surface_id;
qdev              356 drivers/gpu/drm/qxl/qxl_cmd.c 	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
qdev              357 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_unlock(&qdev->update_area_mutex);
qdev              361 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_notify_oom(struct qxl_device *qdev)
qdev              363 drivers/gpu/drm/qxl/qxl_cmd.c 	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
qdev              366 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_flush_release(struct qxl_device *qdev)
qdev              368 drivers/gpu/drm/qxl/qxl_cmd.c 	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
qdev              371 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_flush_surfaces(struct qxl_device *qdev)
qdev              373 drivers/gpu/drm/qxl/qxl_cmd.c 	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
qdev              376 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_destroy_primary(struct qxl_device *qdev)
qdev              378 drivers/gpu/drm/qxl/qxl_cmd.c 	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
qdev              379 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->primary_bo->is_primary = false;
qdev              380 drivers/gpu/drm/qxl/qxl_cmd.c 	drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
qdev              381 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->primary_bo = NULL;
qdev              384 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
qdev              388 drivers/gpu/drm/qxl/qxl_cmd.c 	if (WARN_ON(qdev->primary_bo))
qdev              391 drivers/gpu/drm/qxl/qxl_cmd.c 	DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header);
qdev              392 drivers/gpu/drm/qxl/qxl_cmd.c 	create = &qdev->ram_header->create_surface;
qdev              397 drivers/gpu/drm/qxl/qxl_cmd.c 	create->mem = qxl_bo_physical_address(qdev, bo, 0);
qdev              404 drivers/gpu/drm/qxl/qxl_cmd.c 	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
qdev              405 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->primary_bo = bo;
qdev              406 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->primary_bo->is_primary = true;
qdev              407 drivers/gpu/drm/qxl/qxl_cmd.c 	drm_gem_object_get(&qdev->primary_bo->tbo.base);
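
qxl_io_destroy_primary()/qxl_io_create_primary() show the primary-surface handover: the create path fills qdev->ram_header->create_surface (including the bo's guest-physical address), issues QXL_IO_CREATE_PRIMARY_ASYNC, then caches the bo in qdev->primary_bo with an extra GEM reference; destroy reverses all of that. A sketch of the create side; the struct qxl_surface_create type name and the width/height/stride/format fills are assumptions from the device headers, not visible above.

/* Sketch of primary-surface creation (cf. qxl_cmd.c 384-407 above). */
static void example_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
{
        struct qxl_surface_create *create;

        if (WARN_ON(qdev->primary_bo))          /* only one primary at a time */
                return;

        create = &qdev->ram_header->create_surface;
        /* width/height/stride/format are copied from bo->surf in the
         * real code (those lines are not part of the matches above) */
        create->mem = qxl_bo_physical_address(qdev, bo, 0);

        wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);

        qdev->primary_bo = bo;
        qdev->primary_bo->is_primary = true;
        drm_gem_object_get(&qdev->primary_bo->tbo.base);        /* pin an extra ref */
}
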
qdev              410 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
qdev              413 drivers/gpu/drm/qxl/qxl_cmd.c 	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
qdev              416 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_reset(struct qxl_device *qdev)
qdev              418 drivers/gpu/drm/qxl/qxl_cmd.c 	outb(0, qdev->io_base + QXL_IO_RESET);
qdev              421 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_io_monitors_config(struct qxl_device *qdev)
qdev              423 drivers/gpu/drm/qxl/qxl_cmd.c 	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
qdev              426 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_surface_id_alloc(struct qxl_device *qdev,
qdev              434 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock(&qdev->surf_id_idr_lock);
qdev              435 drivers/gpu/drm/qxl/qxl_cmd.c 	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
qdev              436 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock(&qdev->surf_id_idr_lock);
qdev              442 drivers/gpu/drm/qxl/qxl_cmd.c 	if (handle >= qdev->rom->n_surfaces) {
qdev              444 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_lock(&qdev->surf_id_idr_lock);
qdev              445 drivers/gpu/drm/qxl/qxl_cmd.c 		idr_remove(&qdev->surf_id_idr, handle);
qdev              446 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_unlock(&qdev->surf_id_idr_lock);
qdev              447 drivers/gpu/drm/qxl/qxl_cmd.c 		qxl_reap_surface_id(qdev, 2);
qdev              452 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock(&qdev->surf_id_idr_lock);
qdev              453 drivers/gpu/drm/qxl/qxl_cmd.c 	qdev->last_alloced_surf_id = handle;
qdev              454 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock(&qdev->surf_id_idr_lock);
qdev              458 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_surface_id_dealloc(struct qxl_device *qdev,
qdev              461 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock(&qdev->surf_id_idr_lock);
qdev              462 drivers/gpu/drm/qxl/qxl_cmd.c 	idr_remove(&qdev->surf_id_idr, surface_id);
qdev              463 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock(&qdev->surf_id_idr_lock);
qdev              466 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_hw_surface_alloc(struct qxl_device *qdev,
qdev              476 drivers/gpu/drm/qxl/qxl_cmd.c 	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
qdev              484 drivers/gpu/drm/qxl/qxl_cmd.c 		qxl_release_free(qdev, release);
qdev              487 drivers/gpu/drm/qxl/qxl_cmd.c 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
qdev              494 drivers/gpu/drm/qxl/qxl_cmd.c 	cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
qdev              496 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_release_unmap(qdev, release, &cmd->release_info);
qdev              504 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qdev              507 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock(&qdev->surf_id_idr_lock);
qdev              508 drivers/gpu/drm/qxl/qxl_cmd.c 	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
qdev              509 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock(&qdev->surf_id_idr_lock);
qdev              513 drivers/gpu/drm/qxl/qxl_cmd.c int qxl_hw_surface_dealloc(struct qxl_device *qdev,
qdev              524 drivers/gpu/drm/qxl/qxl_cmd.c 	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
qdev              532 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock(&qdev->surf_id_idr_lock);
qdev              533 drivers/gpu/drm/qxl/qxl_cmd.c 	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
qdev              534 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock(&qdev->surf_id_idr_lock);
qdev              541 drivers/gpu/drm/qxl/qxl_cmd.c 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
qdev              544 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_release_unmap(qdev, release, &cmd->release_info);
qdev              547 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
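
Hardware surfaces are created and destroyed with QXL_SURFACE_CMD_CREATE/DESTROY commands carried in a release: map the release, fill the surface command (for create, pointing data at the backing bo), unmap, push QXL_CMD_SURFACE, and publish or retire the surface in surf_id_idr with idr_replace(). A sketch of the create half; the trailing arguments of qxl_alloc_surface_release_reserved() and the reservation/fencing around the push are not visible in the matches and are assumed or omitted here.

/* Sketch of hw surface creation (cf. qxl_cmd.c 466-509 above). */
static int example_hw_surface_alloc(struct qxl_device *qdev, struct qxl_bo *surf)
{
        struct qxl_surface_cmd *cmd;
        struct qxl_release *release;
        int ret;

        ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
                                                 NULL, &release);  /* args assumed */
        if (ret)
                return ret;

        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        /* surface_id/format/width/height/stride are filled from surf->surf
         * in the real code (not shown in the matches) */
        cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
        qxl_release_unmap(qdev, release, &cmd->release_info);

        qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

        /* publish the bo under its id so eviction/reaping can find it */
        spin_lock(&qdev->surf_id_idr_lock);
        idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
        spin_unlock(&qdev->surf_id_idr_lock);
        return 0;
}
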
qdev              552 drivers/gpu/drm/qxl/qxl_cmd.c static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
qdev              564 drivers/gpu/drm/qxl/qxl_cmd.c 	ret = qxl_io_update_area(qdev, surf, &rect);
qdev              570 drivers/gpu/drm/qxl/qxl_cmd.c static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
qdev              574 drivers/gpu/drm/qxl/qxl_cmd.c 		qxl_update_surface(qdev, surf);
qdev              577 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_hw_surface_dealloc(qdev, surf);
qdev              580 drivers/gpu/drm/qxl/qxl_cmd.c void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
qdev              582 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_lock(&qdev->surf_evict_mutex);
qdev              583 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_surface_evict_locked(qdev, surf, do_update_area);
qdev              584 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_unlock(&qdev->surf_evict_mutex);
qdev              587 drivers/gpu/drm/qxl/qxl_cmd.c static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
qdev              596 drivers/gpu/drm/qxl/qxl_cmd.c 		mutex_unlock(&qdev->surf_evict_mutex);
qdev              601 drivers/gpu/drm/qxl/qxl_cmd.c 		mutex_lock(&qdev->surf_evict_mutex);
qdev              607 drivers/gpu/drm/qxl/qxl_cmd.c 	qxl_surface_evict_locked(qdev, surf, true);
qdev              612 drivers/gpu/drm/qxl/qxl_cmd.c static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
qdev              619 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_lock(&qdev->surf_evict_mutex);
qdev              622 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_lock(&qdev->surf_id_idr_lock);
qdev              623 drivers/gpu/drm/qxl/qxl_cmd.c 	start = qdev->last_alloced_surf_id + 1;
qdev              624 drivers/gpu/drm/qxl/qxl_cmd.c 	spin_unlock(&qdev->surf_id_idr_lock);
qdev              626 drivers/gpu/drm/qxl/qxl_cmd.c 	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
qdev              628 drivers/gpu/drm/qxl/qxl_cmd.c 		int surfid = i % qdev->rom->n_surfaces;
qdev              633 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_lock(&qdev->surf_id_idr_lock);
qdev              634 drivers/gpu/drm/qxl/qxl_cmd.c 		objptr = idr_find(&qdev->surf_id_idr, surfid);
qdev              635 drivers/gpu/drm/qxl/qxl_cmd.c 		spin_unlock(&qdev->surf_id_idr_lock);
qdev              640 drivers/gpu/drm/qxl/qxl_cmd.c 		ret = qxl_reap_surf(qdev, objptr, stall);
qdev              651 drivers/gpu/drm/qxl/qxl_cmd.c 	mutex_unlock(&qdev->surf_evict_mutex);
qdev              654 drivers/gpu/drm/qxl/qxl_cmd.c 		qxl_queue_garbage_collect(qdev, true);
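
qxl_reap_surface_id() is the memory-pressure fallback used by qxl_surface_id_alloc() when idr_alloc() hands out an id beyond rom->n_surfaces: it scans the surface idr starting just past last_alloced_surf_id, evicts up to max_to_reap surfaces, and finishes with a synchronous garbage collect. A sketch of the scan loop; the stalling second pass and the exact success bookkeeping are not visible in the matches and are simplified here.

/* Sketch of the surface-reaping scan (cf. qxl_cmd.c 612-654 above). */
static int example_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
        int i, start, freed = 0;
        bool stall = false;     /* the real code retries with stall = true */

        mutex_lock(&qdev->surf_evict_mutex);

        spin_lock(&qdev->surf_id_idr_lock);
        start = qdev->last_alloced_surf_id + 1;
        spin_unlock(&qdev->surf_id_idr_lock);

        for (i = start; i < start + qdev->rom->n_surfaces; i++) {
                int surfid = i % qdev->rom->n_surfaces;   /* wrap the id space */
                struct qxl_bo *objptr;

                spin_lock(&qdev->surf_id_idr_lock);
                objptr = idr_find(&qdev->surf_id_idr, surfid);
                spin_unlock(&qdev->surf_id_idr_lock);
                if (!objptr)
                        continue;

                if (qxl_reap_surf(qdev, objptr, stall) == 0)
                        freed++;                          /* illustrative bookkeeping */
                if (freed >= max_to_reap)
                        break;
        }

        mutex_unlock(&qdev->surf_evict_mutex);
        if (freed)
                qxl_queue_garbage_collect(qdev, true);
        return freed;
}
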
qdev               42 drivers/gpu/drm/qxl/qxl_debugfs.c 	struct qxl_device *qdev = node->minor->dev->dev_private;
qdev               44 drivers/gpu/drm/qxl/qxl_debugfs.c 	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
qdev               45 drivers/gpu/drm/qxl/qxl_debugfs.c 	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
qdev               46 drivers/gpu/drm/qxl/qxl_debugfs.c 	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
qdev               47 drivers/gpu/drm/qxl/qxl_debugfs.c 	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
qdev               48 drivers/gpu/drm/qxl/qxl_debugfs.c 	seq_printf(m, "%d\n", qdev->irq_received_error);
qdev               56 drivers/gpu/drm/qxl/qxl_debugfs.c 	struct qxl_device *qdev = node->minor->dev->dev_private;
qdev               59 drivers/gpu/drm/qxl/qxl_debugfs.c 	list_for_each_entry(bo, &qdev->gem.objects, list) {
qdev              102 drivers/gpu/drm/qxl/qxl_debugfs.c int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev              108 drivers/gpu/drm/qxl/qxl_debugfs.c 	for (i = 0; i < qdev->debugfs_count; i++) {
qdev              109 drivers/gpu/drm/qxl/qxl_debugfs.c 		if (qdev->debugfs[i].files == files) {
qdev              115 drivers/gpu/drm/qxl/qxl_debugfs.c 	i = qdev->debugfs_count + 1;
qdev              121 drivers/gpu/drm/qxl/qxl_debugfs.c 	qdev->debugfs[qdev->debugfs_count].files = files;
qdev              122 drivers/gpu/drm/qxl/qxl_debugfs.c 	qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
qdev              123 drivers/gpu/drm/qxl/qxl_debugfs.c 	qdev->debugfs_count = i;
qdev              126 drivers/gpu/drm/qxl/qxl_debugfs.c 				 qdev->ddev.primary->debugfs_root,
qdev              127 drivers/gpu/drm/qxl/qxl_debugfs.c 				 qdev->ddev.primary);
qdev               44 drivers/gpu/drm/qxl/qxl_display.c static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
qdev               47 drivers/gpu/drm/qxl/qxl_display.c 	if (qdev->client_monitors_config &&
qdev               48 drivers/gpu/drm/qxl/qxl_display.c 	    count > qdev->client_monitors_config->count) {
qdev               49 drivers/gpu/drm/qxl/qxl_display.c 		kfree(qdev->client_monitors_config);
qdev               50 drivers/gpu/drm/qxl/qxl_display.c 		qdev->client_monitors_config = NULL;
qdev               52 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->client_monitors_config) {
qdev               53 drivers/gpu/drm/qxl/qxl_display.c 		qdev->client_monitors_config = kzalloc(
qdev               54 drivers/gpu/drm/qxl/qxl_display.c 				struct_size(qdev->client_monitors_config,
qdev               56 drivers/gpu/drm/qxl/qxl_display.c 		if (!qdev->client_monitors_config)
qdev               59 drivers/gpu/drm/qxl/qxl_display.c 	qdev->client_monitors_config->count = count;
qdev               70 drivers/gpu/drm/qxl/qxl_display.c static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
qdev               77 drivers/gpu/drm/qxl/qxl_display.c 	num_monitors = qdev->rom->client_monitors_config.count;
qdev               78 drivers/gpu/drm/qxl/qxl_display.c 	crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
qdev               79 drivers/gpu/drm/qxl/qxl_display.c 		  sizeof(qdev->rom->client_monitors_config));
qdev               80 drivers/gpu/drm/qxl/qxl_display.c 	if (crc != qdev->rom->client_monitors_config_crc)
qdev               91 drivers/gpu/drm/qxl/qxl_display.c 		num_monitors = qdev->rom->client_monitors_config.count;
qdev               93 drivers/gpu/drm/qxl/qxl_display.c 	if (qdev->client_monitors_config
qdev               94 drivers/gpu/drm/qxl/qxl_display.c 	      && (num_monitors != qdev->client_monitors_config->count)) {
qdev               97 drivers/gpu/drm/qxl/qxl_display.c 	if (qxl_alloc_client_monitors_config(qdev, num_monitors)) {
qdev              102 drivers/gpu/drm/qxl/qxl_display.c 	qdev->client_monitors_config->max_allowed = qxl_num_crtc;
qdev              103 drivers/gpu/drm/qxl/qxl_display.c 	for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) {
qdev              105 drivers/gpu/drm/qxl/qxl_display.c 			&qdev->rom->client_monitors_config.heads[i];
qdev              107 drivers/gpu/drm/qxl/qxl_display.c 			&qdev->client_monitors_config->heads[i];
qdev              143 drivers/gpu/drm/qxl/qxl_display.c static void qxl_update_offset_props(struct qxl_device *qdev)
qdev              145 drivers/gpu/drm/qxl/qxl_display.c 	struct drm_device *dev = &qdev->ddev;
qdev              153 drivers/gpu/drm/qxl/qxl_display.c 		head = &qdev->client_monitors_config->heads[output->index];
qdev              162 drivers/gpu/drm/qxl/qxl_display.c void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
qdev              164 drivers/gpu/drm/qxl/qxl_display.c 	struct drm_device *dev = &qdev->ddev;
qdev              168 drivers/gpu/drm/qxl/qxl_display.c 		status = qxl_display_copy_rom_client_monitors_config(qdev);
qdev              187 drivers/gpu/drm/qxl/qxl_display.c 	qxl_update_offset_props(qdev);
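
qxl_display_copy_rom_client_monitors_config() snapshots the host-provided monitor layout: it checksums the whole rom client_monitors_config area with crc32() and compares it against client_monitors_config_crc (the real code retries while they mismatch, since the host may still be writing), then copies each head into qdev->client_monitors_config sized for qxl_num_crtc. A sketch reduced to one attempt; the qxl_urect/qxl_head field copies and the errno-style returns are reconstructions, not visible in the matches.

/* Sketch of the rom -> qdev client monitors config copy
 * (cf. qxl_display.c 70-107 above). */
static int example_copy_client_monitors_config(struct qxl_device *qdev)
{
        int i, num_monitors;
        uint32_t crc;

        num_monitors = qdev->rom->client_monitors_config.count;
        crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config,
                    sizeof(qdev->rom->client_monitors_config));
        if (crc != qdev->rom->client_monitors_config_crc)
                return -EAGAIN;         /* host still writing; retry later */

        if (qxl_alloc_client_monitors_config(qdev, num_monitors))
                return -ENOMEM;

        qdev->client_monitors_config->max_allowed = qxl_num_crtc;
        for (i = 0; i < qdev->client_monitors_config->count; ++i) {
                struct qxl_urect *c_rect =
                        &qdev->rom->client_monitors_config.heads[i];
                struct qxl_head *head =
                        &qdev->client_monitors_config->heads[i];

                /* translate the client rect into a head (reconstructed) */
                head->x = c_rect->left;
                head->y = c_rect->top;
                head->width = c_rect->right - c_rect->left;
                head->height = c_rect->bottom - c_rect->top;
        }
        return 0;
}
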
qdev              196 drivers/gpu/drm/qxl/qxl_display.c static int qxl_check_mode(struct qxl_device *qdev,
qdev              207 drivers/gpu/drm/qxl/qxl_display.c 	if (size > qdev->vram_size)
qdev              212 drivers/gpu/drm/qxl/qxl_display.c static int qxl_check_framebuffer(struct qxl_device *qdev,
qdev              215 drivers/gpu/drm/qxl/qxl_display.c 	return qxl_check_mode(qdev, bo->surf.width, bo->surf.height);
qdev              224 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              228 drivers/gpu/drm/qxl/qxl_display.c 	rc = qxl_check_mode(qdev, width, height);
qdev              245 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              250 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->monitors_config)
qdev              254 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->client_monitors_config)
qdev              256 drivers/gpu/drm/qxl/qxl_display.c 	if (h >= qdev->client_monitors_config->count)
qdev              259 drivers/gpu/drm/qxl/qxl_display.c 	head = &qdev->client_monitors_config->heads[h];
qdev              286 drivers/gpu/drm/qxl/qxl_display.c static void qxl_send_monitors_config(struct qxl_device *qdev)
qdev              290 drivers/gpu/drm/qxl/qxl_display.c 	BUG_ON(!qdev->ram_header->monitors_config);
qdev              292 drivers/gpu/drm/qxl/qxl_display.c 	if (qdev->monitors_config->count == 0)
qdev              295 drivers/gpu/drm/qxl/qxl_display.c 	for (i = 0 ; i < qdev->monitors_config->count ; ++i) {
qdev              296 drivers/gpu/drm/qxl/qxl_display.c 		struct qxl_head *head = &qdev->monitors_config->heads[i];
qdev              306 drivers/gpu/drm/qxl/qxl_display.c 	qxl_io_monitors_config(qdev);
qdev              313 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              318 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->primary_bo) {
qdev              323 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->monitors_config || qxl_num_crtc <= i)
qdev              328 drivers/gpu/drm/qxl/qxl_display.c 	oldcount = qdev->monitors_config->count;
qdev              336 drivers/gpu/drm/qxl/qxl_display.c 		if (qdev->monitors_config->count < i + 1)
qdev              337 drivers/gpu/drm/qxl/qxl_display.c 			qdev->monitors_config->count = i + 1;
qdev              338 drivers/gpu/drm/qxl/qxl_display.c 		if (qdev->primary_bo == qdev->dumb_shadow_bo)
qdev              339 drivers/gpu/drm/qxl/qxl_display.c 			head.x += qdev->dumb_heads[i].x;
qdev              345 drivers/gpu/drm/qxl/qxl_display.c 		if (qdev->monitors_config->count == i + 1)
qdev              346 drivers/gpu/drm/qxl/qxl_display.c 			qdev->monitors_config->count = i;
qdev              352 drivers/gpu/drm/qxl/qxl_display.c 	if (head.width  == qdev->monitors_config->heads[i].width  &&
qdev              353 drivers/gpu/drm/qxl/qxl_display.c 	    head.height == qdev->monitors_config->heads[i].height &&
qdev              354 drivers/gpu/drm/qxl/qxl_display.c 	    head.x      == qdev->monitors_config->heads[i].x      &&
qdev              355 drivers/gpu/drm/qxl/qxl_display.c 	    head.y      == qdev->monitors_config->heads[i].y      &&
qdev              356 drivers/gpu/drm/qxl/qxl_display.c 	    oldcount    == qdev->monitors_config->count)
qdev              362 drivers/gpu/drm/qxl/qxl_display.c 	if (oldcount != qdev->monitors_config->count)
qdev              364 drivers/gpu/drm/qxl/qxl_display.c 			      oldcount, qdev->monitors_config->count,
qdev              367 drivers/gpu/drm/qxl/qxl_display.c 	qdev->monitors_config->heads[i] = head;
qdev              368 drivers/gpu/drm/qxl/qxl_display.c 	qdev->monitors_config->max_allowed = qxl_num_crtc;
qdev              369 drivers/gpu/drm/qxl/qxl_display.c 	qxl_send_monitors_config(qdev);
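
The crtc update path above edits one entry of qdev->monitors_config in place, grows or shrinks its count, skips the notification when nothing changed, and finally calls qxl_send_monitors_config(). Because monitors_config lives in a pinned bo whose physical address was written into qdev->ram_header->monitors_config at init time, publishing a change only takes one async io command. A minimal sketch (the head parameter and early-out checks are simplified):

/* Sketch of publishing one head to the device
 * (cf. qxl_display.c 286-369 above). */
static void example_set_head(struct qxl_device *qdev, int i,
                             const struct qxl_head *head)
{
        if (!qdev->monitors_config || i >= qxl_num_crtc)
                return;

        if (qdev->monitors_config->count < i + 1)
                qdev->monitors_config->count = i + 1;

        qdev->monitors_config->heads[i] = *head;
        qdev->monitors_config->max_allowed = qxl_num_crtc;

        /* the device re-reads the pinned monitors_config bo on this command */
        qxl_io_monitors_config(qdev);
}
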
qdev              416 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = fb->dev->dev_private;
qdev              443 drivers/gpu/drm/qxl/qxl_display.c 	qxl_draw_dirty_fb(qdev, fb, qobj, flags, color,
qdev              478 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = plane->dev->dev_private;
qdev              486 drivers/gpu/drm/qxl/qxl_display.c 	return qxl_check_framebuffer(qdev, bo);
qdev              492 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              502 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
qdev              516 drivers/gpu/drm/qxl/qxl_display.c 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
qdev              521 drivers/gpu/drm/qxl/qxl_display.c 	cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
qdev              524 drivers/gpu/drm/qxl/qxl_display.c 	qxl_release_unmap(qdev, release, &cmd->release_info);
qdev              527 drivers/gpu/drm/qxl/qxl_display.c 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qdev              532 drivers/gpu/drm/qxl/qxl_display.c 	qxl_release_free(qdev, release);
qdev              539 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = plane->dev->dev_private;
qdev              553 drivers/gpu/drm/qxl/qxl_display.c 		if (qdev->primary_bo)
qdev              554 drivers/gpu/drm/qxl/qxl_display.c 			qxl_io_destroy_primary(qdev);
qdev              555 drivers/gpu/drm/qxl/qxl_display.c 		qxl_io_create_primary(qdev, primary);
qdev              561 drivers/gpu/drm/qxl/qxl_display.c 			qdev->dumb_heads[plane->state->crtc->index].x;
qdev              563 drivers/gpu/drm/qxl/qxl_display.c 	qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1,
qdev              570 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = plane->dev->dev_private;
qdev              576 drivers/gpu/drm/qxl/qxl_display.c 			qxl_io_destroy_primary(qdev);
qdev              586 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              598 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
qdev              613 drivers/gpu/drm/qxl/qxl_display.c 		ret = qxl_alloc_bo_reserved(qdev, release,
qdev              645 drivers/gpu/drm/qxl/qxl_display.c 		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
qdev              647 drivers/gpu/drm/qxl/qxl_display.c 		cmd->u.set.shape = qxl_bo_physical_address(qdev,
qdev              660 drivers/gpu/drm/qxl/qxl_display.c 		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
qdev              667 drivers/gpu/drm/qxl/qxl_display.c 	qxl_release_unmap(qdev, release, &cmd->release_info);
qdev              669 drivers/gpu/drm/qxl/qxl_display.c 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qdev              687 drivers/gpu/drm/qxl/qxl_display.c 	qxl_release_free(qdev, release);
qdev              695 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = plane->dev->dev_private;
qdev              700 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
qdev              708 drivers/gpu/drm/qxl/qxl_display.c 		qxl_release_free(qdev, release);
qdev              712 drivers/gpu/drm/qxl/qxl_display.c 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
qdev              714 drivers/gpu/drm/qxl/qxl_display.c 	qxl_release_unmap(qdev, release, &cmd->release_info);
qdev              717 drivers/gpu/drm/qxl/qxl_display.c 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
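
Cursor updates reuse the release machinery on the cursor ring: allocate a release sized for a qxl_cursor_cmd, map it, point cmd->u.set.shape at the cursor bo via qxl_bo_physical_address(), unmap, and push QXL_CMD_CURSOR. A sketch of the set-shape case; QXL_RELEASE_CURSOR_CMD, QXL_CURSOR_SET and the trailing qxl_alloc_release_reserved() arguments are assumptions from the driver/device headers, and reservation/fencing is omitted.

/* Sketch of a cursor-set command (cf. qxl_display.c 492-527, 586-669 above). */
static int example_cursor_set(struct qxl_device *qdev, struct qxl_bo *cursor_bo)
{
        struct qxl_cursor_cmd *cmd;
        struct qxl_release *release;
        int ret;

        ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
                                         QXL_RELEASE_CURSOR_CMD,  /* assumed */
                                         &release, NULL);
        if (ret)
                return ret;

        cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_CURSOR_SET;                               /* assumed */
        cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
        qxl_release_unmap(qdev, release, &cmd->release_info);

        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        return 0;
}
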
qdev              720 drivers/gpu/drm/qxl/qxl_display.c static void qxl_update_dumb_head(struct qxl_device *qdev,
qdev              725 drivers/gpu/drm/qxl/qxl_display.c 	if (index >= qdev->monitors_config->max_allowed)
qdev              736 drivers/gpu/drm/qxl/qxl_display.c 	if (qdev->dumb_heads[index].width == width &&
qdev              737 drivers/gpu/drm/qxl/qxl_display.c 	    qdev->dumb_heads[index].height == height)
qdev              741 drivers/gpu/drm/qxl/qxl_display.c 		  qdev->dumb_heads[index].width,
qdev              742 drivers/gpu/drm/qxl/qxl_display.c 		  qdev->dumb_heads[index].height,
qdev              744 drivers/gpu/drm/qxl/qxl_display.c 	qdev->dumb_heads[index].width = width;
qdev              745 drivers/gpu/drm/qxl/qxl_display.c 	qdev->dumb_heads[index].height = height;
qdev              748 drivers/gpu/drm/qxl/qxl_display.c static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
qdev              755 drivers/gpu/drm/qxl/qxl_display.c 	for (i = 0; i < qdev->monitors_config->max_allowed; i++) {
qdev              756 drivers/gpu/drm/qxl/qxl_display.c 		head = qdev->dumb_heads + i;
qdev              769 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->dumb_shadow_bo ||
qdev              770 drivers/gpu/drm/qxl/qxl_display.c 	    qdev->dumb_shadow_bo->surf.width != surf->width ||
qdev              771 drivers/gpu/drm/qxl/qxl_display.c 	    qdev->dumb_shadow_bo->surf.height != surf->height)
qdev              778 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = plane->dev->dev_private;
qdev              792 drivers/gpu/drm/qxl/qxl_display.c 		qxl_update_dumb_head(qdev, new_state->crtc->index,
qdev              794 drivers/gpu/drm/qxl/qxl_display.c 		qxl_calc_dumb_shadow(qdev, &surf);
qdev              795 drivers/gpu/drm/qxl/qxl_display.c 		if (!qdev->dumb_shadow_bo ||
qdev              796 drivers/gpu/drm/qxl/qxl_display.c 		    qdev->dumb_shadow_bo->surf.width  != surf.width ||
qdev              797 drivers/gpu/drm/qxl/qxl_display.c 		    qdev->dumb_shadow_bo->surf.height != surf.height) {
qdev              798 drivers/gpu/drm/qxl/qxl_display.c 			if (qdev->dumb_shadow_bo) {
qdev              800 drivers/gpu/drm/qxl/qxl_display.c 					(&qdev->dumb_shadow_bo->tbo.base);
qdev              801 drivers/gpu/drm/qxl/qxl_display.c 				qdev->dumb_shadow_bo = NULL;
qdev              803 drivers/gpu/drm/qxl/qxl_display.c 			qxl_bo_create(qdev, surf.height * surf.stride,
qdev              805 drivers/gpu/drm/qxl/qxl_display.c 				      &qdev->dumb_shadow_bo);
qdev              807 drivers/gpu/drm/qxl/qxl_display.c 		if (user_bo->shadow != qdev->dumb_shadow_bo) {
qdev              813 drivers/gpu/drm/qxl/qxl_display.c 			drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
qdev              814 drivers/gpu/drm/qxl/qxl_display.c 			user_bo->shadow = qdev->dumb_shadow_bo;
qdev              891 drivers/gpu/drm/qxl/qxl_display.c static struct drm_plane *qxl_create_plane(struct qxl_device *qdev,
qdev              920 drivers/gpu/drm/qxl/qxl_display.c 	err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs,
qdev              939 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              946 drivers/gpu/drm/qxl/qxl_display.c 	primary = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_PRIMARY);
qdev              952 drivers/gpu/drm/qxl/qxl_display.c 	cursor = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_CURSOR);
qdev              981 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev              987 drivers/gpu/drm/qxl/qxl_display.c 	if (qdev->client_monitors_config) {
qdev              989 drivers/gpu/drm/qxl/qxl_display.c 		head = &qdev->client_monitors_config->heads[output->index];
qdev             1007 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = ddev->dev_private;
qdev             1009 drivers/gpu/drm/qxl/qxl_display.c 	if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
qdev             1040 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = ddev->dev_private;
qdev             1044 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->client_monitors_config) {
qdev             1048 drivers/gpu/drm/qxl/qxl_display.c 		connected = qdev->client_monitors_config->count > output->index &&
qdev             1049 drivers/gpu/drm/qxl/qxl_display.c 		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
qdev             1085 drivers/gpu/drm/qxl/qxl_display.c static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
qdev             1087 drivers/gpu/drm/qxl/qxl_display.c 	if (qdev->hotplug_mode_update_property)
qdev             1090 drivers/gpu/drm/qxl/qxl_display.c 	qdev->hotplug_mode_update_property =
qdev             1091 drivers/gpu/drm/qxl/qxl_display.c 		drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE,
qdev             1099 drivers/gpu/drm/qxl/qxl_display.c 	struct qxl_device *qdev = dev->dev_private;
qdev             1127 drivers/gpu/drm/qxl/qxl_display.c 				   qdev->hotplug_mode_update_property, 0);
qdev             1150 drivers/gpu/drm/qxl/qxl_display.c int qxl_create_monitors_object(struct qxl_device *qdev)
qdev             1157 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
qdev             1164 drivers/gpu/drm/qxl/qxl_display.c 	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
qdev             1166 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_bo_pin(qdev->monitors_config_bo);
qdev             1170 drivers/gpu/drm/qxl/qxl_display.c 	qxl_bo_kmap(qdev->monitors_config_bo, NULL);
qdev             1172 drivers/gpu/drm/qxl/qxl_display.c 	qdev->monitors_config = qdev->monitors_config_bo->kptr;
qdev             1173 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ram_header->monitors_config =
qdev             1174 drivers/gpu/drm/qxl/qxl_display.c 		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
qdev             1176 drivers/gpu/drm/qxl/qxl_display.c 	memset(qdev->monitors_config, 0, monitors_config_size);
qdev             1177 drivers/gpu/drm/qxl/qxl_display.c 	qdev->dumb_heads = kcalloc(qxl_num_crtc, sizeof(qdev->dumb_heads[0]),
qdev             1179 drivers/gpu/drm/qxl/qxl_display.c 	if (!qdev->dumb_heads) {
qdev             1180 drivers/gpu/drm/qxl/qxl_display.c 		qxl_destroy_monitors_object(qdev);
qdev             1186 drivers/gpu/drm/qxl/qxl_display.c int qxl_destroy_monitors_object(struct qxl_device *qdev)
qdev             1190 drivers/gpu/drm/qxl/qxl_display.c 	qdev->monitors_config = NULL;
qdev             1191 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ram_header->monitors_config = 0;
qdev             1193 drivers/gpu/drm/qxl/qxl_display.c 	qxl_bo_kunmap(qdev->monitors_config_bo);
qdev             1194 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_bo_unpin(qdev->monitors_config_bo);
qdev             1198 drivers/gpu/drm/qxl/qxl_display.c 	qxl_bo_unref(&qdev->monitors_config_bo);
qdev             1202 drivers/gpu/drm/qxl/qxl_display.c int qxl_modeset_init(struct qxl_device *qdev)
qdev             1207 drivers/gpu/drm/qxl/qxl_display.c 	drm_mode_config_init(&qdev->ddev);
qdev             1209 drivers/gpu/drm/qxl/qxl_display.c 	ret = qxl_create_monitors_object(qdev);
qdev             1213 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs;
qdev             1216 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ddev.mode_config.min_width = 0;
qdev             1217 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ddev.mode_config.min_height = 0;
qdev             1218 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ddev.mode_config.max_width = 8192;
qdev             1219 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ddev.mode_config.max_height = 8192;
qdev             1221 drivers/gpu/drm/qxl/qxl_display.c 	qdev->ddev.mode_config.fb_base = qdev->vram_base;
qdev             1223 drivers/gpu/drm/qxl/qxl_display.c 	drm_mode_create_suggested_offset_properties(&qdev->ddev);
qdev             1224 drivers/gpu/drm/qxl/qxl_display.c 	qxl_mode_create_hotplug_mode_update_property(qdev);
qdev             1227 drivers/gpu/drm/qxl/qxl_display.c 		qdev_crtc_init(&qdev->ddev, i);
qdev             1228 drivers/gpu/drm/qxl/qxl_display.c 		qdev_output_init(&qdev->ddev, i);
qdev             1231 drivers/gpu/drm/qxl/qxl_display.c 	qxl_display_read_client_monitors_config(qdev);
qdev             1233 drivers/gpu/drm/qxl/qxl_display.c 	drm_mode_config_reset(&qdev->ddev);
qdev             1237 drivers/gpu/drm/qxl/qxl_display.c void qxl_modeset_fini(struct qxl_device *qdev)
qdev             1239 drivers/gpu/drm/qxl/qxl_display.c 	qxl_destroy_monitors_object(qdev);
qdev             1240 drivers/gpu/drm/qxl/qxl_display.c 	drm_mode_config_cleanup(&qdev->ddev);
qdev               28 drivers/gpu/drm/qxl/qxl_draw.c static int alloc_clips(struct qxl_device *qdev,
qdev               35 drivers/gpu/drm/qxl/qxl_draw.c 	return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
qdev               41 drivers/gpu/drm/qxl/qxl_draw.c static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
qdev               60 drivers/gpu/drm/qxl/qxl_draw.c alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
qdev               62 drivers/gpu/drm/qxl/qxl_draw.c 	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
qdev               67 drivers/gpu/drm/qxl/qxl_draw.c free_drawable(struct qxl_device *qdev, struct qxl_release *release)
qdev               69 drivers/gpu/drm/qxl/qxl_draw.c 	qxl_release_free(qdev, release);
qdev               74 drivers/gpu/drm/qxl/qxl_draw.c make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
qdev               81 drivers/gpu/drm/qxl/qxl_draw.c 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
qdev              109 drivers/gpu/drm/qxl/qxl_draw.c 	drawable->mm_time = qdev->rom->mm_clock;
qdev              110 drivers/gpu/drm/qxl/qxl_draw.c 	qxl_release_unmap(qdev, release, &drawable->release_info);
qdev              121 drivers/gpu/drm/qxl/qxl_draw.c void qxl_draw_dirty_fb(struct qxl_device *qdev,
qdev              151 drivers/gpu/drm/qxl/qxl_draw.c 	ret = alloc_drawable(qdev, &release);
qdev              175 drivers/gpu/drm/qxl/qxl_draw.c 	ret = alloc_clips(qdev, release, num_clips, &clips_bo);
qdev              179 drivers/gpu/drm/qxl/qxl_draw.c 	ret = qxl_image_alloc_objects(qdev, release,
qdev              195 drivers/gpu/drm/qxl/qxl_draw.c 	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
qdev              204 drivers/gpu/drm/qxl/qxl_draw.c 	ret = qxl_image_init(qdev, release, dimage, surface_base,
qdev              211 drivers/gpu/drm/qxl/qxl_draw.c 	rects = drawable_set_clipping(qdev, num_clips, clips_bo);
qdev              216 drivers/gpu/drm/qxl/qxl_draw.c 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
qdev              219 drivers/gpu/drm/qxl/qxl_draw.c 	drawable->clip.data = qxl_bo_physical_address(qdev,
qdev              234 drivers/gpu/drm/qxl/qxl_draw.c 	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
qdev              235 drivers/gpu/drm/qxl/qxl_draw.c 	qxl_release_unmap(qdev, release, &drawable->release_info);
qdev              247 drivers/gpu/drm/qxl/qxl_draw.c 	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qdev              253 drivers/gpu/drm/qxl/qxl_draw.c 	qxl_image_free_objects(qdev, dimage);
qdev              259 drivers/gpu/drm/qxl/qxl_draw.c 		free_drawable(qdev, release);
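
qxl_draw_dirty_fb() ties most of the pieces above together: it allocates a drawable release, a clip-rect bo and the image objects, initializes the image from the framebuffer data, then maps the drawable to wire in the clip list and source bitmap by guest-physical address before pushing QXL_CMD_DRAW. A heavily condensed sketch; make_drawable(), the rect math, dumb-buffer handling and error unwinding are omitted, and only the two address assignments are taken verbatim from the matches.

/* Condensed sketch of the dirty-fb draw path (cf. qxl_draw.c 121-259 above). */
static int example_draw_dirty(struct qxl_device *qdev, struct qxl_bo *clips_bo,
                              struct qxl_drm_image *dimage,
                              unsigned int num_clips)
{
        struct qxl_drawable *drawable;
        struct qxl_release *release;
        struct qxl_rect *rects;
        int ret;

        ret = alloc_drawable(qdev, &release);   /* release sized for a drawable */
        if (ret)
                return ret;

        /* clip rectangles live in their own bo; rects points into it */
        rects = drawable_set_clipping(qdev, num_clips, clips_bo);
        if (!rects)
                return -EINVAL;

        drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
        /* bbox, effect, clip type and copy parameters are filled by
         * make_drawable() and the real code; skipped in this sketch */
        drawable->clip.data = qxl_bo_physical_address(qdev, clips_bo, 0);
        drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
        qxl_release_unmap(qdev, release, &drawable->release_info);

        qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
        return 0;
}
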
qdev               74 drivers/gpu/drm/qxl/qxl_drv.c 	struct qxl_device *qdev;
qdev               83 drivers/gpu/drm/qxl/qxl_drv.c 	qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
qdev               84 drivers/gpu/drm/qxl/qxl_drv.c 	if (!qdev)
qdev              103 drivers/gpu/drm/qxl/qxl_drv.c 	ret = qxl_device_init(qdev, &qxl_driver, pdev);
qdev              107 drivers/gpu/drm/qxl/qxl_drv.c 	ret = qxl_modeset_init(qdev);
qdev              111 drivers/gpu/drm/qxl/qxl_drv.c 	drm_kms_helper_poll_init(&qdev->ddev);
qdev              114 drivers/gpu/drm/qxl/qxl_drv.c 	ret = drm_dev_register(&qdev->ddev, ent->driver_data);
qdev              118 drivers/gpu/drm/qxl/qxl_drv.c 	drm_fbdev_generic_setup(&qdev->ddev, 32);
qdev              122 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_modeset_fini(qdev);
qdev              124 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_device_fini(qdev);
qdev              131 drivers/gpu/drm/qxl/qxl_drv.c 	kfree(qdev);
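
The probe path above follows the usual alloc / init / register order, with each error label unwinding exactly what succeeded before it. A sketch with the pci enable and conflicting-framebuffer steps omitted (everything else uses the calls shown in the matches, with label names of my own):

/* Sketch of the probe sequence (cf. qxl_drv.c 74-131 above). */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct qxl_device *qdev;
        int ret;

        qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
        if (!qdev)
                return -ENOMEM;

        ret = qxl_device_init(qdev, &qxl_driver, pdev);
        if (ret)
                goto free_dev;

        ret = qxl_modeset_init(qdev);
        if (ret)
                goto fini_device;

        drm_kms_helper_poll_init(&qdev->ddev);

        ret = drm_dev_register(&qdev->ddev, ent->driver_data);
        if (ret)
                goto fini_modeset;

        drm_fbdev_generic_setup(&qdev->ddev, 32);
        return 0;

fini_modeset:                           /* unwind in reverse order */
        qxl_modeset_fini(qdev);
fini_device:
        qxl_device_fini(qdev);
free_dev:
        kfree(qdev);
        return ret;
}
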
qdev              139 drivers/gpu/drm/qxl/qxl_drv.c 	struct qxl_device *qdev = dev->dev_private;
qdev              143 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_modeset_fini(qdev);
qdev              144 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_device_fini(qdev);
qdev              149 drivers/gpu/drm/qxl/qxl_drv.c 	kfree(qdev);
qdev              166 drivers/gpu/drm/qxl/qxl_drv.c 	struct qxl_device *qdev = dev->dev_private;
qdev              173 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_destroy_monitors_object(qdev);
qdev              174 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_surf_evict(qdev);
qdev              175 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_vram_evict(qdev);
qdev              177 drivers/gpu/drm/qxl/qxl_drv.c 	while (!qxl_check_idle(qdev->command_ring));
qdev              178 drivers/gpu/drm/qxl/qxl_drv.c 	while (!qxl_check_idle(qdev->release_ring))
qdev              179 drivers/gpu/drm/qxl/qxl_drv.c 		qxl_queue_garbage_collect(qdev, 1);
qdev              188 drivers/gpu/drm/qxl/qxl_drv.c 	struct qxl_device *qdev = dev->dev_private;
qdev              190 drivers/gpu/drm/qxl/qxl_drv.c 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
qdev              192 drivers/gpu/drm/qxl/qxl_drv.c 		qxl_reinit_memslots(qdev);
qdev              193 drivers/gpu/drm/qxl/qxl_drv.c 		qxl_ring_init_hdr(qdev->release_ring);
qdev              196 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_create_monitors_object(qdev);
qdev              247 drivers/gpu/drm/qxl/qxl_drv.c 	struct qxl_device *qdev = drm_dev->dev_private;
qdev              249 drivers/gpu/drm/qxl/qxl_drv.c 	qxl_io_reset(qdev);
qdev              171 drivers/gpu/drm/qxl/qxl_drv.h 	struct qxl_device *qdev;
qdev              178 drivers/gpu/drm/qxl/qxl_drv.h 	struct qxl_device *qdev;
qdev              281 drivers/gpu/drm/qxl/qxl_drv.h int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
qdev              283 drivers/gpu/drm/qxl/qxl_drv.h void qxl_device_fini(struct qxl_device *qdev);
qdev              285 drivers/gpu/drm/qxl/qxl_drv.h int qxl_modeset_init(struct qxl_device *qdev);
qdev              286 drivers/gpu/drm/qxl/qxl_drv.h void qxl_modeset_fini(struct qxl_device *qdev);
qdev              288 drivers/gpu/drm/qxl/qxl_drv.h int qxl_bo_init(struct qxl_device *qdev);
qdev              289 drivers/gpu/drm/qxl/qxl_drv.h void qxl_bo_fini(struct qxl_device *qdev);
qdev              291 drivers/gpu/drm/qxl/qxl_drv.h void qxl_reinit_memslots(struct qxl_device *qdev);
qdev              292 drivers/gpu/drm/qxl/qxl_drv.h int qxl_surf_evict(struct qxl_device *qdev);
qdev              293 drivers/gpu/drm/qxl/qxl_drv.h int qxl_vram_evict(struct qxl_device *qdev);
qdev              306 drivers/gpu/drm/qxl/qxl_drv.h qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
qdev              311 drivers/gpu/drm/qxl/qxl_drv.h 		? &qdev->main_slot : &qdev->surfaces_slot;
qdev              320 drivers/gpu/drm/qxl/qxl_drv.h void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
qdev              321 drivers/gpu/drm/qxl/qxl_drv.h int qxl_create_monitors_object(struct qxl_device *qdev);
qdev              322 drivers/gpu/drm/qxl/qxl_drv.h int qxl_destroy_monitors_object(struct qxl_device *qdev);
qdev              325 drivers/gpu/drm/qxl/qxl_drv.h void qxl_gem_init(struct qxl_device *qdev);
qdev              326 drivers/gpu/drm/qxl/qxl_drv.h void qxl_gem_fini(struct qxl_device *qdev);
qdev              327 drivers/gpu/drm/qxl/qxl_drv.h int qxl_gem_object_create(struct qxl_device *qdev, int size,
qdev              332 drivers/gpu/drm/qxl/qxl_drv.h int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
qdev              343 drivers/gpu/drm/qxl/qxl_drv.h void qxl_bo_force_delete(struct qxl_device *qdev);
qdev              355 drivers/gpu/drm/qxl/qxl_drv.h int qxl_ttm_init(struct qxl_device *qdev);
qdev              356 drivers/gpu/drm/qxl/qxl_drv.h void qxl_ttm_fini(struct qxl_device *qdev);
qdev              361 drivers/gpu/drm/qxl/qxl_drv.h int qxl_image_init(struct qxl_device *qdev,
qdev              368 drivers/gpu/drm/qxl/qxl_drv.h qxl_image_alloc_objects(struct qxl_device *qdev,
qdev              372 drivers/gpu/drm/qxl/qxl_drv.h void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
qdev              378 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_create_primary(struct qxl_device *qdev,
qdev              380 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_destroy_primary(struct qxl_device *qdev);
qdev              381 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
qdev              382 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_notify_oom(struct qxl_device *qdev);
qdev              384 drivers/gpu/drm/qxl/qxl_drv.h int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
qdev              387 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_reset(struct qxl_device *qdev);
qdev              388 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_monitors_config(struct qxl_device *qdev);
qdev              390 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_flush_release(struct qxl_device *qdev);
qdev              391 drivers/gpu/drm/qxl/qxl_drv.h void qxl_io_flush_surfaces(struct qxl_device *qdev);
qdev              393 drivers/gpu/drm/qxl/qxl_drv.h union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
qdev              395 drivers/gpu/drm/qxl/qxl_drv.h void qxl_release_unmap(struct qxl_device *qdev,
qdev              403 drivers/gpu/drm/qxl/qxl_drv.h int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
qdev              407 drivers/gpu/drm/qxl/qxl_drv.h int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
qdev              412 drivers/gpu/drm/qxl/qxl_drv.h qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
qdev              415 drivers/gpu/drm/qxl/qxl_drv.h qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
qdev              417 drivers/gpu/drm/qxl/qxl_drv.h int qxl_alloc_bo_reserved(struct qxl_device *qdev,
qdev              423 drivers/gpu/drm/qxl/qxl_drv.h void qxl_draw_dirty_fb(struct qxl_device *qdev,
qdev              431 drivers/gpu/drm/qxl/qxl_drv.h void qxl_release_free(struct qxl_device *qdev,
qdev              435 drivers/gpu/drm/qxl/qxl_drv.h struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
qdev              438 drivers/gpu/drm/qxl/qxl_drv.h bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
qdev              439 drivers/gpu/drm/qxl/qxl_drv.h int qxl_garbage_collect(struct qxl_device *qdev);
qdev              444 drivers/gpu/drm/qxl/qxl_drv.h int qxl_ttm_debugfs_init(struct qxl_device *qdev);
qdev              459 drivers/gpu/drm/qxl/qxl_drv.h int qxl_irq_init(struct qxl_device *qdev);
qdev              462 drivers/gpu/drm/qxl/qxl_drv.h int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev              466 drivers/gpu/drm/qxl/qxl_drv.h int qxl_surface_id_alloc(struct qxl_device *qdev,
qdev              468 drivers/gpu/drm/qxl/qxl_drv.h void qxl_surface_id_dealloc(struct qxl_device *qdev,
qdev              470 drivers/gpu/drm/qxl/qxl_drv.h int qxl_hw_surface_alloc(struct qxl_device *qdev,
qdev              472 drivers/gpu/drm/qxl/qxl_drv.h int qxl_hw_surface_dealloc(struct qxl_device *qdev,
qdev              475 drivers/gpu/drm/qxl/qxl_drv.h int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
qdev              479 drivers/gpu/drm/qxl/qxl_drv.h void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
qdev               35 drivers/gpu/drm/qxl/qxl_dumb.c 	struct qxl_device *qdev = dev->dev_private;
qdev               61 drivers/gpu/drm/qxl/qxl_dumb.c 	r = qxl_gem_object_create_with_handle(qdev, file_priv,
qdev               34 drivers/gpu/drm/qxl/qxl_gem.c 	struct qxl_device *qdev;
qdev               37 drivers/gpu/drm/qxl/qxl_gem.c 	qdev = (struct qxl_device *)gobj->dev->dev_private;
qdev               39 drivers/gpu/drm/qxl/qxl_gem.c 	qxl_surface_evict(qdev, qobj, false);
qdev               45 drivers/gpu/drm/qxl/qxl_gem.c int qxl_gem_object_create(struct qxl_device *qdev, int size,
qdev               58 drivers/gpu/drm/qxl/qxl_gem.c 	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
qdev               68 drivers/gpu/drm/qxl/qxl_gem.c 	mutex_lock(&qdev->gem.mutex);
qdev               69 drivers/gpu/drm/qxl/qxl_gem.c 	list_add_tail(&qbo->list, &qdev->gem.objects);
qdev               70 drivers/gpu/drm/qxl/qxl_gem.c 	mutex_unlock(&qdev->gem.mutex);
qdev               75 drivers/gpu/drm/qxl/qxl_gem.c int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
qdev               89 drivers/gpu/drm/qxl/qxl_gem.c 	r = qxl_gem_object_create(qdev, size, 0,
qdev              114 drivers/gpu/drm/qxl/qxl_gem.c void qxl_gem_init(struct qxl_device *qdev)
qdev              116 drivers/gpu/drm/qxl/qxl_gem.c 	INIT_LIST_HEAD(&qdev->gem.objects);
qdev              119 drivers/gpu/drm/qxl/qxl_gem.c void qxl_gem_fini(struct qxl_device *qdev)
qdev              121 drivers/gpu/drm/qxl/qxl_gem.c 	qxl_bo_force_delete(qdev);
qdev               33 drivers/gpu/drm/qxl/qxl_image.c qxl_allocate_chunk(struct qxl_device *qdev,
qdev               45 drivers/gpu/drm/qxl/qxl_image.c 	ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
qdev               56 drivers/gpu/drm/qxl/qxl_image.c qxl_image_alloc_objects(struct qxl_device *qdev,
qdev               70 drivers/gpu/drm/qxl/qxl_image.c 	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
qdev               76 drivers/gpu/drm/qxl/qxl_image.c 	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
qdev               86 drivers/gpu/drm/qxl/qxl_image.c void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
qdev              100 drivers/gpu/drm/qxl/qxl_image.c qxl_image_init_helper(struct qxl_device *qdev,
qdev              127 drivers/gpu/drm/qxl/qxl_image.c 	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
qdev              132 drivers/gpu/drm/qxl/qxl_image.c 	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
qdev              146 drivers/gpu/drm/qxl/qxl_image.c 				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
qdev              160 drivers/gpu/drm/qxl/qxl_image.c 				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
qdev              178 drivers/gpu/drm/qxl/qxl_image.c 					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
qdev              181 drivers/gpu/drm/qxl/qxl_image.c 					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
qdev              192 drivers/gpu/drm/qxl/qxl_image.c 	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
qdev              215 drivers/gpu/drm/qxl/qxl_image.c 		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
qdev              223 drivers/gpu/drm/qxl/qxl_image.c 	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
qdev              225 drivers/gpu/drm/qxl/qxl_image.c 	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
qdev              230 drivers/gpu/drm/qxl/qxl_image.c int qxl_image_init(struct qxl_device *qdev,
qdev              238 drivers/gpu/drm/qxl/qxl_image.c 	return qxl_image_init_helper(qdev, release, dimage, data,
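
qxl_image_init_helper() copies the framebuffer data into the chunk bo one page at a time with qxl_bo_kmap_atomic_page()/qxl_bo_kunmap_atomic_page(), then maps the image bo and points image->u.bitmap.data at the chunk. A sketch of the per-page copy; the stride-aligned fast path and the bitmap header fields are left out, and the offset bookkeeping is a reconstruction rather than a copy of the matches.

/* Sketch of filling an image chunk page by page (cf. qxl_image.c 100-225 above). */
static void example_fill_chunk(struct qxl_device *qdev, struct qxl_bo *chunk_bo,
                               const uint8_t *data, unsigned int size)
{
        unsigned int offset = sizeof(struct qxl_data_chunk);   /* skip chunk header */
        void *ptr;

        while (size) {
                unsigned int page = offset >> PAGE_SHIFT;
                unsigned int in_page = offset & ~PAGE_MASK;
                unsigned int count = min_t(unsigned int, size,
                                           PAGE_SIZE - in_page);

                /* map a single page of the chunk bo at a time */
                ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
                memcpy(ptr + in_page, data, count);
                qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

                data += count;
                offset += count;
                size -= count;
        }
}
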
qdev               39 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev               50 drivers/gpu/drm/qxl/qxl_ioctl.c 	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
qdev               67 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev               70 drivers/gpu/drm/qxl/qxl_ioctl.c 	return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
qdev               88 drivers/gpu/drm/qxl/qxl_ioctl.c apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
qdev               92 drivers/gpu/drm/qxl/qxl_ioctl.c 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
qdev               93 drivers/gpu/drm/qxl/qxl_ioctl.c 	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
qdev               96 drivers/gpu/drm/qxl/qxl_ioctl.c 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
qdev              100 drivers/gpu/drm/qxl/qxl_ioctl.c apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
qdev              108 drivers/gpu/drm/qxl/qxl_ioctl.c 	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
qdev              110 drivers/gpu/drm/qxl/qxl_ioctl.c 	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
qdev              142 drivers/gpu/drm/qxl/qxl_ioctl.c static int qxl_process_single_command(struct qxl_device *qdev,
qdev              178 drivers/gpu/drm/qxl/qxl_ioctl.c 	ret = qxl_alloc_release_reserved(qdev,
qdev              188 drivers/gpu/drm/qxl/qxl_ioctl.c 	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
qdev              196 drivers/gpu/drm/qxl/qxl_ioctl.c 		draw->mm_time = qdev->rom->mm_clock;
qdev              199 drivers/gpu/drm/qxl/qxl_ioctl.c 	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
qdev              259 drivers/gpu/drm/qxl/qxl_ioctl.c 			apply_reloc(qdev, &reloc_info[i]);
qdev              261 drivers/gpu/drm/qxl/qxl_ioctl.c 			apply_surf_reloc(qdev, &reloc_info[i]);
qdev              265 drivers/gpu/drm/qxl/qxl_ioctl.c 	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
qdev              270 drivers/gpu/drm/qxl/qxl_ioctl.c 		qxl_release_free(qdev, release);
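
The execbuffer path validates each user command, copies it into a release-backed command bo, and then patches every relocation by writing a guest-physical address directly into the mapped page that holds the slot. A sketch of apply_reloc(); info->src_bo and info->src_offset are assumptions, since the continuation of line 93 is not visible in the matches.

/* Sketch of patching a buffer relocation (cf. qxl_ioctl.c 88-96 above). */
static void example_apply_reloc(struct qxl_device *qdev,
                                struct qxl_reloc_info *info)
{
        void *reloc_page;

        /* map only the page of the destination bo that holds the slot */
        reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo,
                                             info->dst_offset & PAGE_MASK);
        /* write the source bo's guest-physical address into that slot */
        *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) =
                qxl_bo_physical_address(qdev, info->src_bo, info->src_offset);
        qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
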
qdev              279 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev              294 drivers/gpu/drm/qxl/qxl_ioctl.c 		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
qdev              304 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev              336 drivers/gpu/drm/qxl/qxl_ioctl.c 	ret = qxl_bo_check_id(qdev, qobj);
qdev              341 drivers/gpu/drm/qxl/qxl_ioctl.c 	ret = qxl_io_update_area(qdev, qobj, &area);
qdev              354 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev              359 drivers/gpu/drm/qxl/qxl_ioctl.c 		param->value = qdev->rom->n_surfaces;
qdev              373 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev              386 drivers/gpu/drm/qxl/qxl_ioctl.c 	if (qdev->rom->client_capabilities[byte] & (1 << idx))
qdev              394 drivers/gpu/drm/qxl/qxl_ioctl.c 	struct qxl_device *qdev = dev->dev_private;
qdev              412 drivers/gpu/drm/qxl/qxl_ioctl.c 	ret = qxl_gem_object_create_with_handle(qdev, file,
qdev               35 drivers/gpu/drm/qxl/qxl_irq.c 	struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
qdev               38 drivers/gpu/drm/qxl/qxl_irq.c 	pending = xchg(&qdev->ram_header->int_pending, 0);
qdev               43 drivers/gpu/drm/qxl/qxl_irq.c 	atomic_inc(&qdev->irq_received);
qdev               46 drivers/gpu/drm/qxl/qxl_irq.c 		atomic_inc(&qdev->irq_received_display);
qdev               47 drivers/gpu/drm/qxl/qxl_irq.c 		wake_up_all(&qdev->display_event);
qdev               48 drivers/gpu/drm/qxl/qxl_irq.c 		qxl_queue_garbage_collect(qdev, false);
qdev               51 drivers/gpu/drm/qxl/qxl_irq.c 		atomic_inc(&qdev->irq_received_cursor);
qdev               52 drivers/gpu/drm/qxl/qxl_irq.c 		wake_up_all(&qdev->cursor_event);
qdev               55 drivers/gpu/drm/qxl/qxl_irq.c 		atomic_inc(&qdev->irq_received_io_cmd);
qdev               56 drivers/gpu/drm/qxl/qxl_irq.c 		wake_up_all(&qdev->io_cmd_event);
qdev               63 drivers/gpu/drm/qxl/qxl_irq.c 		qdev->irq_received_error++;
qdev               67 drivers/gpu/drm/qxl/qxl_irq.c 		schedule_work(&qdev->client_monitors_config_work);
qdev               69 drivers/gpu/drm/qxl/qxl_irq.c 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
qdev               70 drivers/gpu/drm/qxl/qxl_irq.c 	outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
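
The interrupt handler atomically fetches and clears the pending bits shared with the device, counts and dispatches each source (display, cursor, io command, client monitors config), then re-arms by restoring int_mask and writing the update-irq port. A sketch; the QXL_INTERRUPT_* bit names and the branch structure are assumptions from the device headers, since only the branch bodies appear in the matches, and the error-reporting branch is omitted.

/* Sketch of the interrupt handler (cf. qxl_irq.c 35-70 above). */
static irqreturn_t example_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct qxl_device *qdev = dev->dev_private;
        uint32_t pending;

        /* fetch-and-clear the pending bits shared with the device */
        pending = xchg(&qdev->ram_header->int_pending, 0);
        if (!pending)
                return IRQ_NONE;

        atomic_inc(&qdev->irq_received);

        if (pending & QXL_INTERRUPT_DISPLAY) {
                atomic_inc(&qdev->irq_received_display);
                wake_up_all(&qdev->display_event);
                qxl_queue_garbage_collect(qdev, false);
        }
        if (pending & QXL_INTERRUPT_CURSOR) {
                atomic_inc(&qdev->irq_received_cursor);
                wake_up_all(&qdev->cursor_event);
        }
        if (pending & QXL_INTERRUPT_IO_CMD) {
                atomic_inc(&qdev->irq_received_io_cmd);
                wake_up_all(&qdev->io_cmd_event);
        }
        if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
                schedule_work(&qdev->client_monitors_config_work);

        /* re-arm: restore the mask and ack via the update-irq port */
        qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
        outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);

        return IRQ_HANDLED;
}
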
qdev               76 drivers/gpu/drm/qxl/qxl_irq.c 	struct qxl_device *qdev = container_of(work, struct qxl_device,
qdev               79 drivers/gpu/drm/qxl/qxl_irq.c 	qxl_display_read_client_monitors_config(qdev);
qdev               82 drivers/gpu/drm/qxl/qxl_irq.c int qxl_irq_init(struct qxl_device *qdev)
qdev               86 drivers/gpu/drm/qxl/qxl_irq.c 	init_waitqueue_head(&qdev->display_event);
qdev               87 drivers/gpu/drm/qxl/qxl_irq.c 	init_waitqueue_head(&qdev->cursor_event);
qdev               88 drivers/gpu/drm/qxl/qxl_irq.c 	init_waitqueue_head(&qdev->io_cmd_event);
qdev               89 drivers/gpu/drm/qxl/qxl_irq.c 	INIT_WORK(&qdev->client_monitors_config_work,
qdev               91 drivers/gpu/drm/qxl/qxl_irq.c 	atomic_set(&qdev->irq_received, 0);
qdev               92 drivers/gpu/drm/qxl/qxl_irq.c 	atomic_set(&qdev->irq_received_display, 0);
qdev               93 drivers/gpu/drm/qxl/qxl_irq.c 	atomic_set(&qdev->irq_received_cursor, 0);
qdev               94 drivers/gpu/drm/qxl/qxl_irq.c 	atomic_set(&qdev->irq_received_io_cmd, 0);
qdev               95 drivers/gpu/drm/qxl/qxl_irq.c 	qdev->irq_received_error = 0;
qdev               96 drivers/gpu/drm/qxl/qxl_irq.c 	ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq);
qdev               97 drivers/gpu/drm/qxl/qxl_irq.c 	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
qdev               37 drivers/gpu/drm/qxl/qxl_kms.c static bool qxl_check_device(struct qxl_device *qdev)
qdev               39 drivers/gpu/drm/qxl/qxl_kms.c 	struct qxl_rom *rom = qdev->rom;
qdev               54 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->vram_size = rom->surface0_area_size;
qdev               59 drivers/gpu/drm/qxl/qxl_kms.c static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot)
qdev               61 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
qdev               62 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size;
qdev               63 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index);
qdev               66 drivers/gpu/drm/qxl/qxl_kms.c static void setup_slot(struct qxl_device *qdev,
qdev               80 drivers/gpu/drm/qxl/qxl_kms.c 	setup_hw_slot(qdev, slot);
qdev               82 drivers/gpu/drm/qxl/qxl_kms.c 	slot->generation = qdev->rom->slot_generation;
qdev               83 drivers/gpu/drm/qxl/qxl_kms.c 	high_bits = (qdev->rom->slots_start + slot->index)
qdev               84 drivers/gpu/drm/qxl/qxl_kms.c 		<< qdev->rom->slot_gen_bits;
qdev               86 drivers/gpu/drm/qxl/qxl_kms.c 	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
qdev               96 drivers/gpu/drm/qxl/qxl_kms.c void qxl_reinit_memslots(struct qxl_device *qdev)
qdev               98 drivers/gpu/drm/qxl/qxl_kms.c 	setup_hw_slot(qdev, &qdev->main_slot);
qdev               99 drivers/gpu/drm/qxl/qxl_kms.c 	setup_hw_slot(qdev, &qdev->surfaces_slot);
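
setup_slot() above packs a memory-slot identifier and its generation into the top bits of a 64-bit value; the driver later ORs this tag into object offsets so the device can tell which slot a physical address refers to. A commented restatement of that arithmetic, assuming the result is stored in slot->high_bits and that an intermediate `|= generation` step sits between the two listed shifts:

/* Sketch of the address-tag computation from setup_slot() above. */
static void qxl_slot_high_bits_sketch(struct qxl_device *qdev,
				      struct qxl_memslot *slot)
{
	uint64_t high_bits;

	slot->generation = qdev->rom->slot_generation;

	/* slot id sits in the bits just above the generation field */
	high_bits = (qdev->rom->slots_start + slot->index)
		<< qdev->rom->slot_gen_bits;
	high_bits |= slot->generation;		/* assumed intermediate step */

	/* move the packed id+generation into the top of a 64-bit address */
	high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits));
	slot->high_bits = high_bits;		/* assumption: field used later */
}
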
qdev              104 drivers/gpu/drm/qxl/qxl_kms.c 	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
qdev              106 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_garbage_collect(qdev);
qdev              109 drivers/gpu/drm/qxl/qxl_kms.c int qxl_device_init(struct qxl_device *qdev,
qdev              115 drivers/gpu/drm/qxl/qxl_kms.c 	r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
qdev              121 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->ddev.pdev = pdev;
qdev              122 drivers/gpu/drm/qxl/qxl_kms.c 	pci_set_drvdata(pdev, &qdev->ddev);
qdev              123 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->ddev.dev_private = qdev;
qdev              125 drivers/gpu/drm/qxl/qxl_kms.c 	mutex_init(&qdev->gem.mutex);
qdev              126 drivers/gpu/drm/qxl/qxl_kms.c 	mutex_init(&qdev->update_area_mutex);
qdev              127 drivers/gpu/drm/qxl/qxl_kms.c 	mutex_init(&qdev->release_mutex);
qdev              128 drivers/gpu/drm/qxl/qxl_kms.c 	mutex_init(&qdev->surf_evict_mutex);
qdev              129 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_gem_init(qdev);
qdev              131 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->rom_base = pci_resource_start(pdev, 2);
qdev              132 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->rom_size = pci_resource_len(pdev, 2);
qdev              133 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->vram_base = pci_resource_start(pdev, 0);
qdev              134 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->io_base = pci_resource_start(pdev, 3);
qdev              136 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
qdev              137 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qdev->vram_mapping) {
qdev              146 drivers/gpu/drm/qxl/qxl_kms.c 		qdev->surfaceram_base = pci_resource_start(pdev, sb);
qdev              147 drivers/gpu/drm/qxl/qxl_kms.c 		qdev->surfaceram_size = pci_resource_len(pdev, sb);
qdev              148 drivers/gpu/drm/qxl/qxl_kms.c 		qdev->surface_mapping =
qdev              149 drivers/gpu/drm/qxl/qxl_kms.c 			io_mapping_create_wc(qdev->surfaceram_base,
qdev              150 drivers/gpu/drm/qxl/qxl_kms.c 					     qdev->surfaceram_size);
qdev              152 drivers/gpu/drm/qxl/qxl_kms.c 	if (qdev->surface_mapping == NULL) {
qdev              155 drivers/gpu/drm/qxl/qxl_kms.c 		qdev->surfaceram_base = pci_resource_start(pdev, sb);
qdev              156 drivers/gpu/drm/qxl/qxl_kms.c 		qdev->surfaceram_size = pci_resource_len(pdev, sb);
qdev              157 drivers/gpu/drm/qxl/qxl_kms.c 		qdev->surface_mapping =
qdev              158 drivers/gpu/drm/qxl/qxl_kms.c 			io_mapping_create_wc(qdev->surfaceram_base,
qdev              159 drivers/gpu/drm/qxl/qxl_kms.c 					     qdev->surfaceram_size);
qdev              160 drivers/gpu/drm/qxl/qxl_kms.c 		if (!qdev->surface_mapping) {
qdev              168 drivers/gpu/drm/qxl/qxl_kms.c 		 (unsigned long long)qdev->vram_base,
qdev              172 drivers/gpu/drm/qxl/qxl_kms.c 		 (unsigned long long)qdev->surfaceram_base,
qdev              174 drivers/gpu/drm/qxl/qxl_kms.c 		 (int)qdev->surfaceram_size / 1024 / 1024,
qdev              175 drivers/gpu/drm/qxl/qxl_kms.c 		 (int)qdev->surfaceram_size / 1024,
qdev              178 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
qdev              179 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qdev->rom) {
qdev              185 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qxl_check_device(qdev)) {
qdev              190 drivers/gpu/drm/qxl/qxl_kms.c 	r = qxl_bo_init(qdev);
qdev              196 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->ram_header = ioremap(qdev->vram_base +
qdev              197 drivers/gpu/drm/qxl/qxl_kms.c 				   qdev->rom->ram_header_offset,
qdev              198 drivers/gpu/drm/qxl/qxl_kms.c 				   sizeof(*qdev->ram_header));
qdev              199 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qdev->ram_header) {
qdev              205 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
qdev              208 drivers/gpu/drm/qxl/qxl_kms.c 					     qdev->io_base + QXL_IO_NOTIFY_CMD,
qdev              210 drivers/gpu/drm/qxl/qxl_kms.c 					     &qdev->display_event);
qdev              211 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qdev->command_ring) {
qdev              217 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->cursor_ring = qxl_ring_create(
qdev              218 drivers/gpu/drm/qxl/qxl_kms.c 				&(qdev->ram_header->cursor_ring_hdr),
qdev              221 drivers/gpu/drm/qxl/qxl_kms.c 				qdev->io_base + QXL_IO_NOTIFY_CMD,
qdev              223 drivers/gpu/drm/qxl/qxl_kms.c 				&qdev->cursor_event);
qdev              225 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qdev->cursor_ring) {
qdev              231 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->release_ring = qxl_ring_create(
qdev              232 drivers/gpu/drm/qxl/qxl_kms.c 				&(qdev->ram_header->release_ring_hdr),
qdev              237 drivers/gpu/drm/qxl/qxl_kms.c 	if (!qdev->release_ring) {
qdev              243 drivers/gpu/drm/qxl/qxl_kms.c 	idr_init(&qdev->release_idr);
qdev              244 drivers/gpu/drm/qxl/qxl_kms.c 	spin_lock_init(&qdev->release_idr_lock);
qdev              245 drivers/gpu/drm/qxl/qxl_kms.c 	spin_lock_init(&qdev->release_lock);
qdev              247 drivers/gpu/drm/qxl/qxl_kms.c 	idr_init(&qdev->surf_id_idr);
qdev              248 drivers/gpu/drm/qxl/qxl_kms.c 	spin_lock_init(&qdev->surf_id_idr_lock);
qdev              250 drivers/gpu/drm/qxl/qxl_kms.c 	mutex_init(&qdev->async_io_mutex);
qdev              254 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_io_reset(qdev);
qdev              257 drivers/gpu/drm/qxl/qxl_kms.c 	r = qxl_irq_init(qdev);
qdev              267 drivers/gpu/drm/qxl/qxl_kms.c 	setup_slot(qdev, &qdev->main_slot, 0, "main",
qdev              268 drivers/gpu/drm/qxl/qxl_kms.c 		   (unsigned long)qdev->vram_base,
qdev              269 drivers/gpu/drm/qxl/qxl_kms.c 		   (unsigned long)qdev->rom->ram_header_offset);
qdev              270 drivers/gpu/drm/qxl/qxl_kms.c 	setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces",
qdev              271 drivers/gpu/drm/qxl/qxl_kms.c 		   (unsigned long)qdev->surfaceram_base,
qdev              272 drivers/gpu/drm/qxl/qxl_kms.c 		   (unsigned long)qdev->surfaceram_size);
qdev              274 drivers/gpu/drm/qxl/qxl_kms.c 	INIT_WORK(&qdev->gc_work, qxl_gc_work);
qdev              279 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_ring_free(qdev->release_ring);
qdev              281 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_ring_free(qdev->cursor_ring);
qdev              283 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_ring_free(qdev->command_ring);
qdev              285 drivers/gpu/drm/qxl/qxl_kms.c 	iounmap(qdev->ram_header);
qdev              287 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_bo_fini(qdev);
qdev              289 drivers/gpu/drm/qxl/qxl_kms.c 	iounmap(qdev->rom);
qdev              291 drivers/gpu/drm/qxl/qxl_kms.c 	io_mapping_free(qdev->surface_mapping);
qdev              293 drivers/gpu/drm/qxl/qxl_kms.c 	io_mapping_free(qdev->vram_mapping);
qdev              298 drivers/gpu/drm/qxl/qxl_kms.c void qxl_device_fini(struct qxl_device *qdev)
qdev              300 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_bo_unref(&qdev->current_release_bo[0]);
qdev              301 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_bo_unref(&qdev->current_release_bo[1]);
qdev              302 drivers/gpu/drm/qxl/qxl_kms.c 	flush_work(&qdev->gc_work);
qdev              303 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_ring_free(qdev->command_ring);
qdev              304 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_ring_free(qdev->cursor_ring);
qdev              305 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_ring_free(qdev->release_ring);
qdev              306 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_gem_fini(qdev);
qdev              307 drivers/gpu/drm/qxl/qxl_kms.c 	qxl_bo_fini(qdev);
qdev              308 drivers/gpu/drm/qxl/qxl_kms.c 	io_mapping_free(qdev->surface_mapping);
qdev              309 drivers/gpu/drm/qxl/qxl_kms.c 	io_mapping_free(qdev->vram_mapping);
qdev              310 drivers/gpu/drm/qxl/qxl_kms.c 	iounmap(qdev->ram_header);
qdev              311 drivers/gpu/drm/qxl/qxl_kms.c 	iounmap(qdev->rom);
qdev              312 drivers/gpu/drm/qxl/qxl_kms.c 	qdev->rom = NULL;
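
qxl_device_init() above also establishes how the rest of the driver finds the qxl_device again: the drm_device is embedded in it and its dev_private points back at it (see the pci_set_drvdata()/dev_private lines), while work items such as gc_work recover it with container_of(). A small sketch of those two directions, using only relationships visible in the listing:

/* Sketch: recovering the qxl_device from its embedded members. */
static struct qxl_device *to_qxl_sketch(struct drm_device *ddev)
{
	/* dev_private is pointed back at qdev during qxl_device_init() */
	return ddev->dev_private;
}

static struct qxl_device *gc_work_to_qxl_sketch(struct work_struct *work)
{
	/* the same object recovered from an embedded member, as qxl_gc_work()
	 * and the release/ttm fragments elsewhere in this listing do */
	return container_of(work, struct qxl_device, gc_work);
}
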
qdev               33 drivers/gpu/drm/qxl/qxl_object.c 	struct qxl_device *qdev;
qdev               36 drivers/gpu/drm/qxl/qxl_object.c 	qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
qdev               38 drivers/gpu/drm/qxl/qxl_object.c 	qxl_surface_evict(qdev, bo, false);
qdev               40 drivers/gpu/drm/qxl/qxl_object.c 	mutex_lock(&qdev->gem.mutex);
qdev               42 drivers/gpu/drm/qxl/qxl_object.c 	mutex_unlock(&qdev->gem.mutex);
qdev               80 drivers/gpu/drm/qxl/qxl_object.c int qxl_bo_create(struct qxl_device *qdev,
qdev               98 drivers/gpu/drm/qxl/qxl_object.c 	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
qdev              113 drivers/gpu/drm/qxl/qxl_object.c 	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
qdev              118 drivers/gpu/drm/qxl/qxl_object.c 			dev_err(qdev->ddev.dev,
qdev              148 drivers/gpu/drm/qxl/qxl_object.c void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
qdev              157 drivers/gpu/drm/qxl/qxl_object.c 		map = qdev->vram_mapping;
qdev              159 drivers/gpu/drm/qxl/qxl_object.c 		map = qdev->surface_mapping;
qdev              193 drivers/gpu/drm/qxl/qxl_object.c void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
qdev              304 drivers/gpu/drm/qxl/qxl_object.c void qxl_bo_force_delete(struct qxl_device *qdev)
qdev              308 drivers/gpu/drm/qxl/qxl_object.c 	if (list_empty(&qdev->gem.objects))
qdev              310 drivers/gpu/drm/qxl/qxl_object.c 	dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
qdev              311 drivers/gpu/drm/qxl/qxl_object.c 	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
qdev              312 drivers/gpu/drm/qxl/qxl_object.c 		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
qdev              315 drivers/gpu/drm/qxl/qxl_object.c 		mutex_lock(&qdev->gem.mutex);
qdev              317 drivers/gpu/drm/qxl/qxl_object.c 		mutex_unlock(&qdev->gem.mutex);
qdev              323 drivers/gpu/drm/qxl/qxl_object.c int qxl_bo_init(struct qxl_device *qdev)
qdev              325 drivers/gpu/drm/qxl/qxl_object.c 	return qxl_ttm_init(qdev);
qdev              328 drivers/gpu/drm/qxl/qxl_object.c void qxl_bo_fini(struct qxl_device *qdev)
qdev              330 drivers/gpu/drm/qxl/qxl_object.c 	qxl_ttm_fini(qdev);
qdev              333 drivers/gpu/drm/qxl/qxl_object.c int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
qdev              339 drivers/gpu/drm/qxl/qxl_object.c 		ret = qxl_surface_id_alloc(qdev, bo);
qdev              343 drivers/gpu/drm/qxl/qxl_object.c 		ret = qxl_hw_surface_alloc(qdev, bo);
qdev              350 drivers/gpu/drm/qxl/qxl_object.c int qxl_surf_evict(struct qxl_device *qdev)
qdev              352 drivers/gpu/drm/qxl/qxl_object.c 	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
qdev              355 drivers/gpu/drm/qxl/qxl_object.c int qxl_vram_evict(struct qxl_device *qdev)
qdev              357 drivers/gpu/drm/qxl/qxl_object.c 	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
qdev               89 drivers/gpu/drm/qxl/qxl_object.h extern int qxl_bo_create(struct qxl_device *qdev,
qdev               96 drivers/gpu/drm/qxl/qxl_object.h void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
qdev               97 drivers/gpu/drm/qxl/qxl_object.h void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
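
qxl_bo_kmap_atomic_page() and qxl_bo_kunmap_atomic_page(), declared above, pick the VRAM or surface io_mapping for a BO and hand back a temporary mapping of a single page; qxl_release_map()/qxl_release_unmap() further down are their typical callers. A hedged usage sketch, splitting the page and in-page offset the way the release code does; the `id` member of union qxl_release_info is an assumption:

/* Sketch: touch a small structure stored inside a qxl_bo, one page at a time. */
static void poke_bo_page_sketch(struct qxl_device *qdev, struct qxl_bo *bo,
				unsigned long offset)
{
	void *page;
	union qxl_release_info *info;

	/* map only the page containing 'offset' */
	page = qxl_bo_kmap_atomic_page(qdev, bo, offset & PAGE_MASK);
	info = (union qxl_release_info *)((u8 *)page + (offset & ~PAGE_MASK));

	info->id = 0;	/* assumption: 'id' is a member of union qxl_release_info */

	qxl_bo_kunmap_atomic_page(qdev, bo, page);
}
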
qdev               60 drivers/gpu/drm/qxl/qxl_release.c 	struct qxl_device *qdev;
qdev               66 drivers/gpu/drm/qxl/qxl_release.c 	qdev = container_of(fence->lock, struct qxl_device, release_lock);
qdev               76 drivers/gpu/drm/qxl/qxl_release.c 	qxl_io_notify_oom(qdev);
qdev               79 drivers/gpu/drm/qxl/qxl_release.c 		if (!qxl_queue_garbage_collect(qdev, true))
qdev              124 drivers/gpu/drm/qxl/qxl_release.c qxl_release_alloc(struct qxl_device *qdev, int type,
qdev              143 drivers/gpu/drm/qxl/qxl_release.c 	spin_lock(&qdev->release_idr_lock);
qdev              144 drivers/gpu/drm/qxl/qxl_release.c 	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
qdev              145 drivers/gpu/drm/qxl/qxl_release.c 	release->base.seqno = ++qdev->release_seqno;
qdev              146 drivers/gpu/drm/qxl/qxl_release.c 	spin_unlock(&qdev->release_idr_lock);
qdev              177 drivers/gpu/drm/qxl/qxl_release.c qxl_release_free(struct qxl_device *qdev,
qdev              183 drivers/gpu/drm/qxl/qxl_release.c 		qxl_surface_id_dealloc(qdev, release->surface_release_id);
qdev              185 drivers/gpu/drm/qxl/qxl_release.c 	spin_lock(&qdev->release_idr_lock);
qdev              186 drivers/gpu/drm/qxl/qxl_release.c 	idr_remove(&qdev->release_idr, release->id);
qdev              187 drivers/gpu/drm/qxl/qxl_release.c 	spin_unlock(&qdev->release_idr_lock);
qdev              201 drivers/gpu/drm/qxl/qxl_release.c static int qxl_release_bo_alloc(struct qxl_device *qdev,
qdev              205 drivers/gpu/drm/qxl/qxl_release.c 	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
qdev              289 drivers/gpu/drm/qxl/qxl_release.c int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
qdev              300 drivers/gpu/drm/qxl/qxl_release.c 		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
qdev              310 drivers/gpu/drm/qxl/qxl_release.c 		info = qxl_release_map(qdev, *release);
qdev              312 drivers/gpu/drm/qxl/qxl_release.c 		qxl_release_unmap(qdev, *release, info);
qdev              316 drivers/gpu/drm/qxl/qxl_release.c 	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
qdev              320 drivers/gpu/drm/qxl/qxl_release.c int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
qdev              341 drivers/gpu/drm/qxl/qxl_release.c 	idr_ret = qxl_release_alloc(qdev, type, release);
qdev              348 drivers/gpu/drm/qxl/qxl_release.c 	mutex_lock(&qdev->release_mutex);
qdev              349 drivers/gpu/drm/qxl/qxl_release.c 	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
qdev              350 drivers/gpu/drm/qxl/qxl_release.c 		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
qdev              351 drivers/gpu/drm/qxl/qxl_release.c 		qdev->current_release_bo_offset[cur_idx] = 0;
qdev              352 drivers/gpu/drm/qxl/qxl_release.c 		qdev->current_release_bo[cur_idx] = NULL;
qdev              354 drivers/gpu/drm/qxl/qxl_release.c 	if (!qdev->current_release_bo[cur_idx]) {
qdev              355 drivers/gpu/drm/qxl/qxl_release.c 		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
qdev              357 drivers/gpu/drm/qxl/qxl_release.c 			mutex_unlock(&qdev->release_mutex);
qdev              358 drivers/gpu/drm/qxl/qxl_release.c 			qxl_release_free(qdev, *release);
qdev              363 drivers/gpu/drm/qxl/qxl_release.c 	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
qdev              366 drivers/gpu/drm/qxl/qxl_release.c 	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
qdev              367 drivers/gpu/drm/qxl/qxl_release.c 	qdev->current_release_bo_offset[cur_idx]++;
qdev              372 drivers/gpu/drm/qxl/qxl_release.c 	mutex_unlock(&qdev->release_mutex);
qdev              377 drivers/gpu/drm/qxl/qxl_release.c 		qxl_release_free(qdev, *release);
qdev              381 drivers/gpu/drm/qxl/qxl_release.c 	info = qxl_release_map(qdev, *release);
qdev              383 drivers/gpu/drm/qxl/qxl_release.c 	qxl_release_unmap(qdev, *release, info);
qdev              388 drivers/gpu/drm/qxl/qxl_release.c struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
qdev              393 drivers/gpu/drm/qxl/qxl_release.c 	spin_lock(&qdev->release_idr_lock);
qdev              394 drivers/gpu/drm/qxl/qxl_release.c 	release = idr_find(&qdev->release_idr, id);
qdev              395 drivers/gpu/drm/qxl/qxl_release.c 	spin_unlock(&qdev->release_idr_lock);
qdev              404 drivers/gpu/drm/qxl/qxl_release.c union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
qdev              411 drivers/gpu/drm/qxl/qxl_release.c 	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
qdev              418 drivers/gpu/drm/qxl/qxl_release.c void qxl_release_unmap(struct qxl_device *qdev,
qdev              426 drivers/gpu/drm/qxl/qxl_release.c 	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
qdev              435 drivers/gpu/drm/qxl/qxl_release.c 	struct qxl_device *qdev;
qdev              444 drivers/gpu/drm/qxl/qxl_release.c 	qdev = container_of(bdev, struct qxl_device, mman.bdev);
qdev              450 drivers/gpu/drm/qxl/qxl_release.c 	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
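
Taken together, the qxl_release.c lines show a release's lifecycle: qxl_release_alloc() hands out an idr id and fence sequence number under release_idr_lock, qxl_alloc_release_reserved() parcels releases out of a shared page-sized BO tracked in current_release_bo[], and callers fill in their command through qxl_release_map()/qxl_release_unmap(), freeing with qxl_release_free() on error. A caller-side sketch; the trailing parameter of qxl_alloc_release_reserved() is not visible in the listing and is assumed here to be an optional BO out-pointer that may be NULL:

/* Sketch: allocate a reserved release, initialize its info block. */
static int make_release_sketch(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release)
{
	union qxl_release_info *info;
	int ret;

	ret = qxl_alloc_release_reserved(qdev, size, type, release, NULL);
	if (ret)
		return ret;

	/* map the page holding this release's info block, fill it, unmap */
	info = qxl_release_map(qdev, *release);
	info->id = 0;		/* assumption: caller fills in its data here */
	qxl_release_unmap(qdev, *release, info);
	return 0;
}
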
qdev               44 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev;
qdev               47 drivers/gpu/drm/qxl/qxl_ttm.c 	qdev = container_of(mman, struct qxl_device, mman);
qdev               48 drivers/gpu/drm/qxl/qxl_ttm.c 	return qdev;
qdev               70 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev = file_priv->minor->dev->dev_private;
qdev               72 drivers/gpu/drm/qxl/qxl_ttm.c 	if (qdev == NULL) {
qdev               80 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
qdev              100 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev = qxl_get_qdev(bdev);
qdev              102 drivers/gpu/drm/qxl/qxl_ttm.c 		64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits + 8);
qdev              116 drivers/gpu/drm/qxl/qxl_ttm.c 			&qdev->main_slot : &qdev->surfaces_slot;
qdev              166 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev = qxl_get_qdev(bdev);
qdev              181 drivers/gpu/drm/qxl/qxl_ttm.c 		mem->bus.base = qdev->vram_base;
qdev              186 drivers/gpu/drm/qxl/qxl_ttm.c 		mem->bus.base = qdev->surfaceram_base;
qdev              205 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device		*qdev;
qdev              246 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev;
qdev              249 drivers/gpu/drm/qxl/qxl_ttm.c 	qdev = qxl_get_qdev(bo->bdev);
qdev              254 drivers/gpu/drm/qxl/qxl_ttm.c 	gtt->qdev = qdev;
qdev              295 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev;
qdev              300 drivers/gpu/drm/qxl/qxl_ttm.c 	qdev = qbo->tbo.base.dev->dev_private;
qdev              303 drivers/gpu/drm/qxl/qxl_ttm.c 		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
qdev              319 drivers/gpu/drm/qxl/qxl_ttm.c int qxl_ttm_init(struct qxl_device *qdev)
qdev              325 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_device_init(&qdev->mman.bdev,
qdev              327 drivers/gpu/drm/qxl/qxl_ttm.c 			       qdev->ddev.anon_inode->i_mapping,
qdev              334 drivers/gpu/drm/qxl/qxl_ttm.c 	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
qdev              335 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
qdev              341 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
qdev              342 drivers/gpu/drm/qxl/qxl_ttm.c 			   qdev->surfaceram_size / PAGE_SIZE);
qdev              348 drivers/gpu/drm/qxl/qxl_ttm.c 		 (unsigned int)qdev->vram_size / (1024 * 1024));
qdev              352 drivers/gpu/drm/qxl/qxl_ttm.c 		 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
qdev              356 drivers/gpu/drm/qxl/qxl_ttm.c void qxl_ttm_fini(struct qxl_device *qdev)
qdev              358 drivers/gpu/drm/qxl/qxl_ttm.c 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
qdev              359 drivers/gpu/drm/qxl/qxl_ttm.c 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
qdev              360 drivers/gpu/drm/qxl/qxl_ttm.c 	ttm_bo_device_release(&qdev->mman.bdev);
qdev              383 drivers/gpu/drm/qxl/qxl_ttm.c int qxl_ttm_debugfs_init(struct qxl_device *qdev)
qdev              399 drivers/gpu/drm/qxl/qxl_ttm.c 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
qdev              401 drivers/gpu/drm/qxl/qxl_ttm.c 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
qdev              404 drivers/gpu/drm/qxl/qxl_ttm.c 	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
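
The qxl_ttm.c fragments tie TTM placements back to the memory slots registered in qxl_kms.c: objects in TTM_PL_VRAM live in the main slot behind the VRAM BAR, objects in TTM_PL_PRIV live in the surfaces slot behind the surface-RAM BAR, and the gpu_offset_shift line reserves the top id/generation bits of the address. A one-line restatement of the slot selection, grounded in the ternary shown above:

/* Sketch: which memslot backs a given TTM placement (per the lines above). */
static struct qxl_memslot *placement_to_slot_sketch(struct qxl_device *qdev,
						    u32 mem_type)
{
	return (mem_type == TTM_PL_VRAM) ?
		&qdev->main_slot : &qdev->surfaces_slot;
}
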
qdev              364 drivers/gpu/drm/virtio/virtgpu_drv.h int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
qdev              410 drivers/gpu/drm/virtio/virtgpu_drv.h 			struct virtio_gpu_device *qdev =
qdev              412 drivers/gpu/drm/virtio/virtgpu_drv.h 			dev_err(qdev->dev, "%p reserve failed\n", bo);
qdev              203 drivers/gpu/drm/virtio/virtgpu_object.c int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
qdev              225 drivers/gpu/drm/virtio/virtgpu_object.c 	max_segment = virtio_max_dma_size(qdev->vdev);
qdev             4675 drivers/md/raid5.c 	struct r5dev *pdev, *qdev;
qdev             4813 drivers/md/raid5.c 	qdev = &sh->dev[sh->qd_idx];
qdev             4823 drivers/md/raid5.c 	    (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
qdev             4824 drivers/md/raid5.c 			     && !test_bit(R5_LOCKED, &qdev->flags)
qdev             4825 drivers/md/raid5.c 			     && (test_bit(R5_UPTODATE, &qdev->flags) ||
qdev             4826 drivers/md/raid5.c 				 test_bit(R5_Discard, &qdev->flags))))))
qdev              104 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_sem_spinlock(struct ql3_adapter *qdev,
qdev              108 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev              123 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
qdev              126 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev              131 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
qdev              134 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev              145 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
qdev              150 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_sem_lock(qdev,
qdev              152 drivers/net/ethernet/qlogic/qla3xxx.c 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
qdev              154 drivers/net/ethernet/qlogic/qla3xxx.c 			netdev_printk(KERN_DEBUG, qdev->ndev,
qdev              161 drivers/net/ethernet/qlogic/qla3xxx.c 	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
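
ql_sem_spinlock(), ql_sem_lock() and ql_sem_unlock() above arbitrate shared chip resources between the two ports through a hardware semaphore register, and ql_wait_for_drvr_lock() spins on the driver-lock variant. A hedged sketch of the bracket pattern that the NVRAM and PHY code further down follows; the sem_bits encoding is left to the caller, and a non-zero return is assumed to mean the semaphore was not granted:

/* Sketch: bracket hardware access with the chip semaphore helpers above. */
static int with_hw_sem_sketch(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	if (ql_sem_spinlock(qdev, sem_mask, sem_bits))
		return -EAGAIN;	/* assumption: non-zero means not granted */

	/* ... touch the shared resource while this port owns the semaphore ... */

	ql_sem_unlock(qdev, sem_mask);
	return 0;
}
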
qdev              165 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
qdev              168 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev              173 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->current_page = page;
qdev              176 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
qdev              181 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev              183 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev              188 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
qdev              193 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
qdev              198 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev              200 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->current_page != 0)
qdev              201 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_register_page(qdev, 0);
qdev              204 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev              208 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
qdev              210 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->current_page != 0)
qdev              211 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_register_page(qdev, 0);
qdev              215 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_write_common_reg_l(struct ql3_adapter *qdev,
qdev              220 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev              223 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev              226 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_write_common_reg(struct ql3_adapter *qdev,
qdev              233 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_write_nvram_reg(struct ql3_adapter *qdev,
qdev              241 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_write_page0_reg(struct ql3_adapter *qdev,
qdev              244 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->current_page != 0)
qdev              245 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_register_page(qdev, 0);
qdev              253 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_write_page1_reg(struct ql3_adapter *qdev,
qdev              256 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->current_page != 1)
qdev              257 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_register_page(qdev, 1);
qdev              265 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_write_page2_reg(struct ql3_adapter *qdev,
qdev              268 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->current_page != 2)
qdev              269 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_register_page(qdev, 2);
qdev              274 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_disable_interrupts(struct ql3_adapter *qdev)
qdev              277 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev              279 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
qdev              284 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_enable_interrupts(struct ql3_adapter *qdev)
qdev              287 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev              289 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
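
The register helpers above all follow one idiom: the chip's registers are banked into pages, so each helper first checks qdev->current_page, switches pages through ql_set_register_page() if needed, and only then issues the MMIO access; the *_l variants additionally take hw_lock, and the interrupt enable/disable helpers are just ql_write_common_reg_l() with a mask value. A sketch of that idiom with the page number as a parameter; the read-back flush is an assumption:

/* Sketch of the paged-register idiom shared by the helpers above. */
static void write_paged_reg_sketch(struct ql3_adapter *qdev,
				   u32 __iomem *reg, u32 value, u32 page)
{
	if (qdev->current_page != page)
		ql_set_register_page(qdev, page);
	writel(value, reg);
	readl(reg);	/* assumption: post-write read-back to flush the access */
}
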
qdev              294 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
qdev              301 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
qdev              302 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
qdev              304 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
qdev              305 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_free_tail = lrg_buf_cb;
qdev              309 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
qdev              310 drivers/net/ethernet/qlogic/qla3xxx.c 						   qdev->lrg_buffer_len);
qdev              312 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->lrg_buf_skb_check++;
qdev              319 drivers/net/ethernet/qlogic/qla3xxx.c 			map = pci_map_single(qdev->pdev,
qdev              321 drivers/net/ethernet/qlogic/qla3xxx.c 					     qdev->lrg_buffer_len -
qdev              324 drivers/net/ethernet/qlogic/qla3xxx.c 			err = pci_dma_mapping_error(qdev->pdev, map);
qdev              326 drivers/net/ethernet/qlogic/qla3xxx.c 				netdev_err(qdev->ndev,
qdev              332 drivers/net/ethernet/qlogic/qla3xxx.c 				qdev->lrg_buf_skb_check++;
qdev              342 drivers/net/ethernet/qlogic/qla3xxx.c 					  qdev->lrg_buffer_len -
qdev              347 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_free_count++;
qdev              351 drivers/net/ethernet/qlogic/qla3xxx.c 							   *qdev)
qdev              353 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
qdev              356 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_free_head = lrg_buf_cb->next;
qdev              357 drivers/net/ethernet/qlogic/qla3xxx.c 		if (qdev->lrg_buf_free_head == NULL)
qdev              358 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->lrg_buf_free_tail = NULL;
qdev              359 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_free_count--;
qdev              368 drivers/net/ethernet/qlogic/qla3xxx.c static void fm93c56a_deselect(struct ql3_adapter *qdev);
qdev              369 drivers/net/ethernet/qlogic/qla3xxx.c static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
qdev              375 drivers/net/ethernet/qlogic/qla3xxx.c static void fm93c56a_select(struct ql3_adapter *qdev)
qdev              378 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              381 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
qdev              382 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
qdev              388 drivers/net/ethernet/qlogic/qla3xxx.c static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
qdev              395 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              399 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_nvram_reg(qdev, spir,
qdev              400 drivers/net/ethernet/qlogic/qla3xxx.c 			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              402 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_nvram_reg(qdev, spir,
qdev              403 drivers/net/ethernet/qlogic/qla3xxx.c 			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              405 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_nvram_reg(qdev, spir,
qdev              406 drivers/net/ethernet/qlogic/qla3xxx.c 			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              418 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_write_nvram_reg(qdev, spir,
qdev              420 drivers/net/ethernet/qlogic/qla3xxx.c 					    qdev->eeprom_cmd_data | dataBit));
qdev              423 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_nvram_reg(qdev, spir,
qdev              424 drivers/net/ethernet/qlogic/qla3xxx.c 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              426 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_nvram_reg(qdev, spir,
qdev              427 drivers/net/ethernet/qlogic/qla3xxx.c 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              443 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_write_nvram_reg(qdev, spir,
qdev              445 drivers/net/ethernet/qlogic/qla3xxx.c 					    qdev->eeprom_cmd_data | dataBit));
qdev              448 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_nvram_reg(qdev, spir,
qdev              449 drivers/net/ethernet/qlogic/qla3xxx.c 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              451 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_nvram_reg(qdev, spir,
qdev              452 drivers/net/ethernet/qlogic/qla3xxx.c 				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              461 drivers/net/ethernet/qlogic/qla3xxx.c static void fm93c56a_deselect(struct ql3_adapter *qdev)
qdev              464 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              467 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
qdev              468 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
qdev              474 drivers/net/ethernet/qlogic/qla3xxx.c static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
qdev              480 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              486 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_nvram_reg(qdev, spir,
qdev              487 drivers/net/ethernet/qlogic/qla3xxx.c 				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              489 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_nvram_reg(qdev, spir,
qdev              490 drivers/net/ethernet/qlogic/qla3xxx.c 				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
qdev              492 drivers/net/ethernet/qlogic/qla3xxx.c 		dataBit = (ql_read_common_reg(qdev, spir) &
qdev              502 drivers/net/ethernet/qlogic/qla3xxx.c static void eeprom_readword(struct ql3_adapter *qdev,
qdev              505 drivers/net/ethernet/qlogic/qla3xxx.c 	fm93c56a_select(qdev);
qdev              506 drivers/net/ethernet/qlogic/qla3xxx.c 	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
qdev              507 drivers/net/ethernet/qlogic/qla3xxx.c 	fm93c56a_datain(qdev, value);
qdev              508 drivers/net/ethernet/qlogic/qla3xxx.c 	fm93c56a_deselect(qdev);
qdev              519 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_get_nvram_params(struct ql3_adapter *qdev)
qdev              526 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev              528 drivers/net/ethernet/qlogic/qla3xxx.c 	pEEPROMData = (u16 *)&qdev->nvram_data;
qdev              529 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->eeprom_cmd_data = 0;
qdev              530 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
qdev              531 drivers/net/ethernet/qlogic/qla3xxx.c 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
qdev              534 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev              539 drivers/net/ethernet/qlogic/qla3xxx.c 		eeprom_readword(qdev, index, pEEPROMData);
qdev              543 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
qdev              546 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
qdev              548 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev              552 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
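
ql_get_nvram_params() above brackets the EEPROM dump with hw_lock and the NVRAM semaphore, then reads the image one word at a time with eeprom_readword(), summing a checksum that must come out to zero. A sketch of the inner loop only; the word count is a parameter standing in for the driver's EEPROM size constant:

/* Sketch: read the whole EEPROM image word-by-word and return the checksum,
 * which the caller expects to be zero (as ql_get_nvram_params() checks). */
static u16 read_eeprom_image_sketch(struct ql3_adapter *qdev, u16 *buf,
				    unsigned int nwords)
{
	u16 checksum = 0;
	unsigned int index;

	for (index = 0; index < nwords; index++, buf++) {
		eeprom_readword(qdev, index, buf);
		checksum += *buf;
	}
	return checksum;	/* non-zero means the image is corrupt */
}
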
qdev              560 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
qdev              563 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              568 drivers/net/ethernet/qlogic/qla3xxx.c 		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
qdev              577 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
qdev              580 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              583 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->numPorts > 1) {
qdev              596 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
qdev              599 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev              604 drivers/net/ethernet/qlogic/qla3xxx.c static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
qdev              608 drivers/net/ethernet/qlogic/qla3xxx.c 					qdev->mem_map_registers;
qdev              611 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
qdev              624 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
qdev              627 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev              634 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
qdev              638 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              641 drivers/net/ethernet/qlogic/qla3xxx.c 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
qdev              643 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              644 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              648 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
qdev              651 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
qdev              654 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              655 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              660 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_mii_enable_scan_mode(qdev);
qdev              665 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
qdev              669 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              673 drivers/net/ethernet/qlogic/qla3xxx.c 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
qdev              675 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              676 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              680 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
qdev              683 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev              686 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev              690 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              691 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              695 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
qdev              699 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_mii_enable_scan_mode(qdev);
qdev              704 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
qdev              707 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              709 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_disable_scan_mode(qdev);
qdev              711 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              712 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              716 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
qdev              717 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->PHYAddr | regAddr);
qdev              719 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
qdev              722 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              723 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              727 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_enable_scan_mode(qdev);
qdev              732 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
qdev              736 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev              738 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_disable_scan_mode(qdev);
qdev              740 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              741 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              745 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
qdev              746 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->PHYAddr | regAddr);
qdev              748 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev              751 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev              755 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_wait_for_mii_ready(qdev)) {
qdev              756 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
qdev              760 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
qdev              763 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_enable_scan_mode(qdev);
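
ql_mii_read_reg()/ql_mii_write_reg() and their _ex variants above drive the MAC's MII management block: pause scan mode, wait for the block to go idle, program the address register, kick the cycle, wait again, then read or write the data register before re-enabling scan mode. A hedged caller-side sketch; both helpers are assumed to return a negative value on MII timeout, as the PHY callers further down check, and phy_reg/value are illustrative:

/* Sketch: write a PHY register via the helpers above and read it back. */
static int mii_write_verify_sketch(struct ql3_adapter *qdev, u16 phy_reg, u16 value)
{
	u16 readback;

	if (ql_mii_write_reg(qdev, phy_reg, value) < 0)
		return -EIO;
	if (ql_mii_read_reg(qdev, phy_reg, &readback) < 0)
		return -EIO;

	return (readback == value) ? 0 : -EIO;
}
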
qdev              768 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_petbi_reset(struct ql3_adapter *qdev)
qdev              770 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
qdev              773 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_petbi_start_neg(struct ql3_adapter *qdev)
qdev              778 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
qdev              780 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
qdev              782 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
qdev              785 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
qdev              791 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
qdev              793 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
qdev              794 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev              797 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
qdev              802 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
qdev              803 drivers/net/ethernet/qlogic/qla3xxx.c 			   PHYAddr[qdev->mac_index]);
qdev              805 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
qdev              806 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev              808 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
qdev              810 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev              812 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
qdev              815 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev              818 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_petbi_init(struct ql3_adapter *qdev)
qdev              820 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_petbi_reset(qdev);
qdev              821 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_petbi_start_neg(qdev);
qdev              824 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_petbi_init_ex(struct ql3_adapter *qdev)
qdev              826 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_petbi_reset_ex(qdev);
qdev              827 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_petbi_start_neg_ex(qdev);
qdev              830 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
qdev              834 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
qdev              840 drivers/net/ethernet/qlogic/qla3xxx.c static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
qdev              842 drivers/net/ethernet/qlogic/qla3xxx.c 	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
qdev              844 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
qdev              846 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
qdev              848 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
qdev              850 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
qdev              852 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
qdev              854 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
qdev              856 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
qdev              858 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
qdev              860 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
qdev              862 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, 0x11,
qdev              863 drivers/net/ethernet/qlogic/qla3xxx.c 			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
qdev              869 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, 0x12, 0x840a);
qdev              870 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, 0x00, 0x1140);
qdev              871 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
qdev              874 drivers/net/ethernet/qlogic/qla3xxx.c static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
qdev              897 drivers/net/ethernet/qlogic/qla3xxx.c 			netdev_info(qdev->ndev, "Phy: %s\n",
qdev              907 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_phy_get_speed(struct ql3_adapter *qdev)
qdev              911 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->phyType) {
qdev              913 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
qdev              920 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
qdev              938 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_full_dup(struct ql3_adapter *qdev)
qdev              942 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->phyType) {
qdev              944 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_mii_read_reg(qdev, 0x1A, &reg))
qdev              951 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
qdev              958 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
qdev              962 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
qdev              968 drivers/net/ethernet/qlogic/qla3xxx.c static int PHY_Setup(struct ql3_adapter *qdev)
qdev              977 drivers/net/ethernet/qlogic/qla3xxx.c 	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
qdev              979 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
qdev              983 drivers/net/ethernet/qlogic/qla3xxx.c 	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
qdev              985 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
qdev              994 drivers/net/ethernet/qlogic/qla3xxx.c 		if (qdev->mac_index == 0)
qdev              999 drivers/net/ethernet/qlogic/qla3xxx.c 		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
qdev             1001 drivers/net/ethernet/qlogic/qla3xxx.c 			netdev_err(qdev->ndev,
qdev             1006 drivers/net/ethernet/qlogic/qla3xxx.c 		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
qdev             1008 drivers/net/ethernet/qlogic/qla3xxx.c 			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
qdev             1018 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->phyType = getPhyType(qdev, reg1, reg2);
qdev             1020 drivers/net/ethernet/qlogic/qla3xxx.c 	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
qdev             1022 drivers/net/ethernet/qlogic/qla3xxx.c 		phyAgereSpecificInit(qdev, miiAddr);
qdev             1023 drivers/net/ethernet/qlogic/qla3xxx.c 	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
qdev             1024 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "PHY is unknown\n");
qdev             1034 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
qdev             1037 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1045 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index)
qdev             1046 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
qdev             1048 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
qdev             1054 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
qdev             1057 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1065 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index)
qdev             1066 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
qdev             1068 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
qdev             1074 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
qdev             1077 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1085 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index)
qdev             1086 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
qdev             1088 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
qdev             1094 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
qdev             1097 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1105 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index)
qdev             1106 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
qdev             1108 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
qdev             1114 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
qdev             1117 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1127 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index)
qdev             1128 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
qdev             1130 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
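
The ql_mac_cfg_*() helpers above share one pattern: the MAC config registers take a value whose upper half acts as a write-enable mask and whose lower half carries the new bit state, and the write goes to mac0ConfigReg or mac1ConfigReg depending on which port this adapter instance owns. A sketch of that pattern with the specific MAC_CONFIG_REG_* bit passed in; the mask-in-the-upper-half convention is an assumption drawn from the enable/disable pairs above:

/* Sketch of the shared shape of the ql_mac_cfg_*() helpers listed above. */
static void mac_cfg_bit_sketch(struct ql3_adapter *qdev, u32 cfg_bit, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (cfg_bit << 16) | cfg_bit;	/* assumption: mask + set bit */
	else
		value = (cfg_bit << 16);		/* assumption: mask only, bit cleared */

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
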
qdev             1136 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_fiber(struct ql3_adapter *qdev)
qdev             1139 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1143 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1152 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             1156 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_auto_cfg(struct ql3_adapter *qdev)
qdev             1159 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_read_reg(qdev, 0x00, &reg);
qdev             1166 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
qdev             1169 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1173 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1182 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             1184 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
qdev             1187 drivers/net/ethernet/qlogic/qla3xxx.c 	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
qdev             1194 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_neg_pause(struct ql3_adapter *qdev)
qdev             1196 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_is_fiber(qdev))
qdev             1197 drivers/net/ethernet/qlogic/qla3xxx.c 		return ql_is_petbi_neg_pause(qdev);
qdev             1199 drivers/net/ethernet/qlogic/qla3xxx.c 		return ql_is_phy_neg_pause(qdev);
qdev             1202 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_auto_neg_error(struct ql3_adapter *qdev)
qdev             1205 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1209 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1217 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             1221 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_get_link_speed(struct ql3_adapter *qdev)
qdev             1223 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_is_fiber(qdev))
qdev             1226 drivers/net/ethernet/qlogic/qla3xxx.c 		return ql_phy_get_speed(qdev);
qdev             1229 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_is_link_full_dup(struct ql3_adapter *qdev)
qdev             1231 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_is_fiber(qdev))
qdev             1234 drivers/net/ethernet/qlogic/qla3xxx.c 		return ql_is_full_dup(qdev);
qdev             1240 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_link_down_detect(struct ql3_adapter *qdev)
qdev             1243 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1247 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1257 drivers/net/ethernet/qlogic/qla3xxx.c 	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
qdev             1264 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
qdev             1267 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1269 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1271 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_common_reg(qdev,
qdev             1278 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_common_reg(qdev,
qdev             1294 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
qdev             1297 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1301 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1312 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             1314 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
qdev             1319 drivers/net/ethernet/qlogic/qla3xxx.c 	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
qdev             1323 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_phy_reset_ex(struct ql3_adapter *qdev)
qdev             1325 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
qdev             1326 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev             1329 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
qdev             1334 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->phyType == PHY_AGERE_ET1011C)
qdev             1335 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_mii_write_reg(qdev, 0x13, 0x0000);
qdev             1338 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index == 0)
qdev             1340 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->nvram_data.macCfg_port0.portConfiguration;
qdev             1343 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->nvram_data.macCfg_port1.portConfiguration;
qdev             1351 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
qdev             1352 drivers/net/ethernet/qlogic/qla3xxx.c 			   PHYAddr[qdev->mac_index]);
qdev             1362 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
qdev             1363 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev             1366 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
qdev             1367 drivers/net/ethernet/qlogic/qla3xxx.c 			   PHYAddr[qdev->mac_index]);
qdev             1392 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
qdev             1393 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev             1395 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
qdev             1397 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_write_reg_ex(qdev, CONTROL_REG,
qdev             1399 drivers/net/ethernet/qlogic/qla3xxx.c 			    PHYAddr[qdev->mac_index]);
qdev             1402 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_phy_init_ex(struct ql3_adapter *qdev)
qdev             1404 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_phy_reset_ex(qdev);
qdev             1405 drivers/net/ethernet/qlogic/qla3xxx.c 	PHY_Setup(qdev);
qdev             1406 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_phy_start_neg_ex(qdev);
qdev             1412 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_get_link_state(struct ql3_adapter *qdev)
qdev             1415 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1419 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->mac_index) {
qdev             1428 drivers/net/ethernet/qlogic/qla3xxx.c 	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             1437 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_port_start(struct ql3_adapter *qdev)
qdev             1439 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             1440 drivers/net/ethernet/qlogic/qla3xxx.c 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
qdev             1442 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
qdev             1446 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_is_fiber(qdev)) {
qdev             1447 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_petbi_init(qdev);
qdev             1450 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_phy_init_ex(qdev);
qdev             1453 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1457 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_finish_auto_neg(struct ql3_adapter *qdev)
qdev             1460 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             1461 drivers/net/ethernet/qlogic/qla3xxx.c 		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
qdev             1465 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!ql_auto_neg_error(qdev)) {
qdev             1466 drivers/net/ethernet/qlogic/qla3xxx.c 		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
qdev             1468 drivers/net/ethernet/qlogic/qla3xxx.c 			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
qdev             1470 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_mac_cfg_soft_reset(qdev, 1);
qdev             1471 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_mac_cfg_gig(qdev,
qdev             1473 drivers/net/ethernet/qlogic/qla3xxx.c 					(qdev) ==
qdev             1475 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_mac_cfg_full_dup(qdev,
qdev             1477 drivers/net/ethernet/qlogic/qla3xxx.c 					    (qdev));
qdev             1478 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_mac_cfg_pause(qdev,
qdev             1480 drivers/net/ethernet/qlogic/qla3xxx.c 					 (qdev));
qdev             1481 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_mac_cfg_soft_reset(qdev, 0);
qdev             1484 drivers/net/ethernet/qlogic/qla3xxx.c 			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
qdev             1486 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_mac_enable(qdev, 1);
qdev             1489 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->port_link_state = LS_UP;
qdev             1490 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_start_queue(qdev->ndev);
qdev             1491 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_carrier_on(qdev->ndev);
qdev             1492 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_info(qdev, link, qdev->ndev,
qdev             1494 drivers/net/ethernet/qlogic/qla3xxx.c 			   ql_get_link_speed(qdev),
qdev             1495 drivers/net/ethernet/qlogic/qla3xxx.c 			   ql_is_link_full_dup(qdev) ? "full" : "half");
qdev             1499 drivers/net/ethernet/qlogic/qla3xxx.c 		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
qdev             1500 drivers/net/ethernet/qlogic/qla3xxx.c 			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
qdev             1506 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1507 drivers/net/ethernet/qlogic/qla3xxx.c 			if (ql_port_start(qdev))	/* Restart port */
qdev             1512 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1518 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev =
qdev             1524 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             1526 drivers/net/ethernet/qlogic/qla3xxx.c 	curr_link_state = ql_get_link_state(qdev);
qdev             1528 drivers/net/ethernet/qlogic/qla3xxx.c 	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
qdev             1529 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_info(qdev, link, qdev->ndev,
qdev             1532 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1535 drivers/net/ethernet/qlogic/qla3xxx.c 		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
qdev             1540 drivers/net/ethernet/qlogic/qla3xxx.c 	switch (qdev->port_link_state) {
qdev             1542 drivers/net/ethernet/qlogic/qla3xxx.c 		if (test_bit(QL_LINK_MASTER, &qdev->flags))
qdev             1543 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_port_start(qdev);
qdev             1544 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->port_link_state = LS_DOWN;
qdev             1549 drivers/net/ethernet/qlogic/qla3xxx.c 			netif_info(qdev, link, qdev->ndev, "Link is up\n");
qdev             1550 drivers/net/ethernet/qlogic/qla3xxx.c 			if (ql_is_auto_neg_complete(qdev))
qdev             1551 drivers/net/ethernet/qlogic/qla3xxx.c 				ql_finish_auto_neg(qdev);
qdev             1553 drivers/net/ethernet/qlogic/qla3xxx.c 			if (qdev->port_link_state == LS_UP)
qdev             1554 drivers/net/ethernet/qlogic/qla3xxx.c 				ql_link_down_detect_clear(qdev);
qdev             1556 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->port_link_state = LS_UP;
qdev             1566 drivers/net/ethernet/qlogic/qla3xxx.c 			netif_info(qdev, link, qdev->ndev, "Link is down\n");
qdev             1567 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->port_link_state = LS_DOWN;
qdev             1569 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_link_down_detect(qdev))
qdev             1570 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->port_link_state = LS_DOWN;
qdev             1573 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1576 drivers/net/ethernet/qlogic/qla3xxx.c 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
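
The ql_link_state_machine_work hits above amount to a periodic poll: take hw_lock, read the current link state, bail out early while QL_RESET_ACTIVE is set, run a small switch on port_link_state, and rearm adapter_timer about one second out. A minimal sketch of that shape follows; struct demo_adapter and its fields are invented stand-ins, not the real ql3_adapter definitions.

    /* Sketch only: demo_adapter loosely mirrors the polling pattern above. */
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>
    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/bitops.h>

    #define DEMO_RESET_ACTIVE 0

    struct demo_adapter {
            spinlock_t hw_lock;
            struct timer_list adapter_timer;
            struct work_struct link_work;
            unsigned long flags;
            int link_up;                    /* cached result of the last poll */
    };

    static int demo_read_link_state(struct demo_adapter *a)
    {
            return a->link_up;              /* the real driver reads a portStatus register here */
    }

    static void demo_link_work(struct work_struct *work)
    {
            struct demo_adapter *a = container_of(work, struct demo_adapter, link_work);
            unsigned long flags;

            spin_lock_irqsave(&a->hw_lock, flags);
            if (test_bit(DEMO_RESET_ACTIVE, &a->flags)) {
                    /* a reset owns the hardware: skip this pass but keep polling */
                    spin_unlock_irqrestore(&a->hw_lock, flags);
                    mod_timer(&a->adapter_timer, jiffies + HZ);
                    return;
            }
            a->link_up = demo_read_link_state(a);   /* the real code switches on port_link_state here */
            spin_unlock_irqrestore(&a->hw_lock, flags);
            mod_timer(&a->adapter_timer, jiffies + HZ);
    }
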
qdev             1582 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_get_phy_owner(struct ql3_adapter *qdev)
qdev             1584 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_this_adapter_controls_port(qdev))
qdev             1585 drivers/net/ethernet/qlogic/qla3xxx.c 		set_bit(QL_LINK_MASTER, &qdev->flags);
qdev             1587 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_LINK_MASTER, &qdev->flags);
qdev             1593 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_init_scan_mode(struct ql3_adapter *qdev)
qdev             1595 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_mii_enable_scan_mode(qdev);
qdev             1597 drivers/net/ethernet/qlogic/qla3xxx.c 	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
qdev             1598 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_this_adapter_controls_port(qdev))
qdev             1599 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_petbi_init_ex(qdev);
qdev             1601 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_this_adapter_controls_port(qdev))
qdev             1602 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_phy_init_ex(qdev);
qdev             1612 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_mii_setup(struct ql3_adapter *qdev)
qdev             1616 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             1618 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             1619 drivers/net/ethernet/qlogic/qla3xxx.c 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
qdev             1623 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3032_DEVICE_ID)
qdev             1624 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev,
qdev             1630 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
qdev             1633 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1649 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_supported_modes(struct ql3_adapter *qdev)
qdev             1651 drivers/net/ethernet/qlogic/qla3xxx.c 	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
qdev             1657 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
qdev             1661 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             1662 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             1664 drivers/net/ethernet/qlogic/qla3xxx.c 			     (qdev->mac_index) * 2) << 7)) {
qdev             1665 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1668 drivers/net/ethernet/qlogic/qla3xxx.c 	status = ql_is_auto_cfg(qdev);
qdev             1669 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1670 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1674 drivers/net/ethernet/qlogic/qla3xxx.c static u32 ql_get_speed(struct ql3_adapter *qdev)
qdev             1678 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             1679 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             1681 drivers/net/ethernet/qlogic/qla3xxx.c 			     (qdev->mac_index) * 2) << 7)) {
qdev             1682 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1685 drivers/net/ethernet/qlogic/qla3xxx.c 	status = ql_get_link_speed(qdev);
qdev             1686 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1687 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1691 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_get_full_dup(struct ql3_adapter *qdev)
qdev             1695 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             1696 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             1698 drivers/net/ethernet/qlogic/qla3xxx.c 			     (qdev->mac_index) * 2) << 7)) {
qdev             1699 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             1702 drivers/net/ethernet/qlogic/qla3xxx.c 	status = ql_is_link_full_dup(qdev);
qdev             1703 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             1704 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
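
ql_get_auto_cfg_status, ql_get_speed and ql_get_full_dup repeat one wrapper shape: take hw_lock, try the PHY GIO hardware semaphore shared with the other port, run the actual query, then release both in reverse order, returning 0 when the semaphore cannot be had. A hedged sketch of that wrapper; demo_sem_trylock/demo_sem_unlock stand in for ql_sem_spinlock/ql_sem_unlock and are stubbed here.

    #include <linux/types.h>
    #include <linux/spinlock.h>

    struct demo_adapter {
            spinlock_t hw_lock;
    };

    /* Stubs for the hardware semaphore; 0 means "acquired" in this sketch. */
    static int demo_sem_trylock(struct demo_adapter *a) { return 0; }
    static void demo_sem_unlock(struct demo_adapter *a) { }

    static u32 demo_locked_query(struct demo_adapter *a,
                                 u32 (*query)(struct demo_adapter *))
    {
            unsigned long flags;
            u32 val;

            spin_lock_irqsave(&a->hw_lock, flags);
            if (demo_sem_trylock(a)) {
                    /* the other port owns the PHY right now: report "unknown" */
                    spin_unlock_irqrestore(&a->hw_lock, flags);
                    return 0;
            }
            val = query(a);                 /* e.g. read speed or duplex from the PHY */
            demo_sem_unlock(a);
            spin_unlock_irqrestore(&a->hw_lock, flags);
            return val;
    }
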
qdev             1711 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             1714 drivers/net/ethernet/qlogic/qla3xxx.c 	supported = ql_supported_modes(qdev);
qdev             1716 drivers/net/ethernet/qlogic/qla3xxx.c 	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
qdev             1720 drivers/net/ethernet/qlogic/qla3xxx.c 		cmd->base.phy_address = qdev->PHYAddr;
qdev             1722 drivers/net/ethernet/qlogic/qla3xxx.c 	advertising = ql_supported_modes(qdev);
qdev             1723 drivers/net/ethernet/qlogic/qla3xxx.c 	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
qdev             1724 drivers/net/ethernet/qlogic/qla3xxx.c 	cmd->base.speed = ql_get_speed(qdev);
qdev             1725 drivers/net/ethernet/qlogic/qla3xxx.c 	cmd->base.duplex = ql_get_full_dup(qdev);
qdev             1738 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             1742 drivers/net/ethernet/qlogic/qla3xxx.c 	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
qdev             1748 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             1749 drivers/net/ethernet/qlogic/qla3xxx.c 	return qdev->msg_enable;
qdev             1754 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             1755 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->msg_enable = value;
qdev             1761 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             1763 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             1766 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index == 0)
qdev             1767 drivers/net/ethernet/qlogic/qla3xxx.c 		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
qdev             1769 drivers/net/ethernet/qlogic/qla3xxx.c 		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
qdev             1771 drivers/net/ethernet/qlogic/qla3xxx.c 	pause->autoneg  = ql_get_auto_cfg_status(qdev);
qdev             1785 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_populate_free_queue(struct ql3_adapter *qdev)
qdev             1787 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
qdev             1794 drivers/net/ethernet/qlogic/qla3xxx.c 				netdev_alloc_skb(qdev->ndev,
qdev             1795 drivers/net/ethernet/qlogic/qla3xxx.c 						 qdev->lrg_buffer_len);
qdev             1797 drivers/net/ethernet/qlogic/qla3xxx.c 				netdev_printk(KERN_DEBUG, qdev->ndev,
qdev             1806 drivers/net/ethernet/qlogic/qla3xxx.c 				map = pci_map_single(qdev->pdev,
qdev             1808 drivers/net/ethernet/qlogic/qla3xxx.c 						     qdev->lrg_buffer_len -
qdev             1812 drivers/net/ethernet/qlogic/qla3xxx.c 				err = pci_dma_mapping_error(qdev->pdev, map);
qdev             1814 drivers/net/ethernet/qlogic/qla3xxx.c 					netdev_err(qdev->ndev,
qdev             1829 drivers/net/ethernet/qlogic/qla3xxx.c 						  qdev->lrg_buffer_len -
qdev             1831 drivers/net/ethernet/qlogic/qla3xxx.c 				--qdev->lrg_buf_skb_check;
qdev             1832 drivers/net/ethernet/qlogic/qla3xxx.c 				if (!qdev->lrg_buf_skb_check)
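
ql_populate_free_queue shows the rx-buffer replenish pattern: allocate an skb sized for lrg_buffer_len, DMA-map its data for device-to-CPU traffic, and check the mapping before handing the buffer back to the free list. The sketch below uses the current dma_map_single/dma_mapping_error calls rather than the pci_* wrappers seen in the hits; demo_rx_buf and its fields are placeholders.

    #include <linux/errno.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    struct demo_rx_buf {                    /* placeholder per-buffer bookkeeping */
            struct sk_buff *skb;
            dma_addr_t dma;
    };

    static int demo_refill_rx_buf(struct net_device *ndev, struct device *dmadev,
                                  struct demo_rx_buf *buf, unsigned int buf_len)
    {
            struct sk_buff *skb = netdev_alloc_skb(ndev, buf_len);

            if (!skb)
                    return -ENOMEM;         /* leave the slot empty; retry on a later pass */

            buf->dma = dma_map_single(dmadev, skb->data, buf_len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dmadev, buf->dma)) {
                    dev_kfree_skb(skb);     /* mapping failed: drop the skb, keep the slot empty */
                    return -EIO;
            }
            buf->skb = skb;                 /* buffer is now ready to be posted to the chip */
            return 0;
    }
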
qdev             1844 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
qdev             1847 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             1849 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->small_buf_release_cnt >= 16) {
qdev             1850 drivers/net/ethernet/qlogic/qla3xxx.c 		while (qdev->small_buf_release_cnt >= 16) {
qdev             1851 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->small_buf_q_producer_index++;
qdev             1853 drivers/net/ethernet/qlogic/qla3xxx.c 			if (qdev->small_buf_q_producer_index ==
qdev             1855 drivers/net/ethernet/qlogic/qla3xxx.c 				qdev->small_buf_q_producer_index = 0;
qdev             1856 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->small_buf_release_cnt -= 8;
qdev             1859 drivers/net/ethernet/qlogic/qla3xxx.c 		writel_relaxed(qdev->small_buf_q_producer_index,
qdev             1867 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
qdev             1873 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             1875 drivers/net/ethernet/qlogic/qla3xxx.c 	if ((qdev->lrg_buf_free_count >= 8) &&
qdev             1876 drivers/net/ethernet/qlogic/qla3xxx.c 	    (qdev->lrg_buf_release_cnt >= 16)) {
qdev             1878 drivers/net/ethernet/qlogic/qla3xxx.c 		if (qdev->lrg_buf_skb_check)
qdev             1879 drivers/net/ethernet/qlogic/qla3xxx.c 			if (!ql_populate_free_queue(qdev))
qdev             1882 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_q_ele = qdev->lrg_buf_next_free;
qdev             1884 drivers/net/ethernet/qlogic/qla3xxx.c 		while ((qdev->lrg_buf_release_cnt >= 16) &&
qdev             1885 drivers/net/ethernet/qlogic/qla3xxx.c 		       (qdev->lrg_buf_free_count >= 8)) {
qdev             1889 drivers/net/ethernet/qlogic/qla3xxx.c 				    ql_get_from_lrg_buf_free_list(qdev);
qdev             1896 drivers/net/ethernet/qlogic/qla3xxx.c 				qdev->lrg_buf_release_cnt--;
qdev             1899 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->lrg_buf_q_producer_index++;
qdev             1901 drivers/net/ethernet/qlogic/qla3xxx.c 			if (qdev->lrg_buf_q_producer_index ==
qdev             1902 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->num_lbufq_entries)
qdev             1903 drivers/net/ethernet/qlogic/qla3xxx.c 				qdev->lrg_buf_q_producer_index = 0;
qdev             1905 drivers/net/ethernet/qlogic/qla3xxx.c 			if (qdev->lrg_buf_q_producer_index ==
qdev             1906 drivers/net/ethernet/qlogic/qla3xxx.c 			    (qdev->num_lbufq_entries - 1)) {
qdev             1907 drivers/net/ethernet/qlogic/qla3xxx.c 				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
qdev             1911 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_next_free = lrg_buf_q_ele;
qdev             1912 drivers/net/ethernet/qlogic/qla3xxx.c 		writel(qdev->lrg_buf_q_producer_index,
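
ql_update_small_bufq_prod_index and ql_update_lrg_bufq_prod_index both batch their doorbell writes: nothing is pushed until at least 16 buffers have been released, the software producer index is advanced with wraparound at the queue size, and only then is the new index written to the chip. A simplified sketch of that batching; the 16/8 thresholds mirror the small-buffer hits above, while the queue depth and register are placeholders.

    #include <linux/types.h>
    #include <linux/io.h>

    #define DEMO_Q_ENTRIES 32               /* placeholder queue depth */

    static void demo_flush_released(u32 *release_cnt, u32 *prod_idx,
                                    void __iomem *doorbell)
    {
            bool advanced = false;

            while (*release_cnt >= 16) {    /* batch: each queue entry covers several buffers */
                    if (++(*prod_idx) == DEMO_Q_ENTRIES)
                            *prod_idx = 0;  /* wrap the ring */
                    *release_cnt -= 8;
                    advanced = true;
            }
            if (advanced)
                    writel(*prod_idx, doorbell);    /* one MMIO write for the whole batch */
    }
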
qdev             1917 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
qdev             1924 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_warn(qdev->ndev,
qdev             1928 drivers/net/ethernet/qlogic/qla3xxx.c 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
qdev             1932 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev,
qdev             1935 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->ndev->stats.tx_errors++;
qdev             1940 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
qdev             1943 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->ndev->stats.tx_errors++;
qdev             1947 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_unmap_single(qdev->pdev,
qdev             1954 drivers/net/ethernet/qlogic/qla3xxx.c 			pci_unmap_page(qdev->pdev,
qdev             1961 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->ndev->stats.tx_packets++;
qdev             1962 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
qdev             1969 drivers/net/ethernet/qlogic/qla3xxx.c 	atomic_inc(&qdev->tx_count);
qdev             1972 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_get_sbuf(struct ql3_adapter *qdev)
qdev             1974 drivers/net/ethernet/qlogic/qla3xxx.c 	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
qdev             1975 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->small_buf_index = 0;
qdev             1976 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_release_cnt++;
qdev             1979 drivers/net/ethernet/qlogic/qla3xxx.c static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
qdev             1982 drivers/net/ethernet/qlogic/qla3xxx.c 	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
qdev             1983 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_release_cnt++;
qdev             1984 drivers/net/ethernet/qlogic/qla3xxx.c 	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
qdev             1985 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_index = 0;
qdev             2001 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
qdev             2012 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_get_sbuf(qdev);
qdev             2014 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3022_DEVICE_ID)
qdev             2015 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb1 = ql_get_lbuf(qdev);
qdev             2018 drivers/net/ethernet/qlogic/qla3xxx.c 	lrg_buf_cb2 = ql_get_lbuf(qdev);
qdev             2021 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->ndev->stats.rx_packets++;
qdev             2022 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->ndev->stats.rx_bytes += length;
qdev             2025 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_unmap_single(qdev->pdev,
qdev             2031 drivers/net/ethernet/qlogic/qla3xxx.c 	skb->protocol = eth_type_trans(skb, qdev->ndev);
qdev             2033 drivers/net/ethernet/qlogic/qla3xxx.c 	napi_gro_receive(&qdev->napi, skb);
qdev             2036 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3022_DEVICE_ID)
qdev             2037 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
qdev             2038 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
qdev             2041 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
qdev             2047 drivers/net/ethernet/qlogic/qla3xxx.c 	struct net_device *ndev = qdev->ndev;
qdev             2055 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_get_sbuf(qdev);
qdev             2057 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3022_DEVICE_ID) {
qdev             2059 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb1 = ql_get_lbuf(qdev);
qdev             2067 drivers/net/ethernet/qlogic/qla3xxx.c 	lrg_buf_cb2 = ql_get_lbuf(qdev);
qdev             2071 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_unmap_single(qdev->pdev,
qdev             2078 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3022_DEVICE_ID) {
qdev             2101 drivers/net/ethernet/qlogic/qla3xxx.c 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
qdev             2103 drivers/net/ethernet/qlogic/qla3xxx.c 	napi_gro_receive(&qdev->napi, skb2);
qdev             2108 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3022_DEVICE_ID)
qdev             2109 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
qdev             2110 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
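
ql_process_mac_rx_intr and ql_process_macip_rx_intr share the same completion steps: pull the buffer control blocks back off the ring, unmap the DMA buffer, set skb->protocol with eth_type_trans(), feed the skb to the stack through napi_gro_receive(), bump the rx counters, and recycle the buffer control blocks. A compressed sketch of one completed frame; the lengths and field names are illustrative.

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    static void demo_rx_one(struct napi_struct *napi, struct net_device *ndev,
                            struct device *dmadev, struct sk_buff *skb,
                            dma_addr_t dma, unsigned int map_len,
                            unsigned int pkt_len)
    {
            dma_unmap_single(dmadev, dma, map_len, DMA_FROM_DEVICE); /* hand the data to the CPU */
            skb_put(skb, pkt_len);                   /* expose the received bytes */
            skb->protocol = eth_type_trans(skb, ndev);
            napi_gro_receive(napi, skb);             /* GRO-aware hand-off to the stack */

            ndev->stats.rx_packets++;
            ndev->stats.rx_bytes += pkt_len;
            /* the real driver then returns the buffer control blocks to the free list */
    }
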
qdev             2113 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
qdev             2116 drivers/net/ethernet/qlogic/qla3xxx.c 	struct net_device *ndev = qdev->ndev;
qdev             2120 drivers/net/ethernet/qlogic/qla3xxx.c 	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
qdev             2121 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->rsp_consumer_index) && (work_done < budget)) {
qdev             2123 drivers/net/ethernet/qlogic/qla3xxx.c 		net_rsp = qdev->rsp_current;
qdev             2129 drivers/net/ethernet/qlogic/qla3xxx.c 		if (qdev->device_id == QL3032_DEVICE_ID)
qdev             2135 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
qdev             2141 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
qdev             2148 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
qdev             2166 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->rsp_consumer_index++;
qdev             2168 drivers/net/ethernet/qlogic/qla3xxx.c 		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
qdev             2169 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->rsp_consumer_index = 0;
qdev             2170 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->rsp_current = qdev->rsp_q_virt_addr;
qdev             2172 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->rsp_current++;
qdev             2182 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
qdev             2184 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             2187 drivers/net/ethernet/qlogic/qla3xxx.c 	work_done = ql_tx_rx_clean(qdev, budget);
qdev             2192 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_lock_irqsave(&qdev->hw_lock, flags);
qdev             2193 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_update_small_bufq_prod_index(qdev);
qdev             2194 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_update_lrg_bufq_prod_index(qdev);
qdev             2195 drivers/net/ethernet/qlogic/qla3xxx.c 		writel(qdev->rsp_consumer_index,
qdev             2197 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, flags);
qdev             2199 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_enable_interrupts(qdev);
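
The poll routine built around ql_tx_rx_clean follows the standard NAPI contract: process at most budget completions, and only once the ring is drained replenish the buffer queues, advertise the new response consumer index to the chip under hw_lock, and re-enable interrupts. A sketch using the current napi_complete_done() idiom; demo_clean_rings/demo_enable_irqs and the adapter fields are invented.

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>
    #include <linux/io.h>

    struct demo_adapter {
            struct napi_struct napi;
            spinlock_t hw_lock;
            u32 rsp_consumer_index;
            void __iomem *rsp_doorbell;
    };

    static int demo_clean_rings(struct demo_adapter *a, int budget) { return 0; } /* stub */
    static void demo_enable_irqs(struct demo_adapter *a) { }                      /* stub */

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_adapter *a = container_of(napi, struct demo_adapter, napi);
            int work_done = demo_clean_rings(a, budget);

            if (work_done < budget && napi_complete_done(napi, work_done)) {
                    unsigned long flags;

                    spin_lock_irqsave(&a->hw_lock, flags);
                    /* tell the chip how far we consumed, then let it interrupt again */
                    writel(a->rsp_consumer_index, a->rsp_doorbell);
                    spin_unlock_irqrestore(&a->hw_lock, flags);
                    demo_enable_irqs(a);
            }
            return work_done;
    }
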
qdev             2208 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             2210 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             2215 drivers/net/ethernet/qlogic/qla3xxx.c 	value = ql_read_common_reg_l(qdev,
qdev             2219 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_lock(&qdev->adapter_lock);
qdev             2220 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_stop_queue(qdev->ndev);
qdev             2221 drivers/net/ethernet/qlogic/qla3xxx.c 		netif_carrier_off(qdev->ndev);
qdev             2222 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_disable_interrupts(qdev);
qdev             2223 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->port_link_state = LS_DOWN;
qdev             2224 drivers/net/ethernet/qlogic/qla3xxx.c 		set_bit(QL_RESET_ACTIVE, &qdev->flags) ;
qdev             2231 drivers/net/ethernet/qlogic/qla3xxx.c 			    ql_read_page0_reg_l(qdev,
qdev             2236 drivers/net/ethernet/qlogic/qla3xxx.c 			set_bit(QL_RESET_START, &qdev->flags) ;
qdev             2241 drivers/net/ethernet/qlogic/qla3xxx.c 			set_bit(QL_RESET_PER_SCSI, &qdev->flags) ;
qdev             2246 drivers/net/ethernet/qlogic/qla3xxx.c 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
qdev             2247 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock(&qdev->adapter_lock);
qdev             2249 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_disable_interrupts(qdev);
qdev             2250 drivers/net/ethernet/qlogic/qla3xxx.c 		if (likely(napi_schedule_prep(&qdev->napi)))
qdev             2251 drivers/net/ethernet/qlogic/qla3xxx.c 			__napi_schedule(&qdev->napi);
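
The interrupt-handler hits show the usual split: fatal ispControlStatus conditions stop the queue, drop the carrier, mask interrupts and punt recovery to reset_work on a workqueue, while the normal case just masks interrupts and schedules NAPI. A sketch of that split; the status bits, register pointer and helpers are hypothetical.

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <linux/workqueue.h>
    #include <linux/bits.h>
    #include <linux/io.h>

    #define DEMO_IRQ_PENDING  BIT(0)        /* placeholder status bits */
    #define DEMO_FATAL_ERROR  BIT(1)

    struct demo_adapter {
            struct net_device *ndev;
            struct napi_struct napi;
            struct workqueue_struct *wq;
            struct delayed_work reset_work;
            void __iomem *status_reg;
    };

    static void demo_disable_irqs(struct demo_adapter *a) { }       /* stub */

    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
            struct demo_adapter *a = dev_id;
            u32 status = readl(a->status_reg);

            if (!(status & DEMO_IRQ_PENDING))
                    return IRQ_NONE;                /* shared line, not our interrupt */

            if (status & DEMO_FATAL_ERROR) {
                    netif_stop_queue(a->ndev);
                    netif_carrier_off(a->ndev);
                    demo_disable_irqs(a);
                    queue_delayed_work(a->wq, &a->reset_work, 0);   /* recover in process context */
                    return IRQ_HANDLED;
            }

            demo_disable_irqs(a);                   /* stay masked until the poll loop finishes */
            if (napi_schedule_prep(&a->napi))
                    __napi_schedule(&a->napi);
            return IRQ_HANDLED;
    }
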
qdev             2265 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
qdev             2267 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3022_DEVICE_ID)
qdev             2305 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_send_map(struct ql3_adapter *qdev,
qdev             2323 drivers/net/ethernet/qlogic/qla3xxx.c 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
qdev             2325 drivers/net/ethernet/qlogic/qla3xxx.c 	err = pci_dma_mapping_error(qdev->pdev, map);
qdev             2327 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
qdev             2361 drivers/net/ethernet/qlogic/qla3xxx.c 			map = pci_map_single(qdev->pdev, oal,
qdev             2365 drivers/net/ethernet/qlogic/qla3xxx.c 			err = pci_dma_mapping_error(qdev->pdev, map);
qdev             2367 drivers/net/ethernet/qlogic/qla3xxx.c 				netdev_err(qdev->ndev,
qdev             2385 drivers/net/ethernet/qlogic/qla3xxx.c 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
qdev             2388 drivers/net/ethernet/qlogic/qla3xxx.c 		err = dma_mapping_error(&qdev->pdev->dev, map);
qdev             2390 drivers/net/ethernet/qlogic/qla3xxx.c 			netdev_err(qdev->ndev,
qdev             2427 drivers/net/ethernet/qlogic/qla3xxx.c 			pci_unmap_single(qdev->pdev,
qdev             2435 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_unmap_page(qdev->pdev,
qdev             2441 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_unmap_single(qdev->pdev,
qdev             2464 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             2466 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             2471 drivers/net/ethernet/qlogic/qla3xxx.c 	if (unlikely(atomic_read(&qdev->tx_count) < 2))
qdev             2474 drivers/net/ethernet/qlogic/qla3xxx.c 	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
qdev             2475 drivers/net/ethernet/qlogic/qla3xxx.c 	tx_cb->seg_count = ql_get_seg_count(qdev,
qdev             2484 drivers/net/ethernet/qlogic/qla3xxx.c 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
qdev             2486 drivers/net/ethernet/qlogic/qla3xxx.c 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
qdev             2487 drivers/net/ethernet/qlogic/qla3xxx.c 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
qdev             2490 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3032_DEVICE_ID &&
qdev             2494 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
qdev             2500 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->req_producer_index++;
qdev             2501 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
qdev             2502 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->req_producer_index = 0;
qdev             2504 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg_l(qdev,
qdev             2506 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->req_producer_index);
qdev             2508 drivers/net/ethernet/qlogic/qla3xxx.c 	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
qdev             2510 drivers/net/ethernet/qlogic/qla3xxx.c 		     qdev->req_producer_index, skb->len);
qdev             2512 drivers/net/ethernet/qlogic/qla3xxx.c 	atomic_dec(&qdev->tx_count);
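
The ql3xxx_send hits outline the transmit fast path: refuse the frame if fewer than two request-queue slots remain, pick the tx control block at req_producer_index, DMA-map the skb and build the IOCB (ql_send_map), advance and wrap the producer index, then write it to the request-queue doorbell. A condensed sketch; demo_map_and_fill and the adapter fields are placeholders, and the error handling is simplified rather than copied from the driver.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/io.h>
    #include <linux/atomic.h>

    #define DEMO_REQ_ENTRIES 256            /* placeholder request-queue depth */

    struct demo_adapter {
            atomic_t tx_free;               /* free request-queue slots */
            u32 req_prod;
            void __iomem *req_doorbell;
    };

    static int demo_map_and_fill(struct demo_adapter *a, struct sk_buff *skb)
    {
            return 0;                       /* stub: DMA-map the skb and fill the IOCB */
    }

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            struct demo_adapter *a = netdev_priv(ndev);

            if (atomic_read(&a->tx_free) < 2) {
                    netif_stop_queue(ndev); /* keep a spare slot; wake on tx completion */
                    return NETDEV_TX_BUSY;
            }
            if (demo_map_and_fill(a, skb)) {
                    dev_kfree_skb_any(skb); /* simplified: drop on mapping failure */
                    return NETDEV_TX_OK;
            }
            if (++a->req_prod == DEMO_REQ_ENTRIES)
                    a->req_prod = 0;        /* wrap the producer index */
            writel(a->req_prod, a->req_doorbell);   /* doorbell: new work for the chip */
            atomic_dec(&a->tx_free);
            return NETDEV_TX_OK;
    }
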
qdev             2516 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
qdev             2518 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->req_q_size =
qdev             2521 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
qdev             2528 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->req_q_virt_addr =
qdev             2529 drivers/net/ethernet/qlogic/qla3xxx.c 	    pci_alloc_consistent(qdev->pdev,
qdev             2530 drivers/net/ethernet/qlogic/qla3xxx.c 				 (size_t) qdev->req_q_size,
qdev             2531 drivers/net/ethernet/qlogic/qla3xxx.c 				 &qdev->req_q_phy_addr);
qdev             2533 drivers/net/ethernet/qlogic/qla3xxx.c 	if ((qdev->req_q_virt_addr == NULL) ||
qdev             2534 drivers/net/ethernet/qlogic/qla3xxx.c 	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
qdev             2535 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "reqQ failed\n");
qdev             2539 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->rsp_q_virt_addr =
qdev             2540 drivers/net/ethernet/qlogic/qla3xxx.c 	    pci_alloc_consistent(qdev->pdev,
qdev             2541 drivers/net/ethernet/qlogic/qla3xxx.c 				 (size_t) qdev->rsp_q_size,
qdev             2542 drivers/net/ethernet/qlogic/qla3xxx.c 				 &qdev->rsp_q_phy_addr);
qdev             2544 drivers/net/ethernet/qlogic/qla3xxx.c 	if ((qdev->rsp_q_virt_addr == NULL) ||
qdev             2545 drivers/net/ethernet/qlogic/qla3xxx.c 	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
qdev             2546 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "rspQ allocation failed\n");
qdev             2547 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
qdev             2548 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->req_q_virt_addr,
qdev             2549 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->req_q_phy_addr);
qdev             2553 drivers/net/ethernet/qlogic/qla3xxx.c 	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
qdev             2558 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
qdev             2560 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
qdev             2561 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_info(qdev->ndev, "Already done\n");
qdev             2565 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_free_consistent(qdev->pdev,
qdev             2566 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->req_q_size,
qdev             2567 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);
qdev             2569 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->req_q_virt_addr = NULL;
qdev             2571 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_free_consistent(qdev->pdev,
qdev             2572 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->rsp_q_size,
qdev             2573 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
qdev             2575 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->rsp_q_virt_addr = NULL;
qdev             2577 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
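
ql_alloc_net_req_rsp_queues allocates the request and response rings from coherent DMA memory and unwinds the first ring when the second allocation fails; the hits also show an extra sanity check that the returned bus address is aligned to the ring size. A sketch of the allocate-and-unwind part using dma_alloc_coherent (the modern spelling of pci_alloc_consistent); struct demo_rings is invented.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/dma-mapping.h>

    struct demo_rings {                     /* placeholder ring bookkeeping */
            void *req_virt, *rsp_virt;
            dma_addr_t req_dma, rsp_dma;
    };

    static int demo_alloc_rings(struct device *dev, struct demo_rings *r,
                                size_t req_size, size_t rsp_size)
    {
            r->req_virt = dma_alloc_coherent(dev, req_size, &r->req_dma, GFP_KERNEL);
            if (!r->req_virt)
                    return -ENOMEM;

            r->rsp_virt = dma_alloc_coherent(dev, rsp_size, &r->rsp_dma, GFP_KERNEL);
            if (!r->rsp_virt) {
                    /* partial failure: free the ring that did succeed */
                    dma_free_coherent(dev, req_size, r->req_virt, r->req_dma);
                    r->req_virt = NULL;
                    return -ENOMEM;
            }
            return 0;
    }
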
qdev             2580 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev             2583 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_q_size =
qdev             2584 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
qdev             2585 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->lrg_buf_q_size < PAGE_SIZE)
qdev             2586 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
qdev             2588 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
qdev             2590 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
qdev             2593 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->lrg_buf == NULL)
qdev             2596 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_q_alloc_virt_addr =
qdev             2597 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_alloc_consistent(qdev->pdev,
qdev             2598 drivers/net/ethernet/qlogic/qla3xxx.c 				     qdev->lrg_buf_q_alloc_size,
qdev             2599 drivers/net/ethernet/qlogic/qla3xxx.c 				     &qdev->lrg_buf_q_alloc_phy_addr);
qdev             2601 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
qdev             2602 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "lBufQ failed\n");
qdev             2605 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
qdev             2606 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
qdev             2609 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_q_size =
qdev             2611 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->small_buf_q_size < PAGE_SIZE)
qdev             2612 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->small_buf_q_alloc_size = PAGE_SIZE;
qdev             2614 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
qdev             2616 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_q_alloc_virt_addr =
qdev             2617 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_alloc_consistent(qdev->pdev,
qdev             2618 drivers/net/ethernet/qlogic/qla3xxx.c 				     qdev->small_buf_q_alloc_size,
qdev             2619 drivers/net/ethernet/qlogic/qla3xxx.c 				     &qdev->small_buf_q_alloc_phy_addr);
qdev             2621 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
qdev             2622 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
qdev             2623 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
qdev             2624 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->lrg_buf_q_alloc_virt_addr,
qdev             2625 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->lrg_buf_q_alloc_phy_addr);
qdev             2629 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
qdev             2630 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
qdev             2631 drivers/net/ethernet/qlogic/qla3xxx.c 	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
qdev             2635 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_free_buffer_queues(struct ql3_adapter *qdev)
qdev             2637 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
qdev             2638 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_info(qdev->ndev, "Already done\n");
qdev             2641 drivers/net/ethernet/qlogic/qla3xxx.c 	kfree(qdev->lrg_buf);
qdev             2642 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_free_consistent(qdev->pdev,
qdev             2643 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->lrg_buf_q_alloc_size,
qdev             2644 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->lrg_buf_q_alloc_virt_addr,
qdev             2645 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->lrg_buf_q_alloc_phy_addr);
qdev             2647 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_q_virt_addr = NULL;
qdev             2649 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_free_consistent(qdev->pdev,
qdev             2650 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->small_buf_q_alloc_size,
qdev             2651 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->small_buf_q_alloc_virt_addr,
qdev             2652 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->small_buf_q_alloc_phy_addr);
qdev             2654 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_q_virt_addr = NULL;
qdev             2656 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
qdev             2659 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
qdev             2665 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_total_size =
qdev             2669 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_virt_addr =
qdev             2670 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_alloc_consistent(qdev->pdev,
qdev             2671 drivers/net/ethernet/qlogic/qla3xxx.c 				     qdev->small_buf_total_size,
qdev             2672 drivers/net/ethernet/qlogic/qla3xxx.c 				     &qdev->small_buf_phy_addr);
qdev             2674 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->small_buf_virt_addr == NULL) {
qdev             2675 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
qdev             2679 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
qdev             2680 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
qdev             2682 drivers/net/ethernet/qlogic/qla3xxx.c 	small_buf_q_entry = qdev->small_buf_q_virt_addr;
qdev             2687 drivers/net/ethernet/qlogic/qla3xxx.c 		    cpu_to_le32(qdev->small_buf_phy_addr_high);
qdev             2689 drivers/net/ethernet/qlogic/qla3xxx.c 		    cpu_to_le32(qdev->small_buf_phy_addr_low +
qdev             2693 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_index = 0;
qdev             2694 drivers/net/ethernet/qlogic/qla3xxx.c 	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
qdev             2698 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_free_small_buffers(struct ql3_adapter *qdev)
qdev             2700 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
qdev             2701 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_info(qdev->ndev, "Already done\n");
qdev             2704 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->small_buf_virt_addr != NULL) {
qdev             2705 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_free_consistent(qdev->pdev,
qdev             2706 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->small_buf_total_size,
qdev             2707 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->small_buf_virt_addr,
qdev             2708 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->small_buf_phy_addr);
qdev             2710 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->small_buf_virt_addr = NULL;
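
The teardown helpers (ql_free_net_req_rsp_queues, ql_free_buffer_queues, ql_free_small_buffers) are all made idempotent the same way: a QL_ALLOC_*_DONE bit is set when the allocation succeeds, the free routine returns early if the bit is clear, and the bit is cleared after freeing so a repeated call is a harmless no-op. A minimal sketch of that guard; the bit name and fields are placeholders.

    #include <linux/bitops.h>
    #include <linux/dma-mapping.h>

    #define DEMO_SMALL_BUF_DONE 0           /* placeholder "allocation done" flag bit */

    struct demo_adapter {
            unsigned long flags;
            struct device *dmadev;
            void *small_buf_virt;
            dma_addr_t small_buf_dma;
            size_t small_buf_size;
    };

    static void demo_free_small_bufs(struct demo_adapter *a)
    {
            if (!test_bit(DEMO_SMALL_BUF_DONE, &a->flags))
                    return;                 /* never allocated, or already freed */

            dma_free_coherent(a->dmadev, a->small_buf_size,
                              a->small_buf_virt, a->small_buf_dma);
            a->small_buf_virt = NULL;
            clear_bit(DEMO_SMALL_BUF_DONE, &a->flags);      /* make repeat calls no-ops */
    }
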
qdev             2714 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_free_large_buffers(struct ql3_adapter *qdev)
qdev             2719 drivers/net/ethernet/qlogic/qla3xxx.c 	for (i = 0; i < qdev->num_large_buffers; i++) {
qdev             2720 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb = &qdev->lrg_buf[i];
qdev             2723 drivers/net/ethernet/qlogic/qla3xxx.c 			pci_unmap_single(qdev->pdev,
qdev             2734 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_init_large_buffers(struct ql3_adapter *qdev)
qdev             2738 drivers/net/ethernet/qlogic/qla3xxx.c 	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
qdev             2740 drivers/net/ethernet/qlogic/qla3xxx.c 	for (i = 0; i < qdev->num_large_buffers; i++) {
qdev             2741 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb = &qdev->lrg_buf[i];
qdev             2746 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_index = 0;
qdev             2747 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_skb_check = 0;
qdev             2750 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
qdev             2758 drivers/net/ethernet/qlogic/qla3xxx.c 	for (i = 0; i < qdev->num_large_buffers; i++) {
qdev             2759 drivers/net/ethernet/qlogic/qla3xxx.c 		lrg_buf_cb = &qdev->lrg_buf[i];
qdev             2762 drivers/net/ethernet/qlogic/qla3xxx.c 		skb = netdev_alloc_skb(qdev->ndev,
qdev             2763 drivers/net/ethernet/qlogic/qla3xxx.c 				       qdev->lrg_buffer_len);
qdev             2766 drivers/net/ethernet/qlogic/qla3xxx.c 			netdev_err(qdev->ndev,
qdev             2768 drivers/net/ethernet/qlogic/qla3xxx.c 				   qdev->lrg_buffer_len * 2, i);
qdev             2769 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_free_large_buffers(qdev);
qdev             2778 drivers/net/ethernet/qlogic/qla3xxx.c 			map = pci_map_single(qdev->pdev,
qdev             2780 drivers/net/ethernet/qlogic/qla3xxx.c 					     qdev->lrg_buffer_len -
qdev             2784 drivers/net/ethernet/qlogic/qla3xxx.c 			err = pci_dma_mapping_error(qdev->pdev, map);
qdev             2786 drivers/net/ethernet/qlogic/qla3xxx.c 				netdev_err(qdev->ndev,
qdev             2790 drivers/net/ethernet/qlogic/qla3xxx.c 				ql_free_large_buffers(qdev);
qdev             2797 drivers/net/ethernet/qlogic/qla3xxx.c 					  qdev->lrg_buffer_len -
qdev             2808 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_free_send_free_list(struct ql3_adapter *qdev)
qdev             2813 drivers/net/ethernet/qlogic/qla3xxx.c 	tx_cb = &qdev->tx_buf[0];
qdev             2821 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_create_send_free_list(struct ql3_adapter *qdev)
qdev             2825 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
qdev             2830 drivers/net/ethernet/qlogic/qla3xxx.c 		tx_cb = &qdev->tx_buf[i];
qdev             2841 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
qdev             2843 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
qdev             2844 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
qdev             2845 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
qdev             2846 drivers/net/ethernet/qlogic/qla3xxx.c 	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
qdev             2850 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
qdev             2851 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
qdev             2853 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
qdev             2854 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
qdev             2857 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->num_large_buffers =
qdev             2858 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
qdev             2859 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
qdev             2860 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->max_frame_size =
qdev             2861 drivers/net/ethernet/qlogic/qla3xxx.c 		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
qdev             2868 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->shadow_reg_virt_addr =
qdev             2869 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_alloc_consistent(qdev->pdev,
qdev             2870 drivers/net/ethernet/qlogic/qla3xxx.c 				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);
qdev             2872 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->shadow_reg_virt_addr != NULL) {
qdev             2873 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
qdev             2874 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->req_consumer_index_phy_addr_high =
qdev             2875 drivers/net/ethernet/qlogic/qla3xxx.c 			MS_64BITS(qdev->shadow_reg_phy_addr);
qdev             2876 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->req_consumer_index_phy_addr_low =
qdev             2877 drivers/net/ethernet/qlogic/qla3xxx.c 			LS_64BITS(qdev->shadow_reg_phy_addr);
qdev             2879 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->prsp_producer_index =
qdev             2880 drivers/net/ethernet/qlogic/qla3xxx.c 			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
qdev             2881 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->rsp_producer_index_phy_addr_high =
qdev             2882 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->req_consumer_index_phy_addr_high;
qdev             2883 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->rsp_producer_index_phy_addr_low =
qdev             2884 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->req_consumer_index_phy_addr_low + 8;
qdev             2886 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
qdev             2890 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
qdev             2891 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
qdev             2895 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_alloc_buffer_queues(qdev) != 0) {
qdev             2896 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
qdev             2900 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_alloc_small_buffers(qdev) != 0) {
qdev             2901 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
qdev             2905 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_alloc_large_buffers(qdev) != 0) {
qdev             2906 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
qdev             2911 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_init_large_buffers(qdev);
qdev             2912 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_create_send_free_list(qdev))
qdev             2915 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->rsp_current = qdev->rsp_q_virt_addr;
qdev             2919 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_send_free_list(qdev);
qdev             2921 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_buffer_queues(qdev);
qdev             2923 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_net_req_rsp_queues(qdev);
qdev             2925 drivers/net/ethernet/qlogic/qla3xxx.c 	pci_free_consistent(qdev->pdev,
qdev             2927 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->shadow_reg_virt_addr,
qdev             2928 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->shadow_reg_phy_addr);
qdev             2933 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_free_mem_resources(struct ql3_adapter *qdev)
qdev             2935 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_send_free_list(qdev);
qdev             2936 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_large_buffers(qdev);
qdev             2937 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_small_buffers(qdev);
qdev             2938 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_buffer_queues(qdev);
qdev             2939 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_net_req_rsp_queues(qdev);
qdev             2940 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->shadow_reg_virt_addr != NULL) {
qdev             2941 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_free_consistent(qdev->pdev,
qdev             2943 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->shadow_reg_virt_addr,
qdev             2944 drivers/net/ethernet/qlogic/qla3xxx.c 				    qdev->shadow_reg_phy_addr);
qdev             2945 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->shadow_reg_virt_addr = NULL;
qdev             2949 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_init_misc_registers(struct ql3_adapter *qdev)
qdev             2952 drivers/net/ethernet/qlogic/qla3xxx.c 	    (void __iomem *)qdev->mem_map_registers;
qdev             2954 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
qdev             2955 drivers/net/ethernet/qlogic/qla3xxx.c 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
qdev             2959 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2960 drivers/net/ethernet/qlogic/qla3xxx.c 			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);
qdev             2962 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2964 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.bufletCount);
qdev             2966 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2968 drivers/net/ethernet/qlogic/qla3xxx.c 			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
qdev             2969 drivers/net/ethernet/qlogic/qla3xxx.c 			   (qdev->nvram_data.tcpWindowThreshold0));
qdev             2971 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2973 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.tcpWindowThreshold50);
qdev             2975 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2977 drivers/net/ethernet/qlogic/qla3xxx.c 			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
qdev             2978 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.ipHashTableBaseLo);
qdev             2979 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2981 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.ipHashTableSize);
qdev             2982 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2984 drivers/net/ethernet/qlogic/qla3xxx.c 			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
qdev             2985 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.tcpHashTableBaseLo);
qdev             2986 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2988 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.tcpHashTableSize);
qdev             2989 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2991 drivers/net/ethernet/qlogic/qla3xxx.c 			   (qdev->nvram_data.ncbTableBaseHi << 16) |
qdev             2992 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.ncbTableBaseLo);
qdev             2993 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2995 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.ncbTableSize);
qdev             2996 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             2998 drivers/net/ethernet/qlogic/qla3xxx.c 			   (qdev->nvram_data.drbTableBaseHi << 16) |
qdev             2999 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.drbTableBaseLo);
qdev             3000 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page2_reg(qdev,
qdev             3002 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->nvram_data.drbTableSize);
qdev             3003 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
qdev             3007 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_adapter_initialize(struct ql3_adapter *qdev)
qdev             3011 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             3018 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_mii_setup(qdev))
qdev             3022 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg(qdev, spir,
qdev             3027 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->port_link_state = LS_DOWN;
qdev             3028 drivers/net/ethernet/qlogic/qla3xxx.c 	netif_carrier_off(qdev->ndev);
qdev             3031 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg(qdev, spir,
qdev             3036 drivers/net/ethernet/qlogic/qla3xxx.c 	*((u32 *)(qdev->preq_consumer_index)) = 0;
qdev             3037 drivers/net/ethernet/qlogic/qla3xxx.c 	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
qdev             3038 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->req_producer_index = 0;
qdev             3040 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3042 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->req_consumer_index_phy_addr_high);
qdev             3043 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3045 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->req_consumer_index_phy_addr_low);
qdev             3047 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3049 drivers/net/ethernet/qlogic/qla3xxx.c 			   MS_64BITS(qdev->req_q_phy_addr));
qdev             3050 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3052 drivers/net/ethernet/qlogic/qla3xxx.c 			   LS_64BITS(qdev->req_q_phy_addr));
qdev             3053 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
qdev             3056 drivers/net/ethernet/qlogic/qla3xxx.c 	*((__le16 *) (qdev->prsp_producer_index)) = 0;
qdev             3057 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->rsp_consumer_index = 0;
qdev             3058 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->rsp_current = qdev->rsp_q_virt_addr;
qdev             3060 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3062 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->rsp_producer_index_phy_addr_high);
qdev             3064 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3066 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->rsp_producer_index_phy_addr_low);
qdev             3068 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3070 drivers/net/ethernet/qlogic/qla3xxx.c 			   MS_64BITS(qdev->rsp_q_phy_addr));
qdev             3072 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3074 drivers/net/ethernet/qlogic/qla3xxx.c 			   LS_64BITS(qdev->rsp_q_phy_addr));
qdev             3076 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
qdev             3079 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3081 drivers/net/ethernet/qlogic/qla3xxx.c 			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
qdev             3083 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3085 drivers/net/ethernet/qlogic/qla3xxx.c 			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
qdev             3087 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3089 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->num_lbufq_entries);
qdev             3091 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3093 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->lrg_buffer_len);
qdev             3096 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3098 drivers/net/ethernet/qlogic/qla3xxx.c 			   MS_64BITS(qdev->small_buf_q_phy_addr));
qdev             3100 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3102 drivers/net/ethernet/qlogic/qla3xxx.c 			   LS_64BITS(qdev->small_buf_q_phy_addr));
qdev             3104 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
qdev             3105 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page1_reg(qdev,
qdev             3109 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
qdev             3110 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_release_cnt = 8;
qdev             3111 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
qdev             3112 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_release_cnt = 8;
qdev             3113 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
qdev             3114 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->small_buf_index = 0;
qdev             3115 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_index = 0;
qdev             3116 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_free_count = 0;
qdev             3117 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_free_head = NULL;
qdev             3118 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->lrg_buf_free_tail = NULL;
qdev             3120 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg(qdev,
qdev             3123 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->small_buf_q_producer_index);
qdev             3124 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg(qdev,
qdev             3127 drivers/net/ethernet/qlogic/qla3xxx.c 			    qdev->lrg_buf_q_producer_index);
qdev             3133 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_LINK_MASTER, &qdev->flags);
qdev             3134 drivers/net/ethernet/qlogic/qla3xxx.c 	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             3138 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_init_misc_registers(qdev)) {
qdev             3143 drivers/net/ethernet/qlogic/qla3xxx.c 		value = qdev->nvram_data.tcpMaxWindowSize;
qdev             3144 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
qdev             3146 drivers/net/ethernet/qlogic/qla3xxx.c 		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
qdev             3148 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
qdev             3149 drivers/net/ethernet/qlogic/qla3xxx.c 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
qdev             3154 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
qdev             3155 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
qdev             3159 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
qdev             3162 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index)
qdev             3163 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev,
qdev             3165 drivers/net/ethernet/qlogic/qla3xxx.c 				   qdev->max_frame_size);
qdev             3167 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev,
qdev             3169 drivers/net/ethernet/qlogic/qla3xxx.c 					   qdev->max_frame_size);
qdev             3171 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
qdev             3172 drivers/net/ethernet/qlogic/qla3xxx.c 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
qdev             3178 drivers/net/ethernet/qlogic/qla3xxx.c 	PHY_Setup(qdev);
qdev             3179 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_init_scan_mode(qdev);
qdev             3180 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_get_phy_owner(qdev);
qdev             3185 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
qdev             3187 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
qdev             3188 drivers/net/ethernet/qlogic/qla3xxx.c 			   ((qdev->ndev->dev_addr[2] << 24)
qdev             3189 drivers/net/ethernet/qlogic/qla3xxx.c 			    | (qdev->ndev->dev_addr[3] << 16)
qdev             3190 drivers/net/ethernet/qlogic/qla3xxx.c 			    | (qdev->ndev->dev_addr[4] << 8)
qdev             3191 drivers/net/ethernet/qlogic/qla3xxx.c 			    | qdev->ndev->dev_addr[5]));
qdev             3194 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
qdev             3196 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
qdev             3197 drivers/net/ethernet/qlogic/qla3xxx.c 			   ((qdev->ndev->dev_addr[0] << 8)
qdev             3198 drivers/net/ethernet/qlogic/qla3xxx.c 			    | qdev->ndev->dev_addr[1]));
qdev             3201 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
qdev             3206 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
qdev             3208 drivers/net/ethernet/qlogic/qla3xxx.c 			    (qdev->mac_index << 2)));
qdev             3209 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
qdev             3211 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
qdev             3213 drivers/net/ethernet/qlogic/qla3xxx.c 			    ((qdev->mac_index << 2) + 1)));
qdev             3214 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
qdev             3216 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
qdev             3219 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev,
qdev             3224 drivers/net/ethernet/qlogic/qla3xxx.c 		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             3227 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irq(&qdev->hw_lock);
qdev             3229 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_lock_irq(&qdev->hw_lock);
qdev             3233 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
qdev             3239 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3032_DEVICE_ID) {
qdev             3244 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->functionControl,
qdev             3250 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_page0_reg(qdev, &port_regs->portControl,
qdev             3262 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_adapter_reset(struct ql3_adapter *qdev)
qdev             3265 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             3270 drivers/net/ethernet/qlogic/qla3xxx.c 	set_bit(QL_RESET_ACTIVE, &qdev->flags);
qdev             3271 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_RESET_DONE, &qdev->flags);
qdev             3276 drivers/net/ethernet/qlogic/qla3xxx.c 	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
qdev             3277 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_common_reg(qdev,
qdev             3282 drivers/net/ethernet/qlogic/qla3xxx.c 	netdev_printk(KERN_DEBUG, qdev->ndev,
qdev             3289 drivers/net/ethernet/qlogic/qla3xxx.c 		    ql_read_common_reg(qdev,
qdev             3302 drivers/net/ethernet/qlogic/qla3xxx.c 	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
qdev             3304 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_printk(KERN_DEBUG, qdev->ndev,
qdev             3306 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_common_reg(qdev,
qdev             3314 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_common_reg(qdev,
qdev             3325 drivers/net/ethernet/qlogic/qla3xxx.c 			value = ql_read_common_reg(qdev,
qdev             3336 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
qdev             3337 drivers/net/ethernet/qlogic/qla3xxx.c 	set_bit(QL_RESET_DONE, &qdev->flags);
qdev             3341 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_set_mac_info(struct ql3_adapter *qdev)
qdev             3344 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             3350 drivers/net/ethernet/qlogic/qla3xxx.c 	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
qdev             3352 drivers/net/ethernet/qlogic/qla3xxx.c 	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
qdev             3355 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mac_index = 0;
qdev             3356 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
qdev             3357 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
qdev             3358 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->PHYAddr = PORT0_PHY_ADDRESS;
qdev             3360 drivers/net/ethernet/qlogic/qla3xxx.c 			set_bit(QL_LINK_OPTICAL, &qdev->flags);
qdev             3362 drivers/net/ethernet/qlogic/qla3xxx.c 			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
qdev             3366 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mac_index = 1;
qdev             3367 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
qdev             3368 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
qdev             3369 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->PHYAddr = PORT1_PHY_ADDRESS;
qdev             3371 drivers/net/ethernet/qlogic/qla3xxx.c 			set_bit(QL_LINK_OPTICAL, &qdev->flags);
qdev             3373 drivers/net/ethernet/qlogic/qla3xxx.c 			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
qdev             3379 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_printk(KERN_DEBUG, qdev->ndev,
qdev             3384 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
qdev             3389 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             3390 drivers/net/ethernet/qlogic/qla3xxx.c 	struct pci_dev *pdev = qdev->pdev;
qdev             3394 drivers/net/ethernet/qlogic/qla3xxx.c 		    DRV_NAME, qdev->index, qdev->chip_rev_id,
qdev             3395 drivers/net/ethernet/qlogic/qla3xxx.c 		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
qdev             3396 drivers/net/ethernet/qlogic/qla3xxx.c 		    qdev->pci_slot);
qdev             3398 drivers/net/ethernet/qlogic/qla3xxx.c 		test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
qdev             3404 drivers/net/ethernet/qlogic/qla3xxx.c 		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
qdev             3405 drivers/net/ethernet/qlogic/qla3xxx.c 		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
qdev             3408 drivers/net/ethernet/qlogic/qla3xxx.c 		    qdev->mem_map_registers);
qdev             3411 drivers/net/ethernet/qlogic/qla3xxx.c 	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
qdev             3414 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
qdev             3416 drivers/net/ethernet/qlogic/qla3xxx.c 	struct net_device *ndev = qdev->ndev;
qdev             3422 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             3423 drivers/net/ethernet/qlogic/qla3xxx.c 	clear_bit(QL_LINK_MASTER, &qdev->flags);
qdev             3425 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_disable_interrupts(qdev);
qdev             3427 drivers/net/ethernet/qlogic/qla3xxx.c 	free_irq(qdev->pdev->irq, ndev);
qdev             3429 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
qdev             3430 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
qdev             3431 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
qdev             3432 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_disable_msi(qdev->pdev);
qdev             3435 drivers/net/ethernet/qlogic/qla3xxx.c 	del_timer_sync(&qdev->adapter_timer);
qdev             3437 drivers/net/ethernet/qlogic/qla3xxx.c 	napi_disable(&qdev->napi);
qdev             3443 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             3444 drivers/net/ethernet/qlogic/qla3xxx.c 		if (ql_wait_for_drvr_lock(qdev)) {
qdev             3445 drivers/net/ethernet/qlogic/qla3xxx.c 			soft_reset = ql_adapter_reset(qdev);
qdev             3448 drivers/net/ethernet/qlogic/qla3xxx.c 					   qdev->index);
qdev             3457 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             3459 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_free_mem_resources(qdev);
qdev             3463 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_adapter_up(struct ql3_adapter *qdev)
qdev             3465 drivers/net/ethernet/qlogic/qla3xxx.c 	struct net_device *ndev = qdev->ndev;
qdev             3470 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_alloc_mem_resources(qdev)) {
qdev             3475 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->msi) {
qdev             3476 drivers/net/ethernet/qlogic/qla3xxx.c 		if (pci_enable_msi(qdev->pdev)) {
qdev             3479 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->msi = 0;
qdev             3482 drivers/net/ethernet/qlogic/qla3xxx.c 			set_bit(QL_MSI_ENABLED, &qdev->flags);
qdev             3487 drivers/net/ethernet/qlogic/qla3xxx.c 	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
qdev             3492 drivers/net/ethernet/qlogic/qla3xxx.c 			   qdev->pdev->irq);
qdev             3496 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             3498 drivers/net/ethernet/qlogic/qla3xxx.c 	err = ql_wait_for_drvr_lock(qdev);
qdev             3500 drivers/net/ethernet/qlogic/qla3xxx.c 		err = ql_adapter_initialize(qdev);
qdev             3506 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
qdev             3512 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             3514 drivers/net/ethernet/qlogic/qla3xxx.c 	set_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             3516 drivers/net/ethernet/qlogic/qla3xxx.c 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
qdev             3518 drivers/net/ethernet/qlogic/qla3xxx.c 	napi_enable(&qdev->napi);
qdev             3519 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_enable_interrupts(qdev);
qdev             3523 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
qdev             3525 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             3526 drivers/net/ethernet/qlogic/qla3xxx.c 	free_irq(qdev->pdev->irq, ndev);
qdev             3528 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
qdev             3530 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
qdev             3531 drivers/net/ethernet/qlogic/qla3xxx.c 		pci_disable_msi(qdev->pdev);
qdev             3536 drivers/net/ethernet/qlogic/qla3xxx.c static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
qdev             3538 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
qdev             3539 drivers/net/ethernet/qlogic/qla3xxx.c 		netdev_err(qdev->ndev,
qdev             3542 drivers/net/ethernet/qlogic/qla3xxx.c 		dev_close(qdev->ndev);
qdev             3551 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             3557 drivers/net/ethernet/qlogic/qla3xxx.c 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
qdev             3560 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_adapter_down(qdev, QL_DO_RESET);
qdev             3566 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             3567 drivers/net/ethernet/qlogic/qla3xxx.c 	return ql_adapter_up(qdev);
qdev             3572 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             3574 drivers/net/ethernet/qlogic/qla3xxx.c 			qdev->mem_map_registers;
qdev             3586 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             3588 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
qdev             3590 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
qdev             3596 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
qdev             3598 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
qdev             3600 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             3607 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
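
The MAC-address writes above (and the earlier ones in ql_adapter_initialize) go through an index/data register pair: the driver selects a slot via macAddrIndirectPtrReg, then writes the payload to macAddrDataReg, all while holding hw_lock. Below is a minimal userspace sketch of that indirect-access pattern; the simulated register file, function names, and slot numbering are invented for illustration and are not the driver's API.

#include <stdio.h>
#include <stdint.h>

/* Simulated indexed register file standing in for the hardware behind
 * the index/data pair. */
static uint32_t fake_regs[16];
static uint32_t fake_index;

static void write_index_reg(uint32_t idx) { fake_index = idx & 0xf; }
static void write_data_reg(uint32_t val)  { fake_regs[fake_index] = val; }
static uint32_t read_data_reg(void)       { return fake_regs[fake_index]; }

/* Program a 6-byte MAC address as two indexed writes, mirroring the
 * lower-dword / upper-word split used above. */
static void set_mac(const uint8_t mac[6])
{
        write_index_reg(0);                               /* lower 4 bytes */
        write_data_reg((uint32_t)mac[2] << 24 | (uint32_t)mac[3] << 16 |
                       (uint32_t)mac[4] << 8  | mac[5]);
        write_index_reg(1);                               /* upper 2 bytes */
        write_data_reg((uint32_t)mac[0] << 8 | mac[1]);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x12, 0x34, 0x56 };

        set_mac(mac);
        write_index_reg(0);
        printf("lower dword = 0x%08x\n", (unsigned)read_data_reg());
        write_index_reg(1);
        printf("upper word  = 0x%04x\n", (unsigned)read_data_reg());
        return 0;
}
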
qdev             3618 drivers/net/ethernet/qlogic/qla3xxx.c 	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
qdev             3623 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev =
qdev             3625 drivers/net/ethernet/qlogic/qla3xxx.c 	struct net_device *ndev = qdev->ndev;
qdev             3630 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             3633 drivers/net/ethernet/qlogic/qla3xxx.c 	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) {
qdev             3634 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_LINK_MASTER, &qdev->flags);
qdev             3641 drivers/net/ethernet/qlogic/qla3xxx.c 			tx_cb = &qdev->tx_buf[i];
qdev             3645 drivers/net/ethernet/qlogic/qla3xxx.c 				pci_unmap_single(qdev->pdev,
qdev             3651 drivers/net/ethernet/qlogic/qla3xxx.c 					pci_unmap_page(qdev->pdev,
qdev             3664 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             3665 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_write_common_reg(qdev,
qdev             3674 drivers/net/ethernet/qlogic/qla3xxx.c 			value = ql_read_common_reg(qdev,
qdev             3687 drivers/net/ethernet/qlogic/qla3xxx.c 				ql_write_common_reg(qdev,
qdev             3695 drivers/net/ethernet/qlogic/qla3xxx.c 			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             3697 drivers/net/ethernet/qlogic/qla3xxx.c 			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev             3699 drivers/net/ethernet/qlogic/qla3xxx.c 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev             3710 drivers/net/ethernet/qlogic/qla3xxx.c 			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
qdev             3711 drivers/net/ethernet/qlogic/qla3xxx.c 			clear_bit(QL_RESET_START, &qdev->flags);
qdev             3712 drivers/net/ethernet/qlogic/qla3xxx.c 			ql_cycle_adapter(qdev, QL_DO_RESET);
qdev             3716 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
qdev             3717 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
qdev             3718 drivers/net/ethernet/qlogic/qla3xxx.c 		clear_bit(QL_RESET_START, &qdev->flags);
qdev             3719 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_cycle_adapter(qdev, QL_NO_RESET);
qdev             3725 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev =
qdev             3728 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_cycle_adapter(qdev, QL_DO_RESET);
qdev             3731 drivers/net/ethernet/qlogic/qla3xxx.c static void ql_get_board_info(struct ql3_adapter *qdev)
qdev             3734 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->mem_map_registers;
qdev             3737 drivers/net/ethernet/qlogic/qla3xxx.c 	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
qdev             3739 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
qdev             3741 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->pci_width = 64;
qdev             3743 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->pci_width = 32;
qdev             3745 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->pci_x = 1;
qdev             3747 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->pci_x = 0;
qdev             3748 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
qdev             3753 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
qdev             3754 drivers/net/ethernet/qlogic/qla3xxx.c 	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
qdev             3770 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = NULL;
qdev             3811 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev = netdev_priv(ndev);
qdev             3812 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->index = cards_found;
qdev             3813 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->ndev = ndev;
qdev             3814 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->pdev = pdev;
qdev             3815 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->device_id = pci_entry->device;
qdev             3816 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->port_link_state = LS_DOWN;
qdev             3818 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->msi = 1;
qdev             3820 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->msg_enable = netif_msg_init(debug, default_msg);
qdev             3824 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->device_id == QL3032_DEVICE_ID)
qdev             3827 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
qdev             3828 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!qdev->mem_map_registers) {
qdev             3834 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_init(&qdev->adapter_lock);
qdev             3835 drivers/net/ethernet/qlogic/qla3xxx.c 	spin_lock_init(&qdev->hw_lock);
qdev             3842 drivers/net/ethernet/qlogic/qla3xxx.c 	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
qdev             3847 drivers/net/ethernet/qlogic/qla3xxx.c 	if (ql_get_nvram_params(qdev)) {
qdev             3849 drivers/net/ethernet/qlogic/qla3xxx.c 			 __func__, qdev->index);
qdev             3854 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_set_mac_info(qdev);
qdev             3857 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->mac_index) {
qdev             3858 drivers/net/ethernet/qlogic/qla3xxx.c 		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
qdev             3859 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
qdev             3861 drivers/net/ethernet/qlogic/qla3xxx.c 		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
qdev             3862 drivers/net/ethernet/qlogic/qla3xxx.c 		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
qdev             3868 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_get_board_info(qdev);
qdev             3874 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->pci_x)
qdev             3888 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
qdev             3889 drivers/net/ethernet/qlogic/qla3xxx.c 	if (!qdev->workqueue) {
qdev             3895 drivers/net/ethernet/qlogic/qla3xxx.c 	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
qdev             3896 drivers/net/ethernet/qlogic/qla3xxx.c 	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
qdev             3897 drivers/net/ethernet/qlogic/qla3xxx.c 	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
qdev             3899 drivers/net/ethernet/qlogic/qla3xxx.c 	timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
qdev             3900 drivers/net/ethernet/qlogic/qla3xxx.c 	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
qdev             3913 drivers/net/ethernet/qlogic/qla3xxx.c 	iounmap(qdev->mem_map_registers);
qdev             3927 drivers/net/ethernet/qlogic/qla3xxx.c 	struct ql3_adapter *qdev = netdev_priv(ndev);
qdev             3931 drivers/net/ethernet/qlogic/qla3xxx.c 	ql_disable_interrupts(qdev);
qdev             3933 drivers/net/ethernet/qlogic/qla3xxx.c 	if (qdev->workqueue) {
qdev             3934 drivers/net/ethernet/qlogic/qla3xxx.c 		cancel_delayed_work(&qdev->reset_work);
qdev             3935 drivers/net/ethernet/qlogic/qla3xxx.c 		cancel_delayed_work(&qdev->tx_timeout_work);
qdev             3936 drivers/net/ethernet/qlogic/qla3xxx.c 		destroy_workqueue(qdev->workqueue);
qdev             3937 drivers/net/ethernet/qlogic/qla3xxx.c 		qdev->workqueue = NULL;
qdev             3940 drivers/net/ethernet/qlogic/qla3xxx.c 	iounmap(qdev->mem_map_registers);
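
Throughout ql_adapter_initialize above, 64-bit DMA queue addresses are programmed as two 32-bit writes to a high/low register pair via MS_64BITS()/LS_64BITS(). A standalone sketch of that split; the helper names and the sample address below are local to the sketch, not the driver's macros.

#include <stdio.h>
#include <stdint.h>

/* Upper and lower 32-bit halves of a 64-bit bus address, as written to
 * the ...AddrHigh / ...AddrLow registers. */
static unsigned int ms_64bits(uint64_t addr) { return (unsigned int)(addr >> 32); }
static unsigned int ls_64bits(uint64_t addr) { return (unsigned int)(addr & 0xffffffffu); }

int main(void)
{
        uint64_t req_q_phy_addr = 0x0000000123456000ULL;  /* made-up DMA address */

        printf("high = 0x%08x\n", ms_64bits(req_q_phy_addr));
        printf("low  = 0x%08x\n", ls_64bits(req_q_phy_addr));
        return 0;
}
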
qdev              322 drivers/s390/cio/qdio.h 	struct qdio_irq *qdev = (__q)->irq_ptr;				\
qdev              323 drivers/s390/cio/qdio.h 	if (qdev->perf_stat_enabled)					\
qdev              324 drivers/s390/cio/qdio.h 		(qdev->perf_stat.__attr)++;				\
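
The qdio.h fragment above bumps a per-device counter only when statistics collection is enabled. A self-contained sketch of that guarded-increment macro; the struct layout and field names here are placeholders rather than the qdio definitions, and the do/while(0) wrapper is added so the macro also sits safely in an unbraced if/else.

#include <stdio.h>

struct fake_dev {
        int perf_stat_enabled;
        struct { unsigned long irqs, polls; } perf_stat;
};

#define count_stat(dev, attr)                           \
        do {                                            \
                if ((dev)->perf_stat_enabled)           \
                        ((dev)->perf_stat.attr)++;      \
        } while (0)

int main(void)
{
        struct fake_dev d = { .perf_stat_enabled = 1 };

        count_stat(&d, irqs);
        count_stat(&d, polls);
        d.perf_stat_enabled = 0;
        count_stat(&d, irqs);   /* no effect while disabled */
        printf("irqs=%lu polls=%lu\n", d.perf_stat.irqs, d.perf_stat.polls);
        return 0;
}
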
qdev             1380 drivers/staging/qlge/qlge.h #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
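
QL_TXQ_IDX above picks a transmit ring by taking the current CPU id modulo the ring count, spreading senders across queues. Tiny sketch with a stand-in for smp_processor_id():

#include <stdio.h>

/* Stand-in for smp_processor_id(); in the kernel this is the CPU the
 * caller is currently running on. */
static unsigned int fake_processor_id(void) { return 5; }

#define TXQ_IDX(tx_ring_count) (fake_processor_id() % (tx_ring_count))

int main(void)
{
        printf("CPU 5 maps to tx ring %u of 4\n", TXQ_IDX(4));
        return 0;
}
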
qdev             1403 drivers/staging/qlge/qlge.h 	struct ql_adapter *qdev;
qdev             1472 drivers/staging/qlge/qlge.h 	struct ql_adapter *qdev;
qdev             1977 drivers/staging/qlge/qlge.h 	struct ql_adapter *qdev;
qdev             2158 drivers/staging/qlge/qlge.h static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
qdev             2160 drivers/staging/qlge/qlge.h 	return readl(qdev->reg_base + reg);
qdev             2166 drivers/staging/qlge/qlge.h static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
qdev             2168 drivers/staging/qlge/qlge.h 	writel(val, qdev->reg_base + reg);
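
ql_read32()/ql_write32() above are thin accessors that add a register offset to the ioremapped BAR base and go through readl()/writel(). A userspace sketch of the same shape, with an ordinary array standing in for the mapped register block (readl/writel, __iomem annotations, and the real ql_adapter layout are deliberately not reproduced):

#include <stdio.h>
#include <stdint.h>

struct fake_adapter {
        volatile uint32_t *reg_base;    /* stands in for the ioremapped BAR */
};

static uint32_t fake_read32(const struct fake_adapter *a, int reg)
{
        return a->reg_base[reg / 4];    /* reg is a byte offset */
}

static void fake_write32(const struct fake_adapter *a, int reg, uint32_t val)
{
        a->reg_base[reg / 4] = val;
}

int main(void)
{
        static volatile uint32_t regs[64];
        struct fake_adapter a = { .reg_base = regs };

        fake_write32(&a, 0x10, 0xdeadbeef);
        printf("reg 0x10 = 0x%08x\n", (unsigned)fake_read32(&a, 0x10));
        return 0;
}
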
qdev             2224 drivers/staging/qlge/qlge.h int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
qdev             2225 drivers/staging/qlge/qlge.h void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
qdev             2226 drivers/staging/qlge/qlge.h int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
qdev             2227 drivers/staging/qlge/qlge.h int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
qdev             2229 drivers/staging/qlge/qlge.h int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
qdev             2230 drivers/staging/qlge/qlge.h int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
qdev             2232 drivers/staging/qlge/qlge.h void ql_queue_fw_error(struct ql_adapter *qdev);
qdev             2236 drivers/staging/qlge/qlge.h int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
qdev             2237 drivers/staging/qlge/qlge.h void ql_queue_asic_error(struct ql_adapter *qdev);
qdev             2238 drivers/staging/qlge/qlge.h u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
qdev             2240 drivers/staging/qlge/qlge.h int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
qdev             2243 drivers/staging/qlge/qlge.h int ql_mb_get_fw_state(struct ql_adapter *qdev);
qdev             2244 drivers/staging/qlge/qlge.h int ql_cam_route_initialize(struct ql_adapter *qdev);
qdev             2245 drivers/staging/qlge/qlge.h int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
qdev             2246 drivers/staging/qlge/qlge.h int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
qdev             2247 drivers/staging/qlge/qlge.h int ql_unpause_mpi_risc(struct ql_adapter *qdev);
qdev             2248 drivers/staging/qlge/qlge.h int ql_pause_mpi_risc(struct ql_adapter *qdev);
qdev             2249 drivers/staging/qlge/qlge.h int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
qdev             2250 drivers/staging/qlge/qlge.h int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
qdev             2251 drivers/staging/qlge/qlge.h int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
qdev             2253 drivers/staging/qlge/qlge.h int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
qdev             2254 drivers/staging/qlge/qlge.h int ql_mb_about_fw(struct ql_adapter *qdev);
qdev             2255 drivers/staging/qlge/qlge.h int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
qdev             2256 drivers/staging/qlge/qlge.h int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
qdev             2257 drivers/staging/qlge/qlge.h int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
qdev             2258 drivers/staging/qlge/qlge.h int ql_mb_get_led_cfg(struct ql_adapter *qdev);
qdev             2259 drivers/staging/qlge/qlge.h void ql_link_on(struct ql_adapter *qdev);
qdev             2260 drivers/staging/qlge/qlge.h void ql_link_off(struct ql_adapter *qdev);
qdev             2261 drivers/staging/qlge/qlge.h int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
qdev             2262 drivers/staging/qlge/qlge.h int ql_mb_get_port_cfg(struct ql_adapter *qdev);
qdev             2263 drivers/staging/qlge/qlge.h int ql_mb_set_port_cfg(struct ql_adapter *qdev);
qdev             2264 drivers/staging/qlge/qlge.h int ql_wait_fifo_empty(struct ql_adapter *qdev);
qdev             2265 drivers/staging/qlge/qlge.h void ql_get_dump(struct ql_adapter *qdev, void *buff);
qdev             2268 drivers/staging/qlge/qlge.h int ql_own_firmware(struct ql_adapter *qdev);
qdev             2279 drivers/staging/qlge/qlge.h void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
qdev             2280 drivers/staging/qlge/qlge.h void ql_dump_routing_entries(struct ql_adapter *qdev);
qdev             2281 drivers/staging/qlge/qlge.h void ql_dump_regs(struct ql_adapter *qdev);
qdev             2282 drivers/staging/qlge/qlge.h #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
qdev             2283 drivers/staging/qlge/qlge.h #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
qdev             2284 drivers/staging/qlge/qlge.h #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
qdev             2286 drivers/staging/qlge/qlge.h #define QL_DUMP_REGS(qdev)
qdev             2287 drivers/staging/qlge/qlge.h #define QL_DUMP_ROUTE(qdev)
qdev             2288 drivers/staging/qlge/qlge.h #define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
qdev             2292 drivers/staging/qlge/qlge.h void ql_dump_stat(struct ql_adapter *qdev);
qdev             2293 drivers/staging/qlge/qlge.h #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
qdev             2295 drivers/staging/qlge/qlge.h #define QL_DUMP_STAT(qdev)
qdev             2299 drivers/staging/qlge/qlge.h void ql_dump_qdev(struct ql_adapter *qdev);
qdev             2300 drivers/staging/qlge/qlge.h #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
qdev             2302 drivers/staging/qlge/qlge.h #define QL_DUMP_QDEV(qdev)
qdev             2311 drivers/staging/qlge/qlge.h void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
qdev             2317 drivers/staging/qlge/qlge.h #define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
qdev             2318 drivers/staging/qlge/qlge.h 		ql_dump_hw_cb(qdev, size, bit, q_id)
qdev             2325 drivers/staging/qlge/qlge.h #define QL_DUMP_HW_CB(qdev, size, bit, q_id)
qdev             2347 drivers/staging/qlge/qlge.h void ql_dump_all(struct ql_adapter *qdev);
qdev             2348 drivers/staging/qlge/qlge.h #define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
qdev             2350 drivers/staging/qlge/qlge.h #define QL_DUMP_ALL(qdev)
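
The QL_DUMP_* macros just above expand to real dump calls in debug builds and to nothing otherwise, so call sites stay free of #ifdefs. A minimal sketch of that pattern; DEBUG_DUMP and dump_state() are invented names for the sketch, not the qlge symbols.

#include <stdio.h>

#ifdef DEBUG_DUMP
static void dump_state(int id) { printf("state for device %d ...\n", id); }
#define DUMP_STATE(id) dump_state(id)
#else
#define DUMP_STATE(id)  /* compiles away when dumping is disabled */
#endif

int main(void)
{
        DUMP_STATE(0);  /* emits code only when built with -DDEBUG_DUMP */
        puts("normal driver path continues unconditionally");
        return 0;
}
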
qdev                9 drivers/staging/qlge/qlge_dbg.c static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
qdev               18 drivers/staging/qlge/qlge_dbg.c 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
qdev               20 drivers/staging/qlge/qlge_dbg.c 	status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
qdev               28 drivers/staging/qlge/qlge_dbg.c static int ql_write_other_func_reg(struct ql_adapter *qdev,
qdev               36 drivers/staging/qlge/qlge_dbg.c 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
qdev               38 drivers/staging/qlge/qlge_dbg.c 	status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
qdev               43 drivers/staging/qlge/qlge_dbg.c static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
qdev               50 drivers/staging/qlge/qlge_dbg.c 		temp = ql_read_other_func_reg(qdev, reg);
qdev               63 drivers/staging/qlge/qlge_dbg.c static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
qdev               69 drivers/staging/qlge/qlge_dbg.c 	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
qdev               75 drivers/staging/qlge/qlge_dbg.c 	ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
qdev               78 drivers/staging/qlge/qlge_dbg.c 	status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
qdev               84 drivers/staging/qlge/qlge_dbg.c 	*data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
qdev               90 drivers/staging/qlge/qlge_dbg.c static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
qdev               95 drivers/staging/qlge/qlge_dbg.c 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
qdev              100 drivers/staging/qlge/qlge_dbg.c 	ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
qdev              103 drivers/staging/qlge/qlge_dbg.c 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
qdev              108 drivers/staging/qlge/qlge_dbg.c 	*data = ql_read32(qdev, XG_SERDES_DATA);
qdev              113 drivers/staging/qlge/qlge_dbg.c static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
qdev              121 drivers/staging/qlge/qlge_dbg.c 		status = ql_read_serdes_reg(qdev, addr, direct_ptr);
qdev              129 drivers/staging/qlge/qlge_dbg.c 						qdev, addr, indirect_ptr);
qdev              135 drivers/staging/qlge/qlge_dbg.c static int ql_get_serdes_regs(struct ql_adapter *qdev,
qdev              148 drivers/staging/qlge/qlge_dbg.c 	status = ql_read_other_func_serdes_reg(qdev,
qdev              157 drivers/staging/qlge/qlge_dbg.c 	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
qdev              170 drivers/staging/qlge/qlge_dbg.c 	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
qdev              177 drivers/staging/qlge/qlge_dbg.c 		if (qdev->func & 1)
qdev              186 drivers/staging/qlge/qlge_dbg.c 		if (qdev->func & 1)
qdev              194 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              205 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              209 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              222 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              226 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              235 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              239 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              250 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              254 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              267 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              271 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              282 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              286 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              298 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              303 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              315 drivers/staging/qlge/qlge_dbg.c 		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
qdev              320 drivers/staging/qlge/qlge_dbg.c static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
qdev              326 drivers/staging/qlge/qlge_dbg.c 	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
qdev              332 drivers/staging/qlge/qlge_dbg.c 	ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
qdev              335 drivers/staging/qlge/qlge_dbg.c 	status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
qdev              341 drivers/staging/qlge/qlge_dbg.c 	*data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
qdev              349 drivers/staging/qlge/qlge_dbg.c static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
qdev              378 drivers/staging/qlge/qlge_dbg.c 				ql_read_other_func_xgmac_reg(qdev, i, buf);
qdev              380 drivers/staging/qlge/qlge_dbg.c 				status = ql_read_xgmac_reg(qdev, i, buf);
qdev              390 drivers/staging/qlge/qlge_dbg.c static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
qdev              396 drivers/staging/qlge/qlge_dbg.c 		ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
qdev              397 drivers/staging/qlge/qlge_dbg.c 		*buf = ql_read32(qdev, NIC_ETS);
qdev              401 drivers/staging/qlge/qlge_dbg.c 		ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
qdev              402 drivers/staging/qlge/qlge_dbg.c 		*buf = ql_read32(qdev, CNA_ETS);
qdev              408 drivers/staging/qlge/qlge_dbg.c static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
qdev              412 drivers/staging/qlge/qlge_dbg.c 	for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
qdev              413 drivers/staging/qlge/qlge_dbg.c 		ql_write32(qdev, INTR_EN,
qdev              414 drivers/staging/qlge/qlge_dbg.c 				qdev->intr_context[i].intr_read_mask);
qdev              415 drivers/staging/qlge/qlge_dbg.c 		*buf = ql_read32(qdev, INTR_EN);
qdev              419 drivers/staging/qlge/qlge_dbg.c static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
qdev              424 drivers/staging/qlge/qlge_dbg.c 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev              429 drivers/staging/qlge/qlge_dbg.c 		status = ql_get_mac_addr_reg(qdev,
qdev              432 drivers/staging/qlge/qlge_dbg.c 			netif_err(qdev, drv, qdev->ndev,
qdev              441 drivers/staging/qlge/qlge_dbg.c 		status = ql_get_mac_addr_reg(qdev,
qdev              444 drivers/staging/qlge/qlge_dbg.c 			netif_err(qdev, drv, qdev->ndev,
qdev              452 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev              456 drivers/staging/qlge/qlge_dbg.c static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
qdev              461 drivers/staging/qlge/qlge_dbg.c 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
qdev              466 drivers/staging/qlge/qlge_dbg.c 		status = ql_get_routing_reg(qdev, i, &value);
qdev              468 drivers/staging/qlge/qlge_dbg.c 			netif_err(qdev, drv, qdev->ndev,
qdev              476 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
qdev              481 drivers/staging/qlge/qlge_dbg.c static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
qdev              487 drivers/staging/qlge/qlge_dbg.c 		status = ql_write_mpi_reg(qdev, RISC_124,
qdev              491 drivers/staging/qlge/qlge_dbg.c 		status = ql_read_mpi_reg(qdev, RISC_127, buf);
qdev              500 drivers/staging/qlge/qlge_dbg.c static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
qdev              505 drivers/staging/qlge/qlge_dbg.c 		status = ql_read_mpi_reg(qdev, offset + i, buf);
qdev              513 drivers/staging/qlge/qlge_dbg.c static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
qdev              526 drivers/staging/qlge/qlge_dbg.c 			ql_write32(qdev, PRB_MX_ADDR, probe);
qdev              527 drivers/staging/qlge/qlge_dbg.c 			lo_val = ql_read32(qdev, PRB_MX_DATA);
qdev              533 drivers/staging/qlge/qlge_dbg.c 			ql_write32(qdev, PRB_MX_ADDR, probe);
qdev              534 drivers/staging/qlge/qlge_dbg.c 			hi_val = ql_read32(qdev, PRB_MX_DATA);
qdev              544 drivers/staging/qlge/qlge_dbg.c static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
qdev              547 drivers/staging/qlge/qlge_dbg.c 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
qdev              548 drivers/staging/qlge/qlge_dbg.c 	buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
qdev              550 drivers/staging/qlge/qlge_dbg.c 	buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
qdev              552 drivers/staging/qlge/qlge_dbg.c 	buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
qdev              554 drivers/staging/qlge/qlge_dbg.c 	buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
qdev              561 drivers/staging/qlge/qlge_dbg.c static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
qdev              569 drivers/staging/qlge/qlge_dbg.c 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
qdev              582 drivers/staging/qlge/qlge_dbg.c 			ql_write32(qdev, RT_IDX, val);
qdev              585 drivers/staging/qlge/qlge_dbg.c 				result_index = ql_read32(qdev, RT_IDX);
qdev              586 drivers/staging/qlge/qlge_dbg.c 			result_data = ql_read32(qdev, RT_DATA);
qdev              597 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
qdev              602 drivers/staging/qlge/qlge_dbg.c static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
qdev              666 drivers/staging/qlge/qlge_dbg.c 				ql_write32(qdev, MAC_ADDR_IDX, val);
qdev              669 drivers/staging/qlge/qlge_dbg.c 					result_index = ql_read32(qdev,
qdev              672 drivers/staging/qlge/qlge_dbg.c 				result_data = ql_read32(qdev, MAC_ADDR_DATA);
qdev              682 drivers/staging/qlge/qlge_dbg.c static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
qdev              691 drivers/staging/qlge/qlge_dbg.c 		status = ql_read_mpi_reg(qdev, reg, &reg_val);
qdev              719 drivers/staging/qlge/qlge_dbg.c int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
qdev              725 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
qdev              733 drivers/staging/qlge/qlge_dbg.c 	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
qdev              735 drivers/staging/qlge/qlge_dbg.c 	status = ql_pause_mpi_risc(qdev);
qdev              737 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev,
qdev              775 drivers/staging/qlge/qlge_dbg.c 	if (qdev->func & 1) {
qdev              779 drivers/staging/qlge/qlge_dbg.c 					 ql_read32(qdev, i * sizeof(u32));
qdev              783 drivers/staging/qlge/qlge_dbg.c 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
qdev              785 drivers/staging/qlge/qlge_dbg.c 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
qdev              786 drivers/staging/qlge/qlge_dbg.c 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
qdev              791 drivers/staging/qlge/qlge_dbg.c 					ql_read32(qdev, i * sizeof(u32));
qdev              794 drivers/staging/qlge/qlge_dbg.c 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
qdev              796 drivers/staging/qlge/qlge_dbg.c 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
qdev              797 drivers/staging/qlge/qlge_dbg.c 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
qdev              897 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_serdes_regs(qdev, mpi_coredump);
qdev              899 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev,
qdev              913 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
qdev              918 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_shadow_regs(qdev,
qdev              929 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
qdev              940 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
qdev              951 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
qdev              964 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
qdev              975 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
qdev              986 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
qdev              997 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
qdev             1008 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
qdev             1019 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
qdev             1030 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
qdev             1041 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
qdev             1052 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
qdev             1063 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
qdev             1074 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
qdev             1084 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
qdev             1085 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
qdev             1086 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
qdev             1087 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.function = qdev->func;
qdev             1096 drivers/staging/qlge/qlge_dbg.c 	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
qdev             1103 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
qdev             1112 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_routing_entries(qdev,
qdev             1123 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
qdev             1132 drivers/staging/qlge/qlge_dbg.c 	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
qdev             1139 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_routing_index_registers(qdev,
qdev             1149 drivers/staging/qlge/qlge_dbg.c 	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
qdev             1157 drivers/staging/qlge/qlge_dbg.c 	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
qdev             1160 drivers/staging/qlge/qlge_dbg.c 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
qdev             1163 drivers/staging/qlge/qlge_dbg.c 	status = ql_unpause_mpi_risc(qdev);
qdev             1165 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1171 drivers/staging/qlge/qlge_dbg.c 	status = ql_hard_reset_mpi_risc(qdev);
qdev             1173 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1183 drivers/staging/qlge/qlge_dbg.c 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
qdev             1186 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1198 drivers/staging/qlge/qlge_dbg.c 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
qdev             1201 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1207 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
qdev             1212 drivers/staging/qlge/qlge_dbg.c static void ql_get_core_dump(struct ql_adapter *qdev)
qdev             1214 drivers/staging/qlge/qlge_dbg.c 	if (!ql_own_firmware(qdev)) {
qdev             1215 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
qdev             1219 drivers/staging/qlge/qlge_dbg.c 	if (!netif_running(qdev->ndev)) {
qdev             1220 drivers/staging/qlge/qlge_dbg.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             1224 drivers/staging/qlge/qlge_dbg.c 	ql_queue_fw_error(qdev);
qdev             1227 drivers/staging/qlge/qlge_dbg.c static void ql_gen_reg_dump(struct ql_adapter *qdev,
qdev             1250 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
qdev             1251 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
qdev             1252 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
qdev             1253 drivers/staging/qlge/qlge_dbg.c 	mpi_coredump->misc_nic_info.function = qdev->func;
qdev             1263 drivers/staging/qlge/qlge_dbg.c 		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
qdev             1272 drivers/staging/qlge/qlge_dbg.c 	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
qdev             1279 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
qdev             1288 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_routing_entries(qdev,
qdev             1299 drivers/staging/qlge/qlge_dbg.c 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
qdev             1304 drivers/staging/qlge/qlge_dbg.c void ql_get_dump(struct ql_adapter *qdev, void *buff)
qdev             1315 drivers/staging/qlge/qlge_dbg.c 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
qdev             1316 drivers/staging/qlge/qlge_dbg.c 		if (!ql_core_dump(qdev, buff))
qdev             1317 drivers/staging/qlge/qlge_dbg.c 			ql_soft_reset_mpi_risc(qdev);
qdev             1319 drivers/staging/qlge/qlge_dbg.c 			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
qdev             1321 drivers/staging/qlge/qlge_dbg.c 		ql_gen_reg_dump(qdev, buff);
qdev             1322 drivers/staging/qlge/qlge_dbg.c 		ql_get_core_dump(qdev);
qdev             1329 drivers/staging/qlge/qlge_dbg.c 	struct ql_adapter *qdev =
qdev             1335 drivers/staging/qlge/qlge_dbg.c 	tmp = (u32 *)qdev->mpi_coredump;
qdev             1336 drivers/staging/qlge/qlge_dbg.c 	netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
qdev             1355 drivers/staging/qlge/qlge_dbg.c static void ql_dump_intr_states(struct ql_adapter *qdev)
qdev             1359 drivers/staging/qlge/qlge_dbg.c 	for (i = 0; i < qdev->intr_count; i++) {
qdev             1360 drivers/staging/qlge/qlge_dbg.c 		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
qdev             1361 drivers/staging/qlge/qlge_dbg.c 		value = ql_read32(qdev, INTR_EN);
qdev             1363 drivers/staging/qlge/qlge_dbg.c 		       qdev->ndev->name, i,
qdev             1368 drivers/staging/qlge/qlge_dbg.c #define DUMP_XGMAC(qdev, reg)					\
qdev             1371 drivers/staging/qlge/qlge_dbg.c 	ql_read_xgmac_reg(qdev, reg, &data);			\
qdev             1372 drivers/staging/qlge/qlge_dbg.c 	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
qdev             1375 drivers/staging/qlge/qlge_dbg.c void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
qdev             1377 drivers/staging/qlge/qlge_dbg.c 	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
qdev             1381 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
qdev             1382 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
qdev             1383 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, GLOBAL_CFG);
qdev             1384 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, TX_CFG);
qdev             1385 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, RX_CFG);
qdev             1386 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, FLOW_CTL);
qdev             1387 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, PAUSE_OPCODE);
qdev             1388 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, PAUSE_TIMER);
qdev             1389 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
qdev             1390 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
qdev             1391 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
qdev             1392 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
qdev             1393 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, MAC_SYS_INT);
qdev             1394 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
qdev             1395 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, MAC_MGMT_INT);
qdev             1396 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
qdev             1397 drivers/staging/qlge/qlge_dbg.c 	DUMP_XGMAC(qdev, EXT_ARB_MODE);
qdev             1398 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
qdev             1401 drivers/staging/qlge/qlge_dbg.c static void ql_dump_ets_regs(struct ql_adapter *qdev)
qdev             1405 drivers/staging/qlge/qlge_dbg.c static void ql_dump_cam_entries(struct ql_adapter *qdev)
qdev             1410 drivers/staging/qlge/qlge_dbg.c 	i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev             1414 drivers/staging/qlge/qlge_dbg.c 		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
qdev             1421 drivers/staging/qlge/qlge_dbg.c 				       qdev->ndev->name, i, value[1], value[0],
qdev             1427 drivers/staging/qlge/qlge_dbg.c 		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
qdev             1434 drivers/staging/qlge/qlge_dbg.c 				       qdev->ndev->name, i, value[1], value[0]);
qdev             1437 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev             1440 drivers/staging/qlge/qlge_dbg.c void ql_dump_routing_entries(struct ql_adapter *qdev)
qdev             1444 drivers/staging/qlge/qlge_dbg.c 	i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
qdev             1449 drivers/staging/qlge/qlge_dbg.c 		if (ql_get_routing_reg(qdev, i, &value)) {
qdev             1456 drivers/staging/qlge/qlge_dbg.c 				       qdev->ndev->name, i, value);
qdev             1459 drivers/staging/qlge/qlge_dbg.c 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
qdev             1462 drivers/staging/qlge/qlge_dbg.c #define DUMP_REG(qdev, reg)			\
qdev             1463 drivers/staging/qlge/qlge_dbg.c 	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
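
DUMP_REG above leans on the preprocessor stringizing operator: #reg turns the register name into the label printed next to its value. Standalone sketch with printf in place of pr_err and a faked register read; the offsets in the enum are placeholders.

#include <stdio.h>

static unsigned int fake_read32(int reg) { return 0x1000u + (unsigned int)reg; }

/* #reg stringizes the macro argument, so every register prints under
 * its own name without a separate table of strings. */
#define DUMP_REG(reg) \
        printf("%-32s= 0x%x\n", #reg, fake_read32(reg))

enum { SYS = 0x08, RST_FO = 0x0c, CSR = 0x58 };  /* placeholder offsets */

int main(void)
{
        DUMP_REG(SYS);
        DUMP_REG(RST_FO);
        DUMP_REG(CSR);
        return 0;
}
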
qdev             1465 drivers/staging/qlge/qlge_dbg.c void ql_dump_regs(struct ql_adapter *qdev)
qdev             1467 drivers/staging/qlge/qlge_dbg.c 	pr_err("reg dump for function #%d\n", qdev->func);
qdev             1468 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, SYS);
qdev             1469 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, RST_FO);
qdev             1470 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FSC);
qdev             1471 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, CSR);
qdev             1472 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ICB_RID);
qdev             1473 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ICB_L);
qdev             1474 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ICB_H);
qdev             1475 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, CFG);
qdev             1476 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, BIOS_ADDR);
qdev             1477 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, STS);
qdev             1478 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, INTR_EN);
qdev             1479 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, INTR_MASK);
qdev             1480 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ISR1);
qdev             1481 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ISR2);
qdev             1482 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ISR3);
qdev             1483 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ISR4);
qdev             1484 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, REV_ID);
qdev             1485 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FRC_ECC_ERR);
qdev             1486 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ERR_STS);
qdev             1487 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, RAM_DBG_ADDR);
qdev             1488 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, RAM_DBG_DATA);
qdev             1489 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, ECC_ERR_CNT);
qdev             1490 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, SEM);
qdev             1491 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, GPIO_1);
qdev             1492 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, GPIO_2);
qdev             1493 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, GPIO_3);
qdev             1494 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, XGMAC_ADDR);
qdev             1495 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, XGMAC_DATA);
qdev             1496 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, NIC_ETS);
qdev             1497 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, CNA_ETS);
qdev             1498 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FLASH_ADDR);
qdev             1499 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FLASH_DATA);
qdev             1500 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, CQ_STOP);
qdev             1501 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, PAGE_TBL_RID);
qdev             1502 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
qdev             1503 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
qdev             1504 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
qdev             1505 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
qdev             1506 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, COS_DFLT_CQ1);
qdev             1507 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, COS_DFLT_CQ2);
qdev             1508 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, SPLT_HDR);
qdev             1509 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FC_PAUSE_THRES);
qdev             1510 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, NIC_PAUSE_THRES);
qdev             1511 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FC_ETHERTYPE);
qdev             1512 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FC_RCV_CFG);
qdev             1513 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, NIC_RCV_CFG);
qdev             1514 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, FC_COS_TAGS);
qdev             1515 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, NIC_COS_TAGS);
qdev             1516 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, MGMT_RCV_CFG);
qdev             1517 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, XG_SERDES_ADDR);
qdev             1518 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, XG_SERDES_DATA);
qdev             1519 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, PRB_MX_ADDR);
qdev             1520 drivers/staging/qlge/qlge_dbg.c 	DUMP_REG(qdev, PRB_MX_DATA);
qdev             1521 drivers/staging/qlge/qlge_dbg.c 	ql_dump_intr_states(qdev);
qdev             1522 drivers/staging/qlge/qlge_dbg.c 	ql_dump_xgmac_control_regs(qdev);
qdev             1523 drivers/staging/qlge/qlge_dbg.c 	ql_dump_ets_regs(qdev);
qdev             1524 drivers/staging/qlge/qlge_dbg.c 	ql_dump_cam_entries(qdev);
qdev             1525 drivers/staging/qlge/qlge_dbg.c 	ql_dump_routing_entries(qdev);
qdev             1531 drivers/staging/qlge/qlge_dbg.c #define DUMP_STAT(qdev, stat)	\
qdev             1532 drivers/staging/qlge/qlge_dbg.c 	pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
qdev             1534 drivers/staging/qlge/qlge_dbg.c void ql_dump_stat(struct ql_adapter *qdev)
qdev             1537 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_pkts);
qdev             1538 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_bytes);
qdev             1539 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_mcast_pkts);
qdev             1540 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_bcast_pkts);
qdev             1541 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_ucast_pkts);
qdev             1542 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_ctl_pkts);
qdev             1543 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_pause_pkts);
qdev             1544 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_64_pkt);
qdev             1545 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_65_to_127_pkt);
qdev             1546 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_128_to_255_pkt);
qdev             1547 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_256_511_pkt);
qdev             1548 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_512_to_1023_pkt);
qdev             1549 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
qdev             1550 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_1519_to_max_pkt);
qdev             1551 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_undersize_pkt);
qdev             1552 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, tx_oversize_pkt);
qdev             1553 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_bytes);
qdev             1554 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_bytes_ok);
qdev             1555 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_pkts);
qdev             1556 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_pkts_ok);
qdev             1557 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_bcast_pkts);
qdev             1558 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_mcast_pkts);
qdev             1559 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_ucast_pkts);
qdev             1560 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_undersize_pkts);
qdev             1561 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_oversize_pkts);
qdev             1562 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_jabber_pkts);
qdev             1563 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
qdev             1564 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_drop_events);
qdev             1565 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_fcerr_pkts);
qdev             1566 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_align_err);
qdev             1567 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_symbol_err);
qdev             1568 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_mac_err);
qdev             1569 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_ctl_pkts);
qdev             1570 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_pause_pkts);
qdev             1571 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_64_pkts);
qdev             1572 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_65_to_127_pkts);
qdev             1573 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_128_255_pkts);
qdev             1574 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_256_511_pkts);
qdev             1575 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_512_to_1023_pkts);
qdev             1576 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
qdev             1577 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_1519_to_max_pkts);
qdev             1578 drivers/staging/qlge/qlge_dbg.c 	DUMP_STAT(qdev, rx_len_err_pkts);
qdev             1584 drivers/staging/qlge/qlge_dbg.c #define DUMP_QDEV_FIELD(qdev, type, field)		\
qdev             1585 drivers/staging/qlge/qlge_dbg.c 	pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
qdev             1586 drivers/staging/qlge/qlge_dbg.c #define DUMP_QDEV_DMA_FIELD(qdev, field)		\
qdev             1587 drivers/staging/qlge/qlge_dbg.c 	pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
qdev             1588 drivers/staging/qlge/qlge_dbg.c #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
qdev             1590 drivers/staging/qlge/qlge_dbg.c 	       #array, index, #field, qdev->array[index].field);
qdev             1591 drivers/staging/qlge/qlge_dbg.c void ql_dump_qdev(struct ql_adapter *qdev)
qdev             1594 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%lx", flags);
qdev             1595 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
qdev             1596 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", pdev);
qdev             1597 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", ndev);
qdev             1598 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
qdev             1599 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
qdev             1600 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
qdev             1601 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
qdev             1602 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
qdev             1603 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
qdev             1604 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
qdev             1605 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
qdev             1606 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
qdev             1607 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
qdev             1608 drivers/staging/qlge/qlge_dbg.c 	if (qdev->msi_x_entry)
qdev             1609 drivers/staging/qlge/qlge_dbg.c 		for (i = 0; i < qdev->intr_count; i++) {
qdev             1610 drivers/staging/qlge/qlge_dbg.c 			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
qdev             1611 drivers/staging/qlge/qlge_dbg.c 			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
qdev             1613 drivers/staging/qlge/qlge_dbg.c 	for (i = 0; i < qdev->intr_count; i++) {
qdev             1614 drivers/staging/qlge/qlge_dbg.c 		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
qdev             1615 drivers/staging/qlge/qlge_dbg.c 		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
qdev             1616 drivers/staging/qlge/qlge_dbg.c 		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
qdev             1617 drivers/staging/qlge/qlge_dbg.c 		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
qdev             1618 drivers/staging/qlge/qlge_dbg.c 		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
qdev             1619 drivers/staging/qlge/qlge_dbg.c 		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
qdev             1621 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
qdev             1622 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
qdev             1623 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
qdev             1624 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
qdev             1625 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
qdev             1626 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
qdev             1627 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
qdev             1628 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
qdev             1629 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
qdev             1630 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
qdev             1631 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
qdev             1632 drivers/staging/qlge/qlge_dbg.c 	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
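The DUMP_REG, DUMP_STAT and DUMP_QDEV_* entries listed above all lean on the same preprocessor trick: the # stringification operator turns the register, statistic or field name into the printed label, so the debug dump cannot drift out of sync with the identifier being dumped. Below is a minimal user-space sketch of that pattern, assuming invented stand-ins: fake_read32() and struct demo_adapter replace ql_read32() and struct ql_adapter, and the enum register indices are made up for the demo.

#include <stdio.h>
#include <stdint.h>

/* Invented stand-ins for struct ql_adapter and ql_read32(). */
struct demo_adapter {
	uint32_t regs[4];
	struct { unsigned long tx_pkts, rx_pkts; } nic_stats;
};

static uint32_t fake_read32(struct demo_adapter *qdev, int reg)
{
	return qdev->regs[reg];
}

/* Same idea as DUMP_REG/DUMP_STAT in qlge_dbg.c: #reg / #stat become the label. */
#define DUMP_REG(qdev, reg) \
	printf("%-32s= 0x%x\n", #reg, fake_read32(qdev, reg))
#define DUMP_STAT(qdev, stat) \
	printf("%s = %lu\n", #stat, (unsigned long)(qdev)->nic_stats.stat)

enum { SYS, RST_FO, FSC, CSR };	/* hypothetical register indices */

int main(void)
{
	struct demo_adapter a = {
		.regs = { 0x1, 0x2, 0x40, 0x80 },
		.nic_stats = { .tx_pkts = 10, .rx_pkts = 12 },
	};

	DUMP_REG(&a, SYS);
	DUMP_REG(&a, RST_FO);
	DUMP_STAT(&a, tx_pkts);
	DUMP_STAT(&a, rx_pkts);
	return 0;
}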
qdev             1799 drivers/staging/qlge/qlge_dbg.c 	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
qdev             1802 drivers/staging/qlge/qlge_dbg.c void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
qdev             1812 drivers/staging/qlge/qlge_dbg.c 	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
qdev             2009 drivers/staging/qlge/qlge_dbg.c void ql_dump_all(struct ql_adapter *qdev)
qdev             2013 drivers/staging/qlge/qlge_dbg.c 	QL_DUMP_REGS(qdev);
qdev             2014 drivers/staging/qlge/qlge_dbg.c 	QL_DUMP_QDEV(qdev);
qdev             2015 drivers/staging/qlge/qlge_dbg.c 	for (i = 0; i < qdev->tx_ring_count; i++) {
qdev             2016 drivers/staging/qlge/qlge_dbg.c 		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
qdev             2017 drivers/staging/qlge/qlge_dbg.c 		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
qdev             2019 drivers/staging/qlge/qlge_dbg.c 	for (i = 0; i < qdev->rx_ring_count; i++) {
qdev             2020 drivers/staging/qlge/qlge_dbg.c 		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
qdev             2021 drivers/staging/qlge/qlge_dbg.c 		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
qdev              186 drivers/staging/qlge/qlge_ethtool.c static int ql_update_ring_coalescing(struct ql_adapter *qdev)
qdev              192 drivers/staging/qlge/qlge_ethtool.c 	if (!netif_running(qdev->ndev))
qdev              198 drivers/staging/qlge/qlge_ethtool.c 	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
qdev              199 drivers/staging/qlge/qlge_ethtool.c 	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
qdev              201 drivers/staging/qlge/qlge_ethtool.c 				qdev->tx_max_coalesced_frames) {
qdev              202 drivers/staging/qlge/qlge_ethtool.c 		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
qdev              203 drivers/staging/qlge/qlge_ethtool.c 			rx_ring = &qdev->rx_ring[i];
qdev              205 drivers/staging/qlge/qlge_ethtool.c 			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
qdev              207 drivers/staging/qlge/qlge_ethtool.c 			    cpu_to_le16(qdev->tx_max_coalesced_frames);
qdev              209 drivers/staging/qlge/qlge_ethtool.c 			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
qdev              212 drivers/staging/qlge/qlge_ethtool.c 				netif_err(qdev, ifup, qdev->ndev,
qdev              220 drivers/staging/qlge/qlge_ethtool.c 	cqicb = (struct cqicb *)&qdev->rx_ring[0];
qdev              221 drivers/staging/qlge/qlge_ethtool.c 	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
qdev              223 drivers/staging/qlge/qlge_ethtool.c 					qdev->rx_max_coalesced_frames) {
qdev              224 drivers/staging/qlge/qlge_ethtool.c 		for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
qdev              225 drivers/staging/qlge/qlge_ethtool.c 			rx_ring = &qdev->rx_ring[i];
qdev              227 drivers/staging/qlge/qlge_ethtool.c 			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
qdev              229 drivers/staging/qlge/qlge_ethtool.c 			    cpu_to_le16(qdev->rx_max_coalesced_frames);
qdev              231 drivers/staging/qlge/qlge_ethtool.c 			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
qdev              234 drivers/staging/qlge/qlge_ethtool.c 				netif_err(qdev, ifup, qdev->ndev,
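ql_update_ring_coalescing() above only reprograms completion queues when the requested ethtool values actually differ from the cached ones: it inspects one representative cqicb, compares irq_delay/pkt_delay, and only then walks the affected rings and downloads fresh control blocks via ql_write_cfg(). The compile-and-run sketch below models that compare-then-write shape in user space; push_cqicb(), struct demo_cqicb and the other names are invented stand-ins, not driver API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for cqicb / ql_adapter fields. */
struct demo_cqicb { uint16_t irq_delay; uint16_t pkt_delay; };
struct demo_ring  { struct demo_cqicb cqicb; };
struct demo_adapter {
	struct demo_ring rings[8];
	int ring_count;
	uint16_t rx_coalesce_usecs;
	uint16_t rx_max_coalesced_frames;
};

/* Stand-in for ql_write_cfg(): pretend to download one control block. */
static int push_cqicb(struct demo_ring *ring)
{
	printf("pushing cqicb: delay=%u frames=%u\n",
	       (unsigned)ring->cqicb.irq_delay,
	       (unsigned)ring->cqicb.pkt_delay);
	return 0;
}

static int update_rx_coalescing(struct demo_adapter *qdev)
{
	struct demo_cqicb *cqicb = &qdev->rings[0].cqicb;
	int i;

	/* Skip the expensive reconfiguration if nothing changed. */
	if (cqicb->irq_delay == qdev->rx_coalesce_usecs &&
	    cqicb->pkt_delay == qdev->rx_max_coalesced_frames)
		return 0;

	for (i = 0; i < qdev->ring_count; i++) {
		struct demo_ring *ring = &qdev->rings[i];

		ring->cqicb.irq_delay = qdev->rx_coalesce_usecs;
		ring->cqicb.pkt_delay = qdev->rx_max_coalesced_frames;
		if (push_cqicb(ring))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_adapter a = { .ring_count = 2,
				  .rx_coalesce_usecs = 50,
				  .rx_max_coalesced_frames = 8 };
	return update_rx_coalescing(&a);
}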
qdev              244 drivers/staging/qlge/qlge_ethtool.c static void ql_update_stats(struct ql_adapter *qdev)
qdev              248 drivers/staging/qlge/qlge_ethtool.c 	u64 *iter = &qdev->nic_stats.tx_pkts;
qdev              250 drivers/staging/qlge/qlge_ethtool.c 	spin_lock(&qdev->stats_lock);
qdev              251 drivers/staging/qlge/qlge_ethtool.c 	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
qdev              252 drivers/staging/qlge/qlge_ethtool.c 		netif_err(qdev, drv, qdev->ndev,
qdev              260 drivers/staging/qlge/qlge_ethtool.c 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
qdev              261 drivers/staging/qlge/qlge_ethtool.c 			netif_err(qdev, drv, qdev->ndev,
qdev              274 drivers/staging/qlge/qlge_ethtool.c 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
qdev              275 drivers/staging/qlge/qlge_ethtool.c 			netif_err(qdev, drv, qdev->ndev,
qdev              291 drivers/staging/qlge/qlge_ethtool.c 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
qdev              292 drivers/staging/qlge/qlge_ethtool.c 			netif_err(qdev, drv, qdev->ndev,
qdev              305 drivers/staging/qlge/qlge_ethtool.c 		if (ql_read_xgmac_reg64(qdev, i, &data)) {
qdev              306 drivers/staging/qlge/qlge_ethtool.c 			netif_err(qdev, drv, qdev->ndev,
qdev              318 drivers/staging/qlge/qlge_ethtool.c 	if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
qdev              319 drivers/staging/qlge/qlge_ethtool.c 		netif_err(qdev, drv, qdev->ndev,
qdev              325 drivers/staging/qlge/qlge_ethtool.c 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
qdev              327 drivers/staging/qlge/qlge_ethtool.c 	spin_unlock(&qdev->stats_lock);
qdev              329 drivers/staging/qlge/qlge_ethtool.c 	QL_DUMP_STAT(qdev);
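ql_update_stats() above harvests banks of 64-bit XGMAC counters while holding both stats_lock and the port's hardware semaphore, storing each value through an iterator pointer that starts at &qdev->nic_stats.tx_pkts and simply advances across consecutive u64 fields. The sketch below shows only that iterator idea with a fabricated register map (read_counter64(), the 0x200/0x300 offsets and struct demo_stats are invented); it is not the driver's counter layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stats block: consecutive u64 counters, like nic_stats. */
struct demo_stats {
	uint64_t tx_pkts;
	uint64_t tx_bytes;
	uint64_t rx_pkts;
	uint64_t rx_bytes;
};

/* Stand-in for ql_read_xgmac_reg64(): fabricate a counter value. */
static int read_counter64(uint32_t reg, uint64_t *data)
{
	*data = 0x100ULL + reg;
	return 0;
}

static int harvest_stats(struct demo_stats *stats)
{
	/* Iterator walks the struct as consecutive u64 fields, like "iter". */
	uint64_t *iter = &stats->tx_pkts;
	uint32_t reg;

	/* Pretend TX counters live at 0x200..0x208, RX at 0x300..0x308. */
	for (reg = 0x200; reg <= 0x208; reg += 8, iter++)
		if (read_counter64(reg, iter))
			return -1;
	for (reg = 0x300; reg <= 0x308; reg += 8, iter++)
		if (read_counter64(reg, iter))
			return -1;
	return 0;
}

int main(void)
{
	struct demo_stats s = { 0 };

	if (harvest_stats(&s))
		return 1;
	printf("tx_pkts=%llu rx_bytes=%llu\n",
	       (unsigned long long)s.tx_pkts,
	       (unsigned long long)s.rx_bytes);
	return 0;
}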
qdev              365 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              369 drivers/staging/qlge/qlge_ethtool.c 	ql_update_stats(qdev);
qdev              372 drivers/staging/qlge/qlge_ethtool.c 		char *p = (char *)qdev +
qdev              382 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              388 drivers/staging/qlge/qlge_ethtool.c 	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
qdev              414 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              420 drivers/staging/qlge/qlge_ethtool.c 		 (qdev->fw_rev_id & 0x00ff0000) >> 16,
qdev              421 drivers/staging/qlge/qlge_ethtool.c 		 (qdev->fw_rev_id & 0x0000ff00) >> 8,
qdev              422 drivers/staging/qlge/qlge_ethtool.c 		 (qdev->fw_rev_id & 0x000000ff));
qdev              423 drivers/staging/qlge/qlge_ethtool.c 	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
qdev              429 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              430 drivers/staging/qlge/qlge_ethtool.c 	unsigned short ssys_dev = qdev->pdev->subsystem_device;
qdev              436 drivers/staging/qlge/qlge_ethtool.c 		wol->wolopts = qdev->wol;
qdev              442 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              443 drivers/staging/qlge/qlge_ethtool.c 	unsigned short ssys_dev = qdev->pdev->subsystem_device;
qdev              448 drivers/staging/qlge/qlge_ethtool.c 		netif_info(qdev, drv, qdev->ndev,
qdev              454 drivers/staging/qlge/qlge_ethtool.c 	qdev->wol = wol->wolopts;
qdev              456 drivers/staging/qlge/qlge_ethtool.c 	netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
qdev              464 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              469 drivers/staging/qlge/qlge_ethtool.c 		if (ql_mb_get_led_cfg(qdev))
qdev              473 drivers/staging/qlge/qlge_ethtool.c 		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
qdev              478 drivers/staging/qlge/qlge_ethtool.c 		if (ql_mb_set_led_cfg(qdev, qdev->led_config))
qdev              487 drivers/staging/qlge/qlge_ethtool.c static int ql_start_loopback(struct ql_adapter *qdev)
qdev              489 drivers/staging/qlge/qlge_ethtool.c 	if (netif_carrier_ok(qdev->ndev)) {
qdev              490 drivers/staging/qlge/qlge_ethtool.c 		set_bit(QL_LB_LINK_UP, &qdev->flags);
qdev              491 drivers/staging/qlge/qlge_ethtool.c 		netif_carrier_off(qdev->ndev);
qdev              493 drivers/staging/qlge/qlge_ethtool.c 		clear_bit(QL_LB_LINK_UP, &qdev->flags);
qdev              494 drivers/staging/qlge/qlge_ethtool.c 	qdev->link_config |= CFG_LOOPBACK_PCS;
qdev              495 drivers/staging/qlge/qlge_ethtool.c 	return ql_mb_set_port_cfg(qdev);
qdev              498 drivers/staging/qlge/qlge_ethtool.c static void ql_stop_loopback(struct ql_adapter *qdev)
qdev              500 drivers/staging/qlge/qlge_ethtool.c 	qdev->link_config &= ~CFG_LOOPBACK_PCS;
qdev              501 drivers/staging/qlge/qlge_ethtool.c 	ql_mb_set_port_cfg(qdev);
qdev              502 drivers/staging/qlge/qlge_ethtool.c 	if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
qdev              503 drivers/staging/qlge/qlge_ethtool.c 		netif_carrier_on(qdev->ndev);
qdev              504 drivers/staging/qlge/qlge_ethtool.c 		clear_bit(QL_LB_LINK_UP, &qdev->flags);
qdev              518 drivers/staging/qlge/qlge_ethtool.c void ql_check_lb_frame(struct ql_adapter *qdev,
qdev              526 drivers/staging/qlge/qlge_ethtool.c 			atomic_dec(&qdev->lb_count);
qdev              531 drivers/staging/qlge/qlge_ethtool.c static int ql_run_loopback_test(struct ql_adapter *qdev)
qdev              539 drivers/staging/qlge/qlge_ethtool.c 		skb = netdev_alloc_skb(qdev->ndev, size);
qdev              546 drivers/staging/qlge/qlge_ethtool.c 		rc = ql_lb_send(skb, qdev->ndev);
qdev              549 drivers/staging/qlge/qlge_ethtool.c 		atomic_inc(&qdev->lb_count);
qdev              553 drivers/staging/qlge/qlge_ethtool.c 	ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
qdev              554 drivers/staging/qlge/qlge_ethtool.c 	return atomic_read(&qdev->lb_count) ? -EIO : 0;
qdev              557 drivers/staging/qlge/qlge_ethtool.c static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
qdev              559 drivers/staging/qlge/qlge_ethtool.c 	*data = ql_start_loopback(qdev);
qdev              562 drivers/staging/qlge/qlge_ethtool.c 	*data = ql_run_loopback_test(qdev);
qdev              564 drivers/staging/qlge/qlge_ethtool.c 	ql_stop_loopback(qdev);
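The loopback self-test entries above amount to a counting contract: ql_start_loopback() saves the carrier state and forces PCS loopback through the port-config mailbox, ql_run_loopback_test() sends a batch of frames and bumps qdev->lb_count for each, the receive path decrements it for every frame that returns intact (ql_check_lb_frame()), and any residue left after cleaning the RX ring turns into -EIO. A self-contained sketch of that counting scheme with C11 atomics follows; demo_send_frame(), demo_receive_frame() and LB_FRAMES are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

#define LB_FRAMES 16

static atomic_int lb_count;	/* frames in flight, like qdev->lb_count */

/* Stand-in for ql_lb_send(): pretend every frame is transmitted. */
static int demo_send_frame(int i)
{
	(void)i;
	return 0;
}

/* Stand-in for ql_check_lb_frame(): called from the "RX path". */
static void demo_receive_frame(void)
{
	atomic_fetch_sub(&lb_count, 1);
}

static int run_loopback_test(void)
{
	int i;

	for (i = 0; i < LB_FRAMES; i++) {
		if (demo_send_frame(i))
			return -1;
		atomic_fetch_add(&lb_count, 1);
	}

	/* Simulate the RX ring handing every frame back. */
	for (i = 0; i < LB_FRAMES; i++)
		demo_receive_frame();

	/* Any frame that never came back leaves a residue -> failure. */
	return atomic_load(&lb_count) ? -1 : 0;
}

int main(void)
{
	printf("loopback %s\n", run_loopback_test() ? "FAILED" : "passed");
	return 0;
}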
qdev              571 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              576 drivers/staging/qlge/qlge_ethtool.c 		set_bit(QL_SELFTEST, &qdev->flags);
qdev              579 drivers/staging/qlge/qlge_ethtool.c 			if (ql_loopback_test(qdev, &data[0]))
qdev              586 drivers/staging/qlge/qlge_ethtool.c 		clear_bit(QL_SELFTEST, &qdev->flags);
qdev              592 drivers/staging/qlge/qlge_ethtool.c 		netif_err(qdev, drv, qdev->ndev,
qdev              600 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              602 drivers/staging/qlge/qlge_ethtool.c 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
qdev              611 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              613 drivers/staging/qlge/qlge_ethtool.c 	ql_get_dump(qdev, p);
qdev              614 drivers/staging/qlge/qlge_ethtool.c 	qdev->core_is_dumped = 0;
qdev              615 drivers/staging/qlge/qlge_ethtool.c 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
qdev              623 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(dev);
qdev              625 drivers/staging/qlge/qlge_ethtool.c 	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
qdev              626 drivers/staging/qlge/qlge_ethtool.c 	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
qdev              638 drivers/staging/qlge/qlge_ethtool.c 	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
qdev              639 drivers/staging/qlge/qlge_ethtool.c 	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
qdev              646 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              649 drivers/staging/qlge/qlge_ethtool.c 	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
qdev              654 drivers/staging/qlge/qlge_ethtool.c 	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
qdev              660 drivers/staging/qlge/qlge_ethtool.c 	if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
qdev              661 drivers/staging/qlge/qlge_ethtool.c 	    qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
qdev              662 drivers/staging/qlge/qlge_ethtool.c 	    qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
qdev              663 drivers/staging/qlge/qlge_ethtool.c 	    qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
qdev              666 drivers/staging/qlge/qlge_ethtool.c 	qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
qdev              667 drivers/staging/qlge/qlge_ethtool.c 	qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
qdev              668 drivers/staging/qlge/qlge_ethtool.c 	qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
qdev              669 drivers/staging/qlge/qlge_ethtool.c 	qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
qdev              671 drivers/staging/qlge/qlge_ethtool.c 	return ql_update_ring_coalescing(qdev);
qdev              677 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(netdev);
qdev              679 drivers/staging/qlge/qlge_ethtool.c 	ql_mb_get_port_cfg(qdev);
qdev              680 drivers/staging/qlge/qlge_ethtool.c 	if (qdev->link_config & CFG_PAUSE_STD) {
qdev              689 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(netdev);
qdev              693 drivers/staging/qlge/qlge_ethtool.c 		qdev->link_config |= CFG_PAUSE_STD;
qdev              695 drivers/staging/qlge/qlge_ethtool.c 		qdev->link_config &= ~CFG_PAUSE_STD;
qdev              699 drivers/staging/qlge/qlge_ethtool.c 	status = ql_mb_set_port_cfg(qdev);
qdev              705 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              706 drivers/staging/qlge/qlge_ethtool.c 	return qdev->msg_enable;
qdev              711 drivers/staging/qlge/qlge_ethtool.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev              712 drivers/staging/qlge/qlge_ethtool.c 	qdev->msg_enable = value;
qdev              107 drivers/staging/qlge/qlge_main.c static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
qdev              137 drivers/staging/qlge/qlge_main.c 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
qdev              141 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, SEM, sem_bits | sem_mask);
qdev              142 drivers/staging/qlge/qlge_main.c 	return !(ql_read32(qdev, SEM) & sem_bits);
qdev              145 drivers/staging/qlge/qlge_main.c int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
qdev              149 drivers/staging/qlge/qlge_main.c 		if (!ql_sem_trylock(qdev, sem_mask))
qdev              156 drivers/staging/qlge/qlge_main.c void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
qdev              158 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, SEM, sem_mask);
qdev              159 drivers/staging/qlge/qlge_main.c 	ql_read32(qdev, SEM);	/* flush */
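The ql_sem_trylock()/ql_sem_spinlock()/ql_sem_unlock() entries above describe the inter-function hardware semaphore protocol: write the claim bits together with their mask into SEM, read the register back, and treat the claim as granted only if the bits stuck; the spinlock variant just retries the trylock a bounded number of times, and unlock writes the mask alone so the bits clear. The user-space model below mirrors that try/retry/unlock shape; sem_reg, held_by_other_func and SEM_RETRIES are invented to simulate the device.

#include <stdint.h>
#include <stdio.h>

#define SEM_RETRIES 100

/*
 * Toy model of the SEM register: the "hardware" grants the bits only
 * if the resource is not already owned by the peer function.
 */
static uint32_t sem_reg;
static int held_by_other_func;	/* flip to 1 to simulate contention */

static void sem_write(uint32_t val)
{
	/* The device ignores the claim while the peer owns the bits. */
	if (!held_by_other_func)
		sem_reg = val;
}

static uint32_t sem_read(void)
{
	return sem_reg;
}

/* Same shape as ql_sem_trylock(): 0 means "acquired". */
static int sem_trylock(uint32_t sem_bits, uint32_t sem_mask)
{
	sem_write(sem_bits | sem_mask);
	return !(sem_read() & sem_bits);
}

/* Same shape as ql_sem_spinlock(): bounded retry loop. */
static int sem_spinlock(uint32_t sem_bits, uint32_t sem_mask)
{
	int wait_count = SEM_RETRIES;

	do {
		if (!sem_trylock(sem_bits, sem_mask))
			return 0;
		/* a real driver would sleep/delay here before retrying */
	} while (--wait_count);
	return -1;
}

/* Same shape as ql_sem_unlock(): writing just the mask clears the bits. */
static void sem_unlock(uint32_t sem_mask)
{
	sem_write(sem_mask);
}

int main(void)
{
	const uint32_t bits = 0x1, mask = bits << 16;

	if (sem_spinlock(bits, mask)) {
		printf("could not get semaphore\n");
		return 1;
	}
	printf("semaphore held, doing work\n");
	sem_unlock(mask);
	return 0;
}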
qdev              167 drivers/staging/qlge/qlge_main.c int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
qdev              173 drivers/staging/qlge/qlge_main.c 		temp = ql_read32(qdev, reg);
qdev              177 drivers/staging/qlge/qlge_main.c 			netif_alert(qdev, probe, qdev->ndev,
qdev              186 drivers/staging/qlge/qlge_main.c 	netif_alert(qdev, probe, qdev->ndev,
qdev              194 drivers/staging/qlge/qlge_main.c static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
qdev              200 drivers/staging/qlge/qlge_main.c 		temp = ql_read32(qdev, CFG);
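ql_wait_reg_rdy() above is the driver's generic polling helper: read a status register in a bounded loop, succeed once the ready bit sets, fail early if an error bit sets, and report a timeout when the budget runs out (ql_wait_cfg() is the same idea specialised to CFG). A small user-space sketch of that loop follows; the toy status_reg, the reg_read() behaviour and the UDELAY_COUNT value are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define UDELAY_COUNT 30		/* invented retry budget */

/* Toy register: bit 0 = ready, bit 1 = error. */
static uint32_t status_reg;

static uint32_t reg_read(void)
{
	/* Pretend the hardware becomes ready after a few polls. */
	static int polls;

	if (++polls == 3)
		status_reg |= 0x1;
	return status_reg;
}

/*
 * Same shape as ql_wait_reg_rdy(): return 0 once `bit` is set,
 * -1 if `err_bit` shows up or the retry budget is exhausted.
 */
static int wait_reg_rdy(uint32_t bit, uint32_t err_bit)
{
	int count = UDELAY_COUNT;

	while (count--) {
		uint32_t temp = reg_read();

		if (temp & err_bit) {
			fprintf(stderr, "register in error state\n");
			return -1;
		}
		if (temp & bit)
			return 0;
		/* a real driver would udelay() here between polls */
	}
	fprintf(stderr, "timed out waiting for ready bit\n");
	return -1;
}

int main(void)
{
	return wait_reg_rdy(0x1, 0x2) ? 1 : 0;
}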
qdev              215 drivers/staging/qlge/qlge_main.c int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
qdev              228 drivers/staging/qlge/qlge_main.c 	map = pci_map_single(qdev->pdev, ptr, size, direction);
qdev              229 drivers/staging/qlge/qlge_main.c 	if (pci_dma_mapping_error(qdev->pdev, map)) {
qdev              230 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
qdev              234 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
qdev              238 drivers/staging/qlge/qlge_main.c 	status = ql_wait_cfg(qdev, bit);
qdev              240 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev              245 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, ICB_L, (u32) map);
qdev              246 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
qdev              250 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, CFG, (mask | value));
qdev              255 drivers/staging/qlge/qlge_main.c 	status = ql_wait_cfg(qdev, bit);
qdev              257 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
qdev              258 drivers/staging/qlge/qlge_main.c 	pci_unmap_single(qdev->pdev, map, size, direction);
qdev              263 drivers/staging/qlge/qlge_main.c int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
qdev              274 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              278 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
qdev              282 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              286 drivers/staging/qlge/qlge_main.c 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
qdev              288 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              292 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
qdev              296 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              300 drivers/staging/qlge/qlge_main.c 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
qdev              303 drivers/staging/qlge/qlge_main.c 				    ql_wait_reg_rdy(qdev,
qdev              307 drivers/staging/qlge/qlge_main.c 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
qdev              311 drivers/staging/qlge/qlge_main.c 				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
qdev              315 drivers/staging/qlge/qlge_main.c 				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
qdev              322 drivers/staging/qlge/qlge_main.c 		netif_crit(qdev, ifup, qdev->ndev,
qdev              333 drivers/staging/qlge/qlge_main.c static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
qdev              347 drivers/staging/qlge/qlge_main.c 				ql_wait_reg_rdy(qdev,
qdev              351 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
qdev              354 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_DATA, lower);
qdev              356 drivers/staging/qlge/qlge_main.c 				ql_wait_reg_rdy(qdev,
qdev              360 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
qdev              364 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_DATA, upper);
qdev              366 drivers/staging/qlge/qlge_main.c 				ql_wait_reg_rdy(qdev,
qdev              380 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              384 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
qdev              387 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_DATA, lower);
qdev              389 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              393 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
qdev              396 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_DATA, upper);
qdev              398 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              402 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
qdev              410 drivers/staging/qlge/qlge_main.c 				      (qdev->
qdev              413 drivers/staging/qlge/qlge_main.c 			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
qdev              416 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
qdev              428 drivers/staging/qlge/qlge_main.c 			    ql_wait_reg_rdy(qdev,
qdev              432 drivers/staging/qlge/qlge_main.c 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
qdev              440 drivers/staging/qlge/qlge_main.c 		netif_crit(qdev, ifup, qdev->ndev,
qdev              452 drivers/staging/qlge/qlge_main.c static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
qdev              459 drivers/staging/qlge/qlge_main.c 		addr = &qdev->current_mac_addr[0];
qdev              460 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev              465 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev              468 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev              471 drivers/staging/qlge/qlge_main.c 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
qdev              472 drivers/staging/qlge/qlge_main.c 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
qdev              473 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev              475 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev              480 drivers/staging/qlge/qlge_main.c void ql_link_on(struct ql_adapter *qdev)
qdev              482 drivers/staging/qlge/qlge_main.c 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
qdev              483 drivers/staging/qlge/qlge_main.c 	netif_carrier_on(qdev->ndev);
qdev              484 drivers/staging/qlge/qlge_main.c 	ql_set_mac_addr(qdev, 1);
qdev              487 drivers/staging/qlge/qlge_main.c void ql_link_off(struct ql_adapter *qdev)
qdev              489 drivers/staging/qlge/qlge_main.c 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
qdev              490 drivers/staging/qlge/qlge_main.c 	netif_carrier_off(qdev->ndev);
qdev              491 drivers/staging/qlge/qlge_main.c 	ql_set_mac_addr(qdev, 0);
qdev              497 drivers/staging/qlge/qlge_main.c int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
qdev              501 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
qdev              505 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, RT_IDX,
qdev              507 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
qdev              510 drivers/staging/qlge/qlge_main.c 	*value = ql_read32(qdev, RT_DATA);
qdev              520 drivers/staging/qlge/qlge_main.c static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
qdev              600 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev              607 drivers/staging/qlge/qlge_main.c 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
qdev              611 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, RT_IDX, value);
qdev              612 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
qdev              618 drivers/staging/qlge/qlge_main.c static void ql_enable_interrupts(struct ql_adapter *qdev)
qdev              620 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
qdev              623 drivers/staging/qlge/qlge_main.c static void ql_disable_interrupts(struct ql_adapter *qdev)
qdev              625 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
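ql_enable_interrupts()/ql_disable_interrupts() above (like the STS write in ql_8012_port_initialize() further down) follow a mask-and-data register convention: the upper 16 bits of the written word select which bits to change and the lower 16 bits carry the new values, so (bit << 16) | bit sets a bit and (bit << 16) alone clears it without a read-modify-write cycle. The sketch below models that update rule in software; masked_write32() and the intr_en variable are invented, only the (mask << 16) | data convention is taken from the listing.

#include <stdint.h>
#include <stdio.h>

#define INTR_EN_EI 0x0001	/* example bit, as in the listing */

static uint16_t intr_en;	/* models the low 16 value bits */

/* Apply a "mask in the high half, data in the low half" write. */
static void masked_write32(uint32_t val)
{
	uint16_t mask = val >> 16;
	uint16_t data = val & 0xffff;

	intr_en = (intr_en & ~mask) | (data & mask);
}

int main(void)
{
	masked_write32((INTR_EN_EI << 16) | INTR_EN_EI);	/* enable */
	printf("after enable:  0x%04x\n", (unsigned)intr_en);

	masked_write32(INTR_EN_EI << 16);			/* disable */
	printf("after disable: 0x%04x\n", (unsigned)intr_en);
	return 0;
}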
qdev              634 drivers/staging/qlge/qlge_main.c u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
qdev              638 drivers/staging/qlge/qlge_main.c 	struct intr_context *ctx = qdev->intr_context + intr;
qdev              640 drivers/staging/qlge/qlge_main.c 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
qdev              644 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, INTR_EN,
qdev              646 drivers/staging/qlge/qlge_main.c 		var = ql_read32(qdev, STS);
qdev              650 drivers/staging/qlge/qlge_main.c 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
qdev              652 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, INTR_EN,
qdev              654 drivers/staging/qlge/qlge_main.c 		var = ql_read32(qdev, STS);
qdev              656 drivers/staging/qlge/qlge_main.c 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
qdev              660 drivers/staging/qlge/qlge_main.c static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
qdev              668 drivers/staging/qlge/qlge_main.c 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
qdev              671 drivers/staging/qlge/qlge_main.c 	ctx = qdev->intr_context + intr;
qdev              672 drivers/staging/qlge/qlge_main.c 	spin_lock(&qdev->hw_lock);
qdev              674 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, INTR_EN,
qdev              676 drivers/staging/qlge/qlge_main.c 		var = ql_read32(qdev, STS);
qdev              679 drivers/staging/qlge/qlge_main.c 	spin_unlock(&qdev->hw_lock);
qdev              683 drivers/staging/qlge/qlge_main.c static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
qdev              686 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->intr_count; i++) {
qdev              691 drivers/staging/qlge/qlge_main.c 		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
qdev              693 drivers/staging/qlge/qlge_main.c 			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
qdev              694 drivers/staging/qlge/qlge_main.c 		ql_enable_completion_interrupt(qdev, i);
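ql_enable_completion_interrupt() and ql_enable_all_completion_interrupts() above keep a per-vector irq_cnt and, for the shared legacy/MSI case, seed it to 1 so a later enable call performs the real unmask. The exact gating lines do not appear in this listing (only lines containing qdev are shown), so the sketch below assumes a simple decrement-to-zero / increment-from-zero gate; struct demo_intr_ctx, hw_mask() and hw_unmask() are invented, and the gate itself is an assumption rather than a statement of the driver's code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Assumed model: unmask only when the count drops to zero. */
struct demo_intr_ctx {
	atomic_int irq_cnt;
	bool masked;
};

static void hw_unmask(struct demo_intr_ctx *ctx) { ctx->masked = false; }
static void hw_mask(struct demo_intr_ctx *ctx)   { ctx->masked = true; }

static void enable_completion_interrupt(struct demo_intr_ctx *ctx)
{
	if (atomic_fetch_sub(&ctx->irq_cnt, 1) == 1)
		hw_unmask(ctx);		/* count hit zero: really unmask */
}

static void disable_completion_interrupt(struct demo_intr_ctx *ctx)
{
	if (atomic_fetch_add(&ctx->irq_cnt, 1) == 0)
		hw_mask(ctx);		/* first disable: really mask */
}

int main(void)
{
	struct demo_intr_ctx ctx = { .irq_cnt = 0, .masked = true };

	/* Seed to 1, as ql_enable_all_completion_interrupts() does. */
	atomic_store(&ctx.irq_cnt, 1);
	enable_completion_interrupt(&ctx);
	printf("masked after enable: %d\n", ctx.masked);

	disable_completion_interrupt(&ctx);
	printf("masked after disable: %d\n", ctx.masked);
	return 0;
}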
qdev              699 drivers/staging/qlge/qlge_main.c static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
qdev              703 drivers/staging/qlge/qlge_main.c 	__le16 *flash = (__le16 *)&qdev->flash;
qdev              705 drivers/staging/qlge/qlge_main.c 	status = strncmp((char *)&qdev->flash, str, 4);
qdev              707 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
qdev              715 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev              721 drivers/staging/qlge/qlge_main.c static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
qdev              725 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev,
qdev              730 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
qdev              732 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev,
qdev              740 drivers/staging/qlge/qlge_main.c 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
qdev              745 drivers/staging/qlge/qlge_main.c static int ql_get_8000_flash_params(struct ql_adapter *qdev)
qdev              749 drivers/staging/qlge/qlge_main.c 	__le32 *p = (__le32 *)&qdev->flash;
qdev              756 drivers/staging/qlge/qlge_main.c 	if (!qdev->port)
qdev              761 drivers/staging/qlge/qlge_main.c 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
qdev              766 drivers/staging/qlge/qlge_main.c 		status = ql_read_flash_word(qdev, i+offset, p);
qdev              768 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev              774 drivers/staging/qlge/qlge_main.c 	status = ql_validate_flash(qdev,
qdev              778 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
qdev              786 drivers/staging/qlge/qlge_main.c 	if (qdev->flash.flash_params_8000.data_type1 == 2)
qdev              788 drivers/staging/qlge/qlge_main.c 			qdev->flash.flash_params_8000.mac_addr1,
qdev              789 drivers/staging/qlge/qlge_main.c 			qdev->ndev->addr_len);
qdev              792 drivers/staging/qlge/qlge_main.c 			qdev->flash.flash_params_8000.mac_addr,
qdev              793 drivers/staging/qlge/qlge_main.c 			qdev->ndev->addr_len);
qdev              796 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
qdev              801 drivers/staging/qlge/qlge_main.c 	memcpy(qdev->ndev->dev_addr,
qdev              803 drivers/staging/qlge/qlge_main.c 		qdev->ndev->addr_len);
qdev              806 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
qdev              810 drivers/staging/qlge/qlge_main.c static int ql_get_8012_flash_params(struct ql_adapter *qdev)
qdev              814 drivers/staging/qlge/qlge_main.c 	__le32 *p = (__le32 *)&qdev->flash;
qdev              821 drivers/staging/qlge/qlge_main.c 	if (qdev->port)
qdev              824 drivers/staging/qlge/qlge_main.c 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
qdev              828 drivers/staging/qlge/qlge_main.c 		status = ql_read_flash_word(qdev, i+offset, p);
qdev              830 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev              837 drivers/staging/qlge/qlge_main.c 	status = ql_validate_flash(qdev,
qdev              841 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
qdev              846 drivers/staging/qlge/qlge_main.c 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
qdev              851 drivers/staging/qlge/qlge_main.c 	memcpy(qdev->ndev->dev_addr,
qdev              852 drivers/staging/qlge/qlge_main.c 		qdev->flash.flash_params_8012.mac_addr,
qdev              853 drivers/staging/qlge/qlge_main.c 		qdev->ndev->addr_len);
qdev              856 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
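The flash entries above show the shared shape of both parameter readers: take the flash semaphore, pull the parameter block one 32-bit word at a time through the FLASH_ADDR/FLASH_DATA window (ql_read_flash_word() writes FLASH_ADDR_R | offset, waits, then reads FLASH_DATA), validate the block with ql_validate_flash() starting from a four-character signature compare, and copy the MAC address out of the validated data. The sketch below reproduces only the word-by-word read plus signature check; fake_flash, struct demo_params and the "8012" signature string are invented for the example and are not the driver's actual layout or signature.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Invented backing store standing in for the FLASH_ADDR/FLASH_DATA window. */
static const uint8_t fake_flash[] = {
	'8', '0', '1', '2',	/* example signature bytes */
	0xdd, 0xcc, 0xbb, 0xaa,
	0x44, 0x33, 0x22, 0x11,
	0x88, 0x77, 0x66, 0x55,
};

struct demo_params {
	char     signature[4];
	uint32_t words[3];
};

/* Stand-in for ql_read_flash_word(): one 32-bit word per "window" access. */
static int read_flash_word(unsigned int offset, uint32_t *data)
{
	if ((offset + 1) * 4 > sizeof(fake_flash))
		return -1;
	memcpy(data, fake_flash + offset * 4, 4);
	return 0;
}

/* Same shape as ql_validate_flash()'s opening signature check. */
static int validate_flash(const struct demo_params *p, const char *str)
{
	if (strncmp(p->signature, str, 4)) {
		fprintf(stderr, "Invalid flash signature.\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_params params;
	uint32_t *p = (uint32_t *)&params;
	unsigned int i, words = sizeof(params) / sizeof(uint32_t);

	for (i = 0; i < words; i++, p++)
		if (read_flash_word(i, p))
			return 1;

	return validate_flash(&params, "8012") ? 1 : 0;
}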
qdev              864 drivers/staging/qlge/qlge_main.c static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
qdev              868 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev,
qdev              873 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, XGMAC_DATA, data);
qdev              875 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, XGMAC_ADDR, reg);
qdev              883 drivers/staging/qlge/qlge_main.c int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
qdev              887 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev,
qdev              892 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
qdev              894 drivers/staging/qlge/qlge_main.c 	status = ql_wait_reg_rdy(qdev,
qdev              899 drivers/staging/qlge/qlge_main.c 	*data = ql_read32(qdev, XGMAC_DATA);
qdev              905 drivers/staging/qlge/qlge_main.c int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
qdev              911 drivers/staging/qlge/qlge_main.c 	status = ql_read_xgmac_reg(qdev, reg, &lo);
qdev              915 drivers/staging/qlge/qlge_main.c 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
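ql_read_xgmac_reg64() above assembles each 64-bit XGMAC statistic from two 32-bit indirect reads, the low word at reg and the high word at reg + 4; the combining statement itself contains no qdev and so is absent from this listing, but the lo/hi naming implies the usual lo | (hi << 32) composition, which is what the sketch below shows with a fabricated 32-bit read helper (read_reg32()).

#include <stdint.h>
#include <stdio.h>

/* Fabricated 32-bit indirect read: returns a recognisable pattern. */
static int read_reg32(uint32_t reg, uint32_t *data)
{
	*data = 0xA0000000u | reg;
	return 0;
}

/* Same shape as ql_read_xgmac_reg64(): low word at reg, high at reg + 4. */
static int read_reg64(uint32_t reg, uint64_t *data)
{
	uint32_t lo = 0, hi = 0;

	if (read_reg32(reg, &lo))
		return -1;
	if (read_reg32(reg + 4, &hi))
		return -1;

	*data = (uint64_t)lo | ((uint64_t)hi << 32);
	return 0;
}

int main(void)
{
	uint64_t val;

	if (read_reg64(0x200, &val))
		return 1;
	printf("counter = 0x%016llx\n", (unsigned long long)val);
	return 0;
}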
qdev              925 drivers/staging/qlge/qlge_main.c static int ql_8000_port_initialize(struct ql_adapter *qdev)
qdev              932 drivers/staging/qlge/qlge_main.c 	status = ql_mb_about_fw(qdev);
qdev              935 drivers/staging/qlge/qlge_main.c 	status = ql_mb_get_fw_state(qdev);
qdev              939 drivers/staging/qlge/qlge_main.c 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
qdev              950 drivers/staging/qlge/qlge_main.c static int ql_8012_port_initialize(struct ql_adapter *qdev)
qdev              955 drivers/staging/qlge/qlge_main.c 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
qdev              959 drivers/staging/qlge/qlge_main.c 		netif_info(qdev, link, qdev->ndev,
qdev              961 drivers/staging/qlge/qlge_main.c 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
qdev              963 drivers/staging/qlge/qlge_main.c 			netif_crit(qdev, link, qdev->ndev,
qdev              969 drivers/staging/qlge/qlge_main.c 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
qdev              971 drivers/staging/qlge/qlge_main.c 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
qdev              975 drivers/staging/qlge/qlge_main.c 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
qdev              984 drivers/staging/qlge/qlge_main.c 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
qdev              989 drivers/staging/qlge/qlge_main.c 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
qdev              994 drivers/staging/qlge/qlge_main.c 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
qdev              999 drivers/staging/qlge/qlge_main.c 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
qdev             1004 drivers/staging/qlge/qlge_main.c 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
qdev             1010 drivers/staging/qlge/qlge_main.c 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
qdev             1014 drivers/staging/qlge/qlge_main.c 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
qdev             1019 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
qdev             1021 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
qdev             1025 drivers/staging/qlge/qlge_main.c static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
qdev             1027 drivers/staging/qlge/qlge_main.c 	return PAGE_SIZE << qdev->lbq_buf_order;
qdev             1041 drivers/staging/qlge/qlge_main.c static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
qdev             1046 drivers/staging/qlge/qlge_main.c 	pci_dma_sync_single_for_cpu(qdev->pdev,
qdev             1055 drivers/staging/qlge/qlge_main.c 					== ql_lbq_block_size(qdev))
qdev             1056 drivers/staging/qlge/qlge_main.c 		pci_unmap_page(qdev->pdev,
qdev             1058 drivers/staging/qlge/qlge_main.c 				ql_lbq_block_size(qdev),
qdev             1090 drivers/staging/qlge/qlge_main.c static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
qdev             1096 drivers/staging/qlge/qlge_main.c 						qdev->lbq_buf_order);
qdev             1098 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, drv, qdev->ndev,
qdev             1103 drivers/staging/qlge/qlge_main.c 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
qdev             1104 drivers/staging/qlge/qlge_main.c 					0, ql_lbq_block_size(qdev),
qdev             1106 drivers/staging/qlge/qlge_main.c 		if (pci_dma_mapping_error(qdev->pdev, map)) {
qdev             1108 drivers/staging/qlge/qlge_main.c 					qdev->lbq_buf_order);
qdev             1110 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, drv, qdev->ndev,
qdev             1127 drivers/staging/qlge/qlge_main.c 	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
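ql_lbq_block_size(), ql_get_curr_lchunk() and ql_get_next_chunk() above implement the large-buffer-queue chunking: a high-order allocation of PAGE_SIZE << lbq_buf_order bytes is DMA-mapped once and carved into fixed-size chunks by advancing pg_chunk.offset, and the mapping is torn down only when the offset reaches the block size. The user-space sketch below keeps just the carve-by-offset bookkeeping; malloc()/free() stand in for the page allocation and DMA mapping, and BLOCK_SIZE, CHUNK_SIZE and struct chunk_source are invented names.

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE	8192	/* stand-in for PAGE_SIZE << lbq_buf_order */
#define CHUNK_SIZE	2048	/* stand-in for lbq_buf_size */
#define MAX_BLOCKS	4

struct chunk_source {
	char	*block;			 /* current big allocation */
	size_t	 offset;		 /* next unused byte in it */
	char	*all_blocks[MAX_BLOCKS]; /* kept only so main() can free them */
	int	 nblocks;
};

/* Hand out the next CHUNK_SIZE slice, allocating a new block as needed. */
static char *get_next_chunk(struct chunk_source *cs)
{
	char *chunk;

	if (!cs->block) {
		if (cs->nblocks == MAX_BLOCKS)
			return NULL;
		cs->block = malloc(BLOCK_SIZE);
		if (!cs->block)
			return NULL;
		cs->all_blocks[cs->nblocks++] = cs->block;
		cs->offset = 0;
	}

	chunk = cs->block + cs->offset;
	cs->offset += CHUNK_SIZE;

	/* Last slice consumed: next call starts a fresh block. */
	if (cs->offset == BLOCK_SIZE)
		cs->block = NULL;

	return chunk;
}

int main(void)
{
	struct chunk_source cs = { 0 };
	int i;

	for (i = 0; i < 6; i++)
		printf("chunk %d at %p\n", i, (void *)get_next_chunk(&cs));

	for (i = 0; i < cs.nblocks; i++)
		free(cs.all_blocks[i]);
	return 0;
}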
qdev             1138 drivers/staging/qlge/qlge_main.c static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
qdev             1148 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1152 drivers/staging/qlge/qlge_main.c 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
qdev             1154 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, ifup, qdev->ndev,
qdev             1167 drivers/staging/qlge/qlge_main.c 			pci_dma_sync_single_for_device(qdev->pdev, map,
qdev             1183 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1192 drivers/staging/qlge/qlge_main.c static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
qdev             1203 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1207 drivers/staging/qlge/qlge_main.c 				netif_printk(qdev, rx_status, KERN_DEBUG,
qdev             1208 drivers/staging/qlge/qlge_main.c 					     qdev->ndev,
qdev             1212 drivers/staging/qlge/qlge_main.c 				    netdev_alloc_skb(qdev->ndev,
qdev             1219 drivers/staging/qlge/qlge_main.c 				map = pci_map_single(qdev->pdev,
qdev             1223 drivers/staging/qlge/qlge_main.c 				if (pci_dma_mapping_error(qdev->pdev, map)) {
qdev             1224 drivers/staging/qlge/qlge_main.c 					netif_err(qdev, ifup, qdev->ndev,
qdev             1249 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1257 drivers/staging/qlge/qlge_main.c static void ql_update_buffer_queues(struct ql_adapter *qdev,
qdev             1260 drivers/staging/qlge/qlge_main.c 	ql_update_sbq(qdev, rx_ring);
qdev             1261 drivers/staging/qlge/qlge_main.c 	ql_update_lbq(qdev, rx_ring);
qdev             1267 drivers/staging/qlge/qlge_main.c static void ql_unmap_send(struct ql_adapter *qdev,
qdev             1283 drivers/staging/qlge/qlge_main.c 				netif_printk(qdev, tx_done, KERN_DEBUG,
qdev             1284 drivers/staging/qlge/qlge_main.c 					     qdev->ndev,
qdev             1287 drivers/staging/qlge/qlge_main.c 			pci_unmap_single(qdev->pdev,
qdev             1294 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
qdev             1296 drivers/staging/qlge/qlge_main.c 			pci_unmap_page(qdev->pdev,
qdev             1309 drivers/staging/qlge/qlge_main.c static int ql_map_send(struct ql_adapter *qdev,
qdev             1320 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
qdev             1326 drivers/staging/qlge/qlge_main.c 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
qdev             1328 drivers/staging/qlge/qlge_main.c 	err = pci_dma_mapping_error(qdev->pdev, map);
qdev             1330 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, tx_queued, qdev->ndev,
qdev             1372 drivers/staging/qlge/qlge_main.c 			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
qdev             1375 drivers/staging/qlge/qlge_main.c 			err = pci_dma_mapping_error(qdev->pdev, map);
qdev             1377 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, tx_queued, qdev->ndev,
qdev             1400 drivers/staging/qlge/qlge_main.c 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
qdev             1403 drivers/staging/qlge/qlge_main.c 		err = dma_mapping_error(&qdev->pdev->dev, map);
qdev             1405 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, tx_queued, qdev->ndev,
qdev             1431 drivers/staging/qlge/qlge_main.c 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
qdev             1436 drivers/staging/qlge/qlge_main.c static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
qdev             1439 drivers/staging/qlge/qlge_main.c 	struct nic_stats *stats = &qdev->nic_stats;
qdev             1471 drivers/staging/qlge/qlge_main.c static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
qdev             1477 drivers/staging/qlge/qlge_main.c 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
qdev             1491 drivers/staging/qlge/qlge_main.c static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
qdev             1498 drivers/staging/qlge/qlge_main.c 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
qdev             1503 drivers/staging/qlge/qlge_main.c 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
qdev             1507 drivers/staging/qlge/qlge_main.c 	napi->dev = qdev->ndev;
qdev             1511 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1538 drivers/staging/qlge/qlge_main.c static void ql_process_mac_rx_page(struct ql_adapter *qdev,
qdev             1544 drivers/staging/qlge/qlge_main.c 	struct net_device *ndev = qdev->ndev;
qdev             1547 drivers/staging/qlge/qlge_main.c 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
qdev             1563 drivers/staging/qlge/qlge_main.c 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
qdev             1568 drivers/staging/qlge/qlge_main.c 	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
qdev             1574 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1580 drivers/staging/qlge/qlge_main.c 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1599 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1610 drivers/staging/qlge/qlge_main.c 				netif_printk(qdev, rx_status, KERN_DEBUG,
qdev             1611 drivers/staging/qlge/qlge_main.c 					     qdev->ndev,
qdev             1631 drivers/staging/qlge/qlge_main.c static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
qdev             1637 drivers/staging/qlge/qlge_main.c 	struct net_device *ndev = qdev->ndev;
qdev             1644 drivers/staging/qlge/qlge_main.c 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
qdev             1651 drivers/staging/qlge/qlge_main.c 	pci_dma_sync_single_for_cpu(qdev->pdev,
qdev             1658 drivers/staging/qlge/qlge_main.c 	pci_dma_sync_single_for_device(qdev->pdev,
qdev             1666 drivers/staging/qlge/qlge_main.c 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
qdev             1672 drivers/staging/qlge/qlge_main.c 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
qdev             1673 drivers/staging/qlge/qlge_main.c 		ql_check_lb_frame(qdev, skb);
qdev             1689 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1699 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1714 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1724 drivers/staging/qlge/qlge_main.c 				netif_printk(qdev, rx_status, KERN_DEBUG,
qdev             1725 drivers/staging/qlge/qlge_main.c 					     qdev->ndev,
qdev             1758 drivers/staging/qlge/qlge_main.c static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
qdev             1774 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1780 drivers/staging/qlge/qlge_main.c 		pci_unmap_single(qdev->pdev,
qdev             1794 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1801 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1812 drivers/staging/qlge/qlge_main.c 			pci_dma_sync_single_for_cpu(qdev->pdev,
qdev             1819 drivers/staging/qlge/qlge_main.c 			pci_dma_sync_single_for_device(qdev->pdev,
qdev             1828 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1835 drivers/staging/qlge/qlge_main.c 			pci_unmap_single(qdev->pdev,
qdev             1845 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1853 drivers/staging/qlge/qlge_main.c 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
qdev             1854 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1869 drivers/staging/qlge/qlge_main.c 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
qdev             1870 drivers/staging/qlge/qlge_main.c 			skb = netdev_alloc_skb(qdev->ndev, length);
qdev             1872 drivers/staging/qlge/qlge_main.c 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
qdev             1876 drivers/staging/qlge/qlge_main.c 			pci_unmap_page(qdev->pdev,
qdev             1882 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1892 drivers/staging/qlge/qlge_main.c 			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
qdev             1911 drivers/staging/qlge/qlge_main.c 		pci_unmap_single(qdev->pdev,
qdev             1925 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1933 drivers/staging/qlge/qlge_main.c 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
qdev             1937 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1950 drivers/staging/qlge/qlge_main.c 		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
qdev             1958 drivers/staging/qlge/qlge_main.c static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
qdev             1963 drivers/staging/qlge/qlge_main.c 	struct net_device *ndev = qdev->ndev;
qdev             1968 drivers/staging/qlge/qlge_main.c 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
qdev             1970 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             1978 drivers/staging/qlge/qlge_main.c 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
qdev             1993 drivers/staging/qlge/qlge_main.c 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
qdev             1994 drivers/staging/qlge/qlge_main.c 		ql_check_lb_frame(qdev, skb);
qdev             2001 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
qdev             2011 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2025 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2035 drivers/staging/qlge/qlge_main.c 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2053 drivers/staging/qlge/qlge_main.c static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
qdev             2059 drivers/staging/qlge/qlge_main.c 			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
qdev             2069 drivers/staging/qlge/qlge_main.c 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
qdev             2076 drivers/staging/qlge/qlge_main.c 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
qdev             2084 drivers/staging/qlge/qlge_main.c 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
qdev             2090 drivers/staging/qlge/qlge_main.c 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
qdev             2096 drivers/staging/qlge/qlge_main.c 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
qdev             2104 drivers/staging/qlge/qlge_main.c static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
qdev             2111 drivers/staging/qlge/qlge_main.c 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
qdev             2113 drivers/staging/qlge/qlge_main.c 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
qdev             2124 drivers/staging/qlge/qlge_main.c 			netif_warn(qdev, tx_done, qdev->ndev,
qdev             2128 drivers/staging/qlge/qlge_main.c 			netif_warn(qdev, tx_done, qdev->ndev,
qdev             2132 drivers/staging/qlge/qlge_main.c 			netif_warn(qdev, tx_done, qdev->ndev,
qdev             2136 drivers/staging/qlge/qlge_main.c 			netif_warn(qdev, tx_done, qdev->ndev,
qdev             2144 drivers/staging/qlge/qlge_main.c void ql_queue_fw_error(struct ql_adapter *qdev)
qdev             2146 drivers/staging/qlge/qlge_main.c 	ql_link_off(qdev);
qdev             2147 drivers/staging/qlge/qlge_main.c 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
qdev             2150 drivers/staging/qlge/qlge_main.c void ql_queue_asic_error(struct ql_adapter *qdev)
qdev             2152 drivers/staging/qlge/qlge_main.c 	ql_link_off(qdev);
qdev             2153 drivers/staging/qlge/qlge_main.c 	ql_disable_interrupts(qdev);
qdev             2158 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             2162 drivers/staging/qlge/qlge_main.c 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
qdev             2163 drivers/staging/qlge/qlge_main.c 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
qdev             2166 drivers/staging/qlge/qlge_main.c static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
qdev             2171 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, rx_err, qdev->ndev,
qdev             2173 drivers/staging/qlge/qlge_main.c 		ql_queue_fw_error(qdev);
qdev             2177 drivers/staging/qlge/qlge_main.c 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
qdev             2178 drivers/staging/qlge/qlge_main.c 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
qdev             2179 drivers/staging/qlge/qlge_main.c 		ql_queue_asic_error(qdev);
qdev             2183 drivers/staging/qlge/qlge_main.c 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
qdev             2184 drivers/staging/qlge/qlge_main.c 		ql_queue_asic_error(qdev);
qdev             2188 drivers/staging/qlge/qlge_main.c 		netdev_err(qdev->ndev, "PCI error occurred when reading "
qdev             2191 drivers/staging/qlge/qlge_main.c 		ql_queue_asic_error(qdev);
qdev             2195 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
qdev             2197 drivers/staging/qlge/qlge_main.c 		ql_queue_asic_error(qdev);
qdev             2204 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = rx_ring->qdev;
qdev             2213 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2223 drivers/staging/qlge/qlge_main.c 			ql_process_mac_tx_intr(qdev, net_rsp);
qdev             2226 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2237 drivers/staging/qlge/qlge_main.c 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
qdev             2238 drivers/staging/qlge/qlge_main.c 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
qdev             2244 drivers/staging/qlge/qlge_main.c 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
qdev             2252 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = rx_ring->qdev;
qdev             2260 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2268 drivers/staging/qlge/qlge_main.c 			ql_process_mac_rx_intr(qdev, rx_ring,
qdev             2274 drivers/staging/qlge/qlge_main.c 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
qdev             2278 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2289 drivers/staging/qlge/qlge_main.c 	ql_update_buffer_queues(qdev, rx_ring);
qdev             2297 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = rx_ring->qdev;
qdev             2300 drivers/staging/qlge/qlge_main.c 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
qdev             2302 drivers/staging/qlge/qlge_main.c 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
qdev             2307 drivers/staging/qlge/qlge_main.c 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
qdev             2308 drivers/staging/qlge/qlge_main.c 		trx_ring = &qdev->rx_ring[i];
qdev             2315 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
qdev             2327 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
qdev             2335 drivers/staging/qlge/qlge_main.c 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
qdev             2342 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             2345 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
qdev             2348 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
qdev             2359 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             2364 drivers/staging/qlge/qlge_main.c 		status = ql_adapter_down(qdev);
qdev             2366 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, link, qdev->ndev,
qdev             2376 drivers/staging/qlge/qlge_main.c 		status = ql_adapter_up(qdev);
qdev             2378 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, link, qdev->ndev,
qdev             2405 drivers/staging/qlge/qlge_main.c static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
qdev             2410 drivers/staging/qlge/qlge_main.c 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
qdev             2413 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             2420 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             2424 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev             2428 drivers/staging/qlge/qlge_main.c 	err = __qlge_vlan_rx_add_vid(qdev, vid);
qdev             2429 drivers/staging/qlge/qlge_main.c 	set_bit(vid, qdev->active_vlans);
qdev             2431 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev             2436 drivers/staging/qlge/qlge_main.c static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
qdev             2441 drivers/staging/qlge/qlge_main.c 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
qdev             2444 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             2451 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             2455 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev             2459 drivers/staging/qlge/qlge_main.c 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
qdev             2460 drivers/staging/qlge/qlge_main.c 	clear_bit(vid, qdev->active_vlans);
qdev             2462 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev             2467 drivers/staging/qlge/qlge_main.c static void qlge_restore_vlan(struct ql_adapter *qdev)
qdev             2472 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev             2476 drivers/staging/qlge/qlge_main.c 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
qdev             2477 drivers/staging/qlge/qlge_main.c 		__qlge_vlan_rx_add_vid(qdev, vid);
qdev             2479 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
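The qlge_restore_vlan() hits above show every VLAN ID recorded in qdev->active_vlans being replayed under the SEM_MAC_ADDR_MASK hardware semaphore after a reset. A minimal sketch of that pattern, assuming only the helpers and fields visible in the listed lines (ql_sem_spinlock, ql_sem_unlock, __qlge_vlan_rx_add_vid, active_vlans); this is an illustration, not the driver's exact function body:

        static void example_restore_vlan(struct ql_adapter *qdev)
        {
                u16 vid;

                /* Take the shared MAC-address semaphore before touching
                 * the CAM/VLAN filter registers. */
                if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
                        return;

                /* Re-program every VLAN that was active before the reset. */
                for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
                        __qlge_vlan_rx_add_vid(qdev, vid);

                ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
        }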
qdev             2498 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = rx_ring->qdev;
qdev             2499 drivers/staging/qlge/qlge_main.c 	struct intr_context *intr_context = &qdev->intr_context[0];
qdev             2503 drivers/staging/qlge/qlge_main.c 	spin_lock(&qdev->hw_lock);
qdev             2504 drivers/staging/qlge/qlge_main.c 	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
qdev             2505 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
qdev             2507 drivers/staging/qlge/qlge_main.c 		spin_unlock(&qdev->hw_lock);
qdev             2510 drivers/staging/qlge/qlge_main.c 	spin_unlock(&qdev->hw_lock);
qdev             2512 drivers/staging/qlge/qlge_main.c 	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
qdev             2518 drivers/staging/qlge/qlge_main.c 		ql_queue_asic_error(qdev);
qdev             2519 drivers/staging/qlge/qlge_main.c 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
qdev             2520 drivers/staging/qlge/qlge_main.c 		var = ql_read32(qdev, ERR_STS);
qdev             2521 drivers/staging/qlge/qlge_main.c 		netdev_err(qdev->ndev, "Resetting chip. "
qdev             2530 drivers/staging/qlge/qlge_main.c 		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
qdev             2535 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, intr, qdev->ndev,
qdev             2537 drivers/staging/qlge/qlge_main.c 		ql_disable_completion_interrupt(qdev, intr_context->intr);
qdev             2538 drivers/staging/qlge/qlge_main.c 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qdev             2540 drivers/staging/qlge/qlge_main.c 				qdev->workqueue, &qdev->mpi_work, 0);
qdev             2549 drivers/staging/qlge/qlge_main.c 	var = ql_read32(qdev, ISR1);
qdev             2551 drivers/staging/qlge/qlge_main.c 		netif_info(qdev, intr, qdev->ndev,
qdev             2553 drivers/staging/qlge/qlge_main.c 		ql_disable_completion_interrupt(qdev, intr_context->intr);
qdev             2557 drivers/staging/qlge/qlge_main.c 	ql_enable_completion_interrupt(qdev, intr_context->intr);
qdev             2638 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             2643 drivers/staging/qlge/qlge_main.c 	tx_ring = &qdev->tx_ring[tx_ring_idx];
qdev             2649 drivers/staging/qlge/qlge_main.c 		netif_info(qdev, tx_queued, qdev->ndev,
qdev             2671 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
qdev             2684 drivers/staging/qlge/qlge_main.c 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
qdev             2686 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, tx_queued, qdev->ndev,
qdev             2698 drivers/staging/qlge/qlge_main.c 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
qdev             2711 drivers/staging/qlge/qlge_main.c 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
qdev             2717 drivers/staging/qlge/qlge_main.c static void ql_free_shadow_space(struct ql_adapter *qdev)
qdev             2719 drivers/staging/qlge/qlge_main.c 	if (qdev->rx_ring_shadow_reg_area) {
qdev             2720 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev,
qdev             2722 drivers/staging/qlge/qlge_main.c 				    qdev->rx_ring_shadow_reg_area,
qdev             2723 drivers/staging/qlge/qlge_main.c 				    qdev->rx_ring_shadow_reg_dma);
qdev             2724 drivers/staging/qlge/qlge_main.c 		qdev->rx_ring_shadow_reg_area = NULL;
qdev             2726 drivers/staging/qlge/qlge_main.c 	if (qdev->tx_ring_shadow_reg_area) {
qdev             2727 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev,
qdev             2729 drivers/staging/qlge/qlge_main.c 				    qdev->tx_ring_shadow_reg_area,
qdev             2730 drivers/staging/qlge/qlge_main.c 				    qdev->tx_ring_shadow_reg_dma);
qdev             2731 drivers/staging/qlge/qlge_main.c 		qdev->tx_ring_shadow_reg_area = NULL;
qdev             2735 drivers/staging/qlge/qlge_main.c static int ql_alloc_shadow_space(struct ql_adapter *qdev)
qdev             2737 drivers/staging/qlge/qlge_main.c 	qdev->rx_ring_shadow_reg_area =
qdev             2738 drivers/staging/qlge/qlge_main.c 		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
qdev             2739 drivers/staging/qlge/qlge_main.c 				      &qdev->rx_ring_shadow_reg_dma);
qdev             2740 drivers/staging/qlge/qlge_main.c 	if (qdev->rx_ring_shadow_reg_area == NULL) {
qdev             2741 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             2746 drivers/staging/qlge/qlge_main.c 	qdev->tx_ring_shadow_reg_area =
qdev             2747 drivers/staging/qlge/qlge_main.c 		pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
qdev             2748 drivers/staging/qlge/qlge_main.c 				      &qdev->tx_ring_shadow_reg_dma);
qdev             2749 drivers/staging/qlge/qlge_main.c 	if (qdev->tx_ring_shadow_reg_area == NULL) {
qdev             2750 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             2757 drivers/staging/qlge/qlge_main.c 	pci_free_consistent(qdev->pdev,
qdev             2759 drivers/staging/qlge/qlge_main.c 			    qdev->rx_ring_shadow_reg_area,
qdev             2760 drivers/staging/qlge/qlge_main.c 			    qdev->rx_ring_shadow_reg_dma);
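ql_alloc_shadow_space() and ql_free_shadow_space() above pair two page-sized DMA-coherent allocations with an unwind path that releases the RX page if the TX page cannot be allocated. A minimal sketch of that alloc/unwind shape, reusing only the pci_zalloc_consistent()/pci_free_consistent() calls and shadow_reg fields that appear in the listing (error logging omitted):

        static int example_alloc_shadow_space(struct ql_adapter *qdev)
        {
                qdev->rx_ring_shadow_reg_area =
                        pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
                                              &qdev->rx_ring_shadow_reg_dma);
                if (!qdev->rx_ring_shadow_reg_area)
                        return -ENOMEM;

                qdev->tx_ring_shadow_reg_area =
                        pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
                                              &qdev->tx_ring_shadow_reg_dma);
                if (!qdev->tx_ring_shadow_reg_area) {
                        /* Unwind the first allocation on failure. */
                        pci_free_consistent(qdev->pdev, PAGE_SIZE,
                                            qdev->rx_ring_shadow_reg_area,
                                            qdev->rx_ring_shadow_reg_dma);
                        qdev->rx_ring_shadow_reg_area = NULL;
                        return -ENOMEM;
                }
                return 0;
        }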
qdev             2764 drivers/staging/qlge/qlge_main.c static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
qdev             2782 drivers/staging/qlge/qlge_main.c static void ql_free_tx_resources(struct ql_adapter *qdev,
qdev             2786 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
qdev             2794 drivers/staging/qlge/qlge_main.c static int ql_alloc_tx_resources(struct ql_adapter *qdev,
qdev             2798 drivers/staging/qlge/qlge_main.c 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
qdev             2813 drivers/staging/qlge/qlge_main.c 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
qdev             2817 drivers/staging/qlge/qlge_main.c 	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
qdev             2821 drivers/staging/qlge/qlge_main.c static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
qdev             2833 drivers/staging/qlge/qlge_main.c 			pci_unmap_page(qdev->pdev,
qdev             2835 drivers/staging/qlge/qlge_main.c 				ql_lbq_block_size(qdev),
qdev             2848 drivers/staging/qlge/qlge_main.c 		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
qdev             2849 drivers/staging/qlge/qlge_main.c 			ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
qdev             2855 drivers/staging/qlge/qlge_main.c static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
qdev             2863 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             2868 drivers/staging/qlge/qlge_main.c 			pci_unmap_single(qdev->pdev,
qdev             2881 drivers/staging/qlge/qlge_main.c static void ql_free_rx_buffers(struct ql_adapter *qdev)
qdev             2886 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rx_ring_count; i++) {
qdev             2887 drivers/staging/qlge/qlge_main.c 		rx_ring = &qdev->rx_ring[i];
qdev             2889 drivers/staging/qlge/qlge_main.c 			ql_free_lbq_buffers(qdev, rx_ring);
qdev             2891 drivers/staging/qlge/qlge_main.c 			ql_free_sbq_buffers(qdev, rx_ring);
qdev             2895 drivers/staging/qlge/qlge_main.c static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
qdev             2900 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rx_ring_count; i++) {
qdev             2901 drivers/staging/qlge/qlge_main.c 		rx_ring = &qdev->rx_ring[i];
qdev             2903 drivers/staging/qlge/qlge_main.c 			ql_update_buffer_queues(qdev, rx_ring);
qdev             2907 drivers/staging/qlge/qlge_main.c static void ql_init_lbq_ring(struct ql_adapter *qdev,
qdev             2924 drivers/staging/qlge/qlge_main.c static void ql_init_sbq_ring(struct ql_adapter *qdev,
qdev             2941 drivers/staging/qlge/qlge_main.c static void ql_free_rx_resources(struct ql_adapter *qdev,
qdev             2946 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev,
qdev             2958 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev,
qdev             2970 drivers/staging/qlge/qlge_main.c 		pci_free_consistent(qdev->pdev,
qdev             2979 drivers/staging/qlge/qlge_main.c static int ql_alloc_rx_resources(struct ql_adapter *qdev,
qdev             2987 drivers/staging/qlge/qlge_main.c 	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
qdev             2991 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
qdev             3000 drivers/staging/qlge/qlge_main.c 		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
qdev             3004 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3018 drivers/staging/qlge/qlge_main.c 		ql_init_sbq_ring(qdev, rx_ring);
qdev             3026 drivers/staging/qlge/qlge_main.c 		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
qdev             3030 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3043 drivers/staging/qlge/qlge_main.c 		ql_init_lbq_ring(qdev, rx_ring);
qdev             3049 drivers/staging/qlge/qlge_main.c 	ql_free_rx_resources(qdev, rx_ring);
qdev             3053 drivers/staging/qlge/qlge_main.c static void ql_tx_ring_clean(struct ql_adapter *qdev)
qdev             3063 drivers/staging/qlge/qlge_main.c 	for (j = 0; j < qdev->tx_ring_count; j++) {
qdev             3064 drivers/staging/qlge/qlge_main.c 		tx_ring = &qdev->tx_ring[j];
qdev             3068 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, ifdown, qdev->ndev,
qdev             3072 drivers/staging/qlge/qlge_main.c 				ql_unmap_send(qdev, tx_ring_desc,
qdev             3081 drivers/staging/qlge/qlge_main.c static void ql_free_mem_resources(struct ql_adapter *qdev)
qdev             3085 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->tx_ring_count; i++)
qdev             3086 drivers/staging/qlge/qlge_main.c 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
qdev             3087 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rx_ring_count; i++)
qdev             3088 drivers/staging/qlge/qlge_main.c 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
qdev             3089 drivers/staging/qlge/qlge_main.c 	ql_free_shadow_space(qdev);
qdev             3092 drivers/staging/qlge/qlge_main.c static int ql_alloc_mem_resources(struct ql_adapter *qdev)
qdev             3097 drivers/staging/qlge/qlge_main.c 	if (ql_alloc_shadow_space(qdev))
qdev             3100 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rx_ring_count; i++) {
qdev             3101 drivers/staging/qlge/qlge_main.c 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
qdev             3102 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3108 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->tx_ring_count; i++) {
qdev             3109 drivers/staging/qlge/qlge_main.c 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
qdev             3110 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3118 drivers/staging/qlge/qlge_main.c 	ql_free_mem_resources(qdev);
qdev             3126 drivers/staging/qlge/qlge_main.c static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
qdev             3129 drivers/staging/qlge/qlge_main.c 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
qdev             3131 drivers/staging/qlge/qlge_main.c 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
qdev             3134 drivers/staging/qlge/qlge_main.c 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
qdev             3233 drivers/staging/qlge/qlge_main.c 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
qdev             3234 drivers/staging/qlge/qlge_main.c 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
qdev             3240 drivers/staging/qlge/qlge_main.c 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
qdev             3242 drivers/staging/qlge/qlge_main.c 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
qdev             3243 drivers/staging/qlge/qlge_main.c 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
qdev             3246 drivers/staging/qlge/qlge_main.c 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev             3249 drivers/staging/qlge/qlge_main.c 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
qdev             3252 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
qdev             3258 drivers/staging/qlge/qlge_main.c static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
qdev             3262 drivers/staging/qlge/qlge_main.c 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
qdev             3263 drivers/staging/qlge/qlge_main.c 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
qdev             3265 drivers/staging/qlge/qlge_main.c 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
qdev             3293 drivers/staging/qlge/qlge_main.c 	ql_init_tx_ring(qdev, tx_ring);
qdev             3295 drivers/staging/qlge/qlge_main.c 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
qdev             3298 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
qdev             3304 drivers/staging/qlge/qlge_main.c static void ql_disable_msix(struct ql_adapter *qdev)
qdev             3306 drivers/staging/qlge/qlge_main.c 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
qdev             3307 drivers/staging/qlge/qlge_main.c 		pci_disable_msix(qdev->pdev);
qdev             3308 drivers/staging/qlge/qlge_main.c 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
qdev             3309 drivers/staging/qlge/qlge_main.c 		kfree(qdev->msi_x_entry);
qdev             3310 drivers/staging/qlge/qlge_main.c 		qdev->msi_x_entry = NULL;
qdev             3311 drivers/staging/qlge/qlge_main.c 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
qdev             3312 drivers/staging/qlge/qlge_main.c 		pci_disable_msi(qdev->pdev);
qdev             3313 drivers/staging/qlge/qlge_main.c 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
qdev             3321 drivers/staging/qlge/qlge_main.c static void ql_enable_msix(struct ql_adapter *qdev)
qdev             3330 drivers/staging/qlge/qlge_main.c 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
qdev             3333 drivers/staging/qlge/qlge_main.c 		if (!qdev->msi_x_entry) {
qdev             3338 drivers/staging/qlge/qlge_main.c 		for (i = 0; i < qdev->intr_count; i++)
qdev             3339 drivers/staging/qlge/qlge_main.c 			qdev->msi_x_entry[i].entry = i;
qdev             3341 drivers/staging/qlge/qlge_main.c 		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
qdev             3342 drivers/staging/qlge/qlge_main.c 					    1, qdev->intr_count);
qdev             3344 drivers/staging/qlge/qlge_main.c 			kfree(qdev->msi_x_entry);
qdev             3345 drivers/staging/qlge/qlge_main.c 			qdev->msi_x_entry = NULL;
qdev             3346 drivers/staging/qlge/qlge_main.c 			netif_warn(qdev, ifup, qdev->ndev,
qdev             3350 drivers/staging/qlge/qlge_main.c 			qdev->intr_count = err;
qdev             3351 drivers/staging/qlge/qlge_main.c 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
qdev             3352 drivers/staging/qlge/qlge_main.c 			netif_info(qdev, ifup, qdev->ndev,
qdev             3354 drivers/staging/qlge/qlge_main.c 				   qdev->intr_count);
qdev             3359 drivers/staging/qlge/qlge_main.c 	qdev->intr_count = 1;
qdev             3361 drivers/staging/qlge/qlge_main.c 		if (!pci_enable_msi(qdev->pdev)) {
qdev             3362 drivers/staging/qlge/qlge_main.c 			set_bit(QL_MSI_ENABLED, &qdev->flags);
qdev             3363 drivers/staging/qlge/qlge_main.c 			netif_info(qdev, ifup, qdev->ndev,
qdev             3369 drivers/staging/qlge/qlge_main.c 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
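ql_enable_msix() above tries MSI-X first, keeps however many vectors pci_enable_msix_range() actually grants, and only then falls back to a single MSI and finally the legacy INTx line. A condensed sketch of that fallback order, assuming the qdev fields and the QL_MSIX_ENABLED/QL_MSI_ENABLED flag bits shown in the listing; module-parameter checks and logging in the real function are left out:

        static void example_enable_msix(struct ql_adapter *qdev)
        {
                int i, err;

                qdev->msi_x_entry = kcalloc(qdev->intr_count,
                                            sizeof(struct msix_entry), GFP_KERNEL);
                if (qdev->msi_x_entry) {
                        for (i = 0; i < qdev->intr_count; i++)
                                qdev->msi_x_entry[i].entry = i;

                        /* Ask for up to intr_count vectors, accept as few as 1. */
                        err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
                                                    1, qdev->intr_count);
                        if (err > 0) {
                                qdev->intr_count = err; /* may be fewer than asked */
                                set_bit(QL_MSIX_ENABLED, &qdev->flags);
                                return;
                        }
                        kfree(qdev->msi_x_entry);
                        qdev->msi_x_entry = NULL;
                }

                /* Fall back to one MSI, then to the legacy interrupt line. */
                qdev->intr_count = 1;
                if (!pci_enable_msi(qdev->pdev))
                        set_bit(QL_MSI_ENABLED, &qdev->flags);
        }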
qdev             3382 drivers/staging/qlge/qlge_main.c static void ql_set_tx_vect(struct ql_adapter *qdev)
qdev             3385 drivers/staging/qlge/qlge_main.c 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
qdev             3387 drivers/staging/qlge/qlge_main.c 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
qdev             3389 drivers/staging/qlge/qlge_main.c 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
qdev             3390 drivers/staging/qlge/qlge_main.c 					 i < qdev->rx_ring_count; i++) {
qdev             3395 drivers/staging/qlge/qlge_main.c 			qdev->rx_ring[i].irq = vect;
qdev             3402 drivers/staging/qlge/qlge_main.c 		for (i = 0; i < qdev->rx_ring_count; i++)
qdev             3403 drivers/staging/qlge/qlge_main.c 			qdev->rx_ring[i].irq = 0;
qdev             3412 drivers/staging/qlge/qlge_main.c static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
qdev             3415 drivers/staging/qlge/qlge_main.c 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
qdev             3417 drivers/staging/qlge/qlge_main.c 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
qdev             3421 drivers/staging/qlge/qlge_main.c 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
qdev             3426 drivers/staging/qlge/qlge_main.c 			(1 << qdev->rx_ring[qdev->rss_ring_count +
qdev             3433 drivers/staging/qlge/qlge_main.c 		for (j = 0; j < qdev->rx_ring_count; j++)
qdev             3434 drivers/staging/qlge/qlge_main.c 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
qdev             3444 drivers/staging/qlge/qlge_main.c static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
qdev             3447 drivers/staging/qlge/qlge_main.c 	struct intr_context *intr_context = &qdev->intr_context[0];
qdev             3449 drivers/staging/qlge/qlge_main.c 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
qdev             3454 drivers/staging/qlge/qlge_main.c 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
qdev             3455 drivers/staging/qlge/qlge_main.c 			qdev->rx_ring[i].irq = i;
qdev             3457 drivers/staging/qlge/qlge_main.c 			intr_context->qdev = qdev;
qdev             3461 drivers/staging/qlge/qlge_main.c 			ql_set_irq_mask(qdev, intr_context);
qdev             3486 drivers/staging/qlge/qlge_main.c 					qdev->ndev->name, i);
qdev             3493 drivers/staging/qlge/qlge_main.c 					qdev->ndev->name, i);
qdev             3502 drivers/staging/qlge/qlge_main.c 		intr_context->qdev = qdev;
qdev             3518 drivers/staging/qlge/qlge_main.c 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
qdev             3524 drivers/staging/qlge/qlge_main.c 		ql_set_irq_mask(qdev, intr_context);
qdev             3529 drivers/staging/qlge/qlge_main.c 	ql_set_tx_vect(qdev);
qdev             3532 drivers/staging/qlge/qlge_main.c static void ql_free_irq(struct ql_adapter *qdev)
qdev             3535 drivers/staging/qlge/qlge_main.c 	struct intr_context *intr_context = &qdev->intr_context[0];
qdev             3537 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
qdev             3539 drivers/staging/qlge/qlge_main.c 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
qdev             3540 drivers/staging/qlge/qlge_main.c 				free_irq(qdev->msi_x_entry[i].vector,
qdev             3541 drivers/staging/qlge/qlge_main.c 					 &qdev->rx_ring[i]);
qdev             3543 drivers/staging/qlge/qlge_main.c 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
qdev             3547 drivers/staging/qlge/qlge_main.c 	ql_disable_msix(qdev);
qdev             3550 drivers/staging/qlge/qlge_main.c static int ql_request_irq(struct ql_adapter *qdev)
qdev             3554 drivers/staging/qlge/qlge_main.c 	struct pci_dev *pdev = qdev->pdev;
qdev             3555 drivers/staging/qlge/qlge_main.c 	struct intr_context *intr_context = &qdev->intr_context[0];
qdev             3557 drivers/staging/qlge/qlge_main.c 	ql_resolve_queues_to_irqs(qdev);
qdev             3559 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
qdev             3561 drivers/staging/qlge/qlge_main.c 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
qdev             3562 drivers/staging/qlge/qlge_main.c 			status = request_irq(qdev->msi_x_entry[i].vector,
qdev             3566 drivers/staging/qlge/qlge_main.c 					     &qdev->rx_ring[i]);
qdev             3568 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, ifup, qdev->ndev,
qdev             3574 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev             3576 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev             3578 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev             3581 drivers/staging/qlge/qlge_main.c 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
qdev             3583 drivers/staging/qlge/qlge_main.c 				     &qdev->rx_ring[0]);
qdev             3587 drivers/staging/qlge/qlge_main.c 						 &qdev->
qdev             3589 drivers/staging/qlge/qlge_main.c 					intr_context->name, &qdev->rx_ring[0]);
qdev             3593 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3596 drivers/staging/qlge/qlge_main.c 				  qdev->rx_ring[0].type == DEFAULT_Q ?
qdev             3598 drivers/staging/qlge/qlge_main.c 				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
qdev             3599 drivers/staging/qlge/qlge_main.c 				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
qdev             3606 drivers/staging/qlge/qlge_main.c 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
qdev             3607 drivers/staging/qlge/qlge_main.c 	ql_free_irq(qdev);
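ql_request_irq() above hooks one handler per MSI-X vector, passing the matching rx_ring as the cookie, and otherwise registers a single (possibly shared) legacy interrupt against rx_ring[0]. A rough sketch under those assumptions; the handler member of struct intr_context is taken on faith here and does not appear in the listed lines:

        static int example_request_irq(struct ql_adapter *qdev)
        {
                struct intr_context *ctx = &qdev->intr_context[0];
                int i, status = 0;

                for (i = 0; i < qdev->intr_count; i++, ctx++) {
                        if (test_bit(QL_MSIX_ENABLED, &qdev->flags))
                                /* Dedicated vector: cookie is this vector's rx_ring. */
                                status = request_irq(qdev->msi_x_entry[i].vector,
                                                     ctx->handler, 0, ctx->name,
                                                     &qdev->rx_ring[i]);
                        else
                                /* Legacy line: everything funnels into rx_ring[0]. */
                                status = request_irq(qdev->pdev->irq, ctx->handler,
                                                     IRQF_SHARED, ctx->name,
                                                     &qdev->rx_ring[0]);
                        if (status)
                                break;
                }
                return status;
        }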
qdev             3611 drivers/staging/qlge/qlge_main.c static int ql_start_rss(struct ql_adapter *qdev)
qdev             3620 drivers/staging/qlge/qlge_main.c 	struct ricb *ricb = &qdev->ricb;
qdev             3636 drivers/staging/qlge/qlge_main.c 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
qdev             3641 drivers/staging/qlge/qlge_main.c 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
qdev             3643 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
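The hash_id[i] = (i & (qdev->rss_ring_count - 1)) line in ql_start_rss() above fills the RSS indirection table so that hash buckets cycle evenly over the RSS completion rings; the mask works because the ring count is a power of two, making the AND equivalent to a modulo. A small illustration of that mapping (the table length parameter is an assumption for the example, not taken from the listing):

        static void example_fill_rss_table(u8 *hash_id, int table_len,
                                           int rss_ring_count)
        {
                int i;

                /* For power-of-two ring counts, i & (n - 1) == i % n, so the
                 * table repeats 0, 1, ..., rss_ring_count - 1 end to end. */
                for (i = 0; i < table_len; i++)
                        hash_id[i] = i & (rss_ring_count - 1);
        }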
qdev             3649 drivers/staging/qlge/qlge_main.c static int ql_clear_routing_entries(struct ql_adapter *qdev)
qdev             3653 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
qdev             3658 drivers/staging/qlge/qlge_main.c 		status = ql_set_routing_reg(qdev, i, 0, 0);
qdev             3660 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3665 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
qdev             3670 drivers/staging/qlge/qlge_main.c static int ql_route_initialize(struct ql_adapter *qdev)
qdev             3675 drivers/staging/qlge/qlge_main.c 	status = ql_clear_routing_entries(qdev);
qdev             3679 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
qdev             3683 drivers/staging/qlge/qlge_main.c 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
qdev             3686 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             3691 drivers/staging/qlge/qlge_main.c 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
qdev             3694 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             3699 drivers/staging/qlge/qlge_main.c 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
qdev             3701 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             3708 drivers/staging/qlge/qlge_main.c 	if (qdev->rss_ring_count > 1) {
qdev             3709 drivers/staging/qlge/qlge_main.c 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
qdev             3712 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3718 drivers/staging/qlge/qlge_main.c 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
qdev             3721 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             3724 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
qdev             3728 drivers/staging/qlge/qlge_main.c int ql_cam_route_initialize(struct ql_adapter *qdev)
qdev             3736 drivers/staging/qlge/qlge_main.c 	set = ql_read32(qdev, STS);
qdev             3737 drivers/staging/qlge/qlge_main.c 	set &= qdev->port_link_up;
qdev             3738 drivers/staging/qlge/qlge_main.c 	status = ql_set_mac_addr(qdev, set);
qdev             3740 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
qdev             3744 drivers/staging/qlge/qlge_main.c 	status = ql_route_initialize(qdev);
qdev             3746 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
qdev             3751 drivers/staging/qlge/qlge_main.c static int ql_adapter_initialize(struct ql_adapter *qdev)
qdev             3762 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, SYS, mask | value);
qdev             3767 drivers/staging/qlge/qlge_main.c 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
qdev             3771 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
qdev             3774 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
qdev             3784 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, FSC, mask | value);
qdev             3786 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
qdev             3793 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
qdev             3798 drivers/staging/qlge/qlge_main.c 	value = ql_read32(qdev, MGMT_RCV_CFG);
qdev             3803 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, MGMT_RCV_CFG, mask);
qdev             3804 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
qdev             3807 drivers/staging/qlge/qlge_main.c 	if (qdev->pdev->subsystem_device == 0x0068 ||
qdev             3808 drivers/staging/qlge/qlge_main.c 			qdev->pdev->subsystem_device == 0x0180)
qdev             3809 drivers/staging/qlge/qlge_main.c 		qdev->wol = WAKE_MAGIC;
qdev             3812 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rx_ring_count; i++) {
qdev             3813 drivers/staging/qlge/qlge_main.c 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
qdev             3815 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3824 drivers/staging/qlge/qlge_main.c 	if (qdev->rss_ring_count > 1) {
qdev             3825 drivers/staging/qlge/qlge_main.c 		status = ql_start_rss(qdev);
qdev             3827 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
qdev             3833 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->tx_ring_count; i++) {
qdev             3834 drivers/staging/qlge/qlge_main.c 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
qdev             3836 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             3843 drivers/staging/qlge/qlge_main.c 	status = qdev->nic_ops->port_initialize(qdev);
qdev             3845 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
qdev             3848 drivers/staging/qlge/qlge_main.c 	status = ql_cam_route_initialize(qdev);
qdev             3850 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             3856 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rss_ring_count; i++)
qdev             3857 drivers/staging/qlge/qlge_main.c 		napi_enable(&qdev->rx_ring[i].napi);
qdev             3863 drivers/staging/qlge/qlge_main.c static int ql_adapter_reset(struct ql_adapter *qdev)
qdev             3870 drivers/staging/qlge/qlge_main.c 	status = ql_clear_routing_entries(qdev);
qdev             3872 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
qdev             3879 drivers/staging/qlge/qlge_main.c 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
qdev             3881 drivers/staging/qlge/qlge_main.c 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
qdev             3884 drivers/staging/qlge/qlge_main.c 		ql_wait_fifo_empty(qdev);
qdev             3886 drivers/staging/qlge/qlge_main.c 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
qdev             3888 drivers/staging/qlge/qlge_main.c 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
qdev             3892 drivers/staging/qlge/qlge_main.c 		value = ql_read32(qdev, RST_FO);
qdev             3899 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifdown, qdev->ndev,
qdev             3905 drivers/staging/qlge/qlge_main.c 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
qdev             3911 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             3913 drivers/staging/qlge/qlge_main.c 	netif_info(qdev, probe, qdev->ndev,
qdev             3916 drivers/staging/qlge/qlge_main.c 		   qdev->func,
qdev             3917 drivers/staging/qlge/qlge_main.c 		   qdev->port,
qdev             3918 drivers/staging/qlge/qlge_main.c 		   qdev->chip_rev_id & 0x0000000f,
qdev             3919 drivers/staging/qlge/qlge_main.c 		   qdev->chip_rev_id >> 4 & 0x0000000f,
qdev             3920 drivers/staging/qlge/qlge_main.c 		   qdev->chip_rev_id >> 8 & 0x0000000f,
qdev             3921 drivers/staging/qlge/qlge_main.c 		   qdev->chip_rev_id >> 12 & 0x0000000f);
qdev             3922 drivers/staging/qlge/qlge_main.c 	netif_info(qdev, probe, qdev->ndev,
qdev             3926 drivers/staging/qlge/qlge_main.c static int ql_wol(struct ql_adapter *qdev)
qdev             3938 drivers/staging/qlge/qlge_main.c 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
qdev             3940 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifdown, qdev->ndev,
qdev             3942 drivers/staging/qlge/qlge_main.c 			  qdev->wol);
qdev             3946 drivers/staging/qlge/qlge_main.c 	if (qdev->wol & WAKE_MAGIC) {
qdev             3947 drivers/staging/qlge/qlge_main.c 		status = ql_mb_wol_set_magic(qdev, 1);
qdev             3949 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifdown, qdev->ndev,
qdev             3951 drivers/staging/qlge/qlge_main.c 				  qdev->ndev->name);
qdev             3954 drivers/staging/qlge/qlge_main.c 			netif_info(qdev, drv, qdev->ndev,
qdev             3956 drivers/staging/qlge/qlge_main.c 				   qdev->ndev->name);
qdev             3961 drivers/staging/qlge/qlge_main.c 	if (qdev->wol) {
qdev             3963 drivers/staging/qlge/qlge_main.c 		status = ql_mb_wol_mode(qdev, wol);
qdev             3964 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, drv, qdev->ndev,
qdev             3967 drivers/staging/qlge/qlge_main.c 			  wol, qdev->ndev->name);
qdev             3973 drivers/staging/qlge/qlge_main.c static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
qdev             3979 drivers/staging/qlge/qlge_main.c 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
qdev             3980 drivers/staging/qlge/qlge_main.c 		cancel_delayed_work_sync(&qdev->asic_reset_work);
qdev             3981 drivers/staging/qlge/qlge_main.c 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
qdev             3982 drivers/staging/qlge/qlge_main.c 	cancel_delayed_work_sync(&qdev->mpi_work);
qdev             3983 drivers/staging/qlge/qlge_main.c 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
qdev             3984 drivers/staging/qlge/qlge_main.c 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
qdev             3985 drivers/staging/qlge/qlge_main.c 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
qdev             3988 drivers/staging/qlge/qlge_main.c static int ql_adapter_down(struct ql_adapter *qdev)
qdev             3992 drivers/staging/qlge/qlge_main.c 	ql_link_off(qdev);
qdev             3994 drivers/staging/qlge/qlge_main.c 	ql_cancel_all_work_sync(qdev);
qdev             3996 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rss_ring_count; i++)
qdev             3997 drivers/staging/qlge/qlge_main.c 		napi_disable(&qdev->rx_ring[i].napi);
qdev             3999 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             4001 drivers/staging/qlge/qlge_main.c 	ql_disable_interrupts(qdev);
qdev             4003 drivers/staging/qlge/qlge_main.c 	ql_tx_ring_clean(qdev);
qdev             4007 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rss_ring_count; i++)
qdev             4008 drivers/staging/qlge/qlge_main.c 		netif_napi_del(&qdev->rx_ring[i].napi);
qdev             4010 drivers/staging/qlge/qlge_main.c 	status = ql_adapter_reset(qdev);
qdev             4012 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
qdev             4013 drivers/staging/qlge/qlge_main.c 			  qdev->func);
qdev             4014 drivers/staging/qlge/qlge_main.c 	ql_free_rx_buffers(qdev);
qdev             4019 drivers/staging/qlge/qlge_main.c static int ql_adapter_up(struct ql_adapter *qdev)
qdev             4023 drivers/staging/qlge/qlge_main.c 	err = ql_adapter_initialize(qdev);
qdev             4025 drivers/staging/qlge/qlge_main.c 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
qdev             4028 drivers/staging/qlge/qlge_main.c 	set_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             4029 drivers/staging/qlge/qlge_main.c 	ql_alloc_rx_buffers(qdev);
qdev             4033 drivers/staging/qlge/qlge_main.c 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
qdev             4034 drivers/staging/qlge/qlge_main.c 			(ql_read32(qdev, STS) & qdev->port_link_up))
qdev             4035 drivers/staging/qlge/qlge_main.c 		ql_link_on(qdev);
qdev             4037 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_ALLMULTI, &qdev->flags);
qdev             4038 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
qdev             4039 drivers/staging/qlge/qlge_main.c 	qlge_set_multicast_list(qdev->ndev);
qdev             4042 drivers/staging/qlge/qlge_main.c 	qlge_restore_vlan(qdev);
qdev             4044 drivers/staging/qlge/qlge_main.c 	ql_enable_interrupts(qdev);
qdev             4045 drivers/staging/qlge/qlge_main.c 	ql_enable_all_completion_interrupts(qdev);
qdev             4046 drivers/staging/qlge/qlge_main.c 	netif_tx_start_all_queues(qdev->ndev);
qdev             4050 drivers/staging/qlge/qlge_main.c 	ql_adapter_reset(qdev);
qdev             4054 drivers/staging/qlge/qlge_main.c static void ql_release_adapter_resources(struct ql_adapter *qdev)
qdev             4056 drivers/staging/qlge/qlge_main.c 	ql_free_mem_resources(qdev);
qdev             4057 drivers/staging/qlge/qlge_main.c 	ql_free_irq(qdev);
qdev             4060 drivers/staging/qlge/qlge_main.c static int ql_get_adapter_resources(struct ql_adapter *qdev)
qdev             4064 drivers/staging/qlge/qlge_main.c 	if (ql_alloc_mem_resources(qdev)) {
qdev             4065 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Unable to  allocate memory.\n");
qdev             4068 drivers/staging/qlge/qlge_main.c 	status = ql_request_irq(qdev);
qdev             4074 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4080 drivers/staging/qlge/qlge_main.c 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
qdev             4081 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
qdev             4082 drivers/staging/qlge/qlge_main.c 		clear_bit(QL_EEH_FATAL, &qdev->flags);
qdev             4090 drivers/staging/qlge/qlge_main.c 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
qdev             4092 drivers/staging/qlge/qlge_main.c 	ql_adapter_down(qdev);
qdev             4093 drivers/staging/qlge/qlge_main.c 	ql_release_adapter_resources(qdev);
qdev             4097 drivers/staging/qlge/qlge_main.c static int ql_configure_rings(struct ql_adapter *qdev)
qdev             4103 drivers/staging/qlge/qlge_main.c 	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
qdev             4106 drivers/staging/qlge/qlge_main.c 	qdev->lbq_buf_order = get_order(lbq_buf_len);
qdev             4115 drivers/staging/qlge/qlge_main.c 	qdev->intr_count = cpu_cnt;
qdev             4116 drivers/staging/qlge/qlge_main.c 	ql_enable_msix(qdev);
qdev             4118 drivers/staging/qlge/qlge_main.c 	qdev->rss_ring_count = qdev->intr_count;
qdev             4119 drivers/staging/qlge/qlge_main.c 	qdev->tx_ring_count = cpu_cnt;
qdev             4120 drivers/staging/qlge/qlge_main.c 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
qdev             4122 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->tx_ring_count; i++) {
qdev             4123 drivers/staging/qlge/qlge_main.c 		tx_ring = &qdev->tx_ring[i];
qdev             4125 drivers/staging/qlge/qlge_main.c 		tx_ring->qdev = qdev;
qdev             4127 drivers/staging/qlge/qlge_main.c 		tx_ring->wq_len = qdev->tx_ring_size;
qdev             4135 drivers/staging/qlge/qlge_main.c 		tx_ring->cq_id = qdev->rss_ring_count + i;
qdev             4138 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rx_ring_count; i++) {
qdev             4139 drivers/staging/qlge/qlge_main.c 		rx_ring = &qdev->rx_ring[i];
qdev             4141 drivers/staging/qlge/qlge_main.c 		rx_ring->qdev = qdev;
qdev             4144 drivers/staging/qlge/qlge_main.c 		if (i < qdev->rss_ring_count) {
qdev             4148 drivers/staging/qlge/qlge_main.c 			rx_ring->cq_len = qdev->rx_ring_size;
qdev             4165 drivers/staging/qlge/qlge_main.c 			rx_ring->cq_len = qdev->tx_ring_size;
qdev             4183 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4185 drivers/staging/qlge/qlge_main.c 	err = ql_adapter_reset(qdev);
qdev             4189 drivers/staging/qlge/qlge_main.c 	err = ql_configure_rings(qdev);
qdev             4193 drivers/staging/qlge/qlge_main.c 	err = ql_get_adapter_resources(qdev);
qdev             4197 drivers/staging/qlge/qlge_main.c 	err = ql_adapter_up(qdev);
qdev             4204 drivers/staging/qlge/qlge_main.c 	ql_release_adapter_resources(qdev);
qdev             4208 drivers/staging/qlge/qlge_main.c static int ql_change_rx_buffers(struct ql_adapter *qdev)
qdev             4215 drivers/staging/qlge/qlge_main.c 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
qdev             4218 drivers/staging/qlge/qlge_main.c 		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
qdev             4219 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             4225 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             4231 drivers/staging/qlge/qlge_main.c 	status = ql_adapter_down(qdev);
qdev             4236 drivers/staging/qlge/qlge_main.c 	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
qdev             4238 drivers/staging/qlge/qlge_main.c 	qdev->lbq_buf_order = get_order(lbq_buf_len);
qdev             4240 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rss_ring_count; i++) {
qdev             4241 drivers/staging/qlge/qlge_main.c 		rx_ring = &qdev->rx_ring[i];
qdev             4246 drivers/staging/qlge/qlge_main.c 	status = ql_adapter_up(qdev);
qdev             4252 drivers/staging/qlge/qlge_main.c 	netif_alert(qdev, ifup, qdev->ndev,
qdev             4254 drivers/staging/qlge/qlge_main.c 	set_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             4255 drivers/staging/qlge/qlge_main.c 	dev_close(qdev->ndev);
qdev             4261 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4265 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
qdev             4267 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
qdev             4271 drivers/staging/qlge/qlge_main.c 	queue_delayed_work(qdev->workqueue,
qdev             4272 drivers/staging/qlge/qlge_main.c 			&qdev->mpi_port_cfg_work, 3*HZ);
qdev             4276 drivers/staging/qlge/qlge_main.c 	if (!netif_running(qdev->ndev)) {
qdev             4280 drivers/staging/qlge/qlge_main.c 	status = ql_change_rx_buffers(qdev);
qdev             4282 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             4292 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4293 drivers/staging/qlge/qlge_main.c 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
qdev             4294 drivers/staging/qlge/qlge_main.c 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
qdev             4300 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
qdev             4315 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
qdev             4328 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4332 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
qdev             4340 drivers/staging/qlge/qlge_main.c 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
qdev             4342 drivers/staging/qlge/qlge_main.c 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
qdev             4343 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, hw, qdev->ndev,
qdev             4346 drivers/staging/qlge/qlge_main.c 				set_bit(QL_PROMISCUOUS, &qdev->flags);
qdev             4350 drivers/staging/qlge/qlge_main.c 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
qdev             4352 drivers/staging/qlge/qlge_main.c 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
qdev             4353 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, hw, qdev->ndev,
qdev             4356 drivers/staging/qlge/qlge_main.c 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
qdev             4367 drivers/staging/qlge/qlge_main.c 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
qdev             4369 drivers/staging/qlge/qlge_main.c 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
qdev             4370 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, hw, qdev->ndev,
qdev             4373 drivers/staging/qlge/qlge_main.c 				set_bit(QL_ALLMULTI, &qdev->flags);
qdev             4377 drivers/staging/qlge/qlge_main.c 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
qdev             4379 drivers/staging/qlge/qlge_main.c 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
qdev             4380 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, hw, qdev->ndev,
qdev             4383 drivers/staging/qlge/qlge_main.c 				clear_bit(QL_ALLMULTI, &qdev->flags);
qdev             4389 drivers/staging/qlge/qlge_main.c 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev             4394 drivers/staging/qlge/qlge_main.c 			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
qdev             4396 drivers/staging/qlge/qlge_main.c 				netif_err(qdev, hw, qdev->ndev,
qdev             4398 drivers/staging/qlge/qlge_main.c 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev             4403 drivers/staging/qlge/qlge_main.c 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev             4405 drivers/staging/qlge/qlge_main.c 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
qdev             4406 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, hw, qdev->ndev,
qdev             4409 drivers/staging/qlge/qlge_main.c 			set_bit(QL_ALLMULTI, &qdev->flags);
qdev             4413 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
qdev             4418 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4426 drivers/staging/qlge/qlge_main.c 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
qdev             4428 drivers/staging/qlge/qlge_main.c 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
qdev             4431 drivers/staging/qlge/qlge_main.c 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
qdev             4432 drivers/staging/qlge/qlge_main.c 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
qdev             4434 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
qdev             4435 drivers/staging/qlge/qlge_main.c 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
qdev             4441 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4442 drivers/staging/qlge/qlge_main.c 	ql_queue_asic_error(qdev);
qdev             4447 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev =
qdev             4451 drivers/staging/qlge/qlge_main.c 	status = ql_adapter_down(qdev);
qdev             4455 drivers/staging/qlge/qlge_main.c 	status = ql_adapter_up(qdev);
qdev             4460 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_ALLMULTI, &qdev->flags);
qdev             4461 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
qdev             4462 drivers/staging/qlge/qlge_main.c 	qlge_set_multicast_list(qdev->ndev);
qdev             4467 drivers/staging/qlge/qlge_main.c 	netif_alert(qdev, ifup, qdev->ndev,
qdev             4470 drivers/staging/qlge/qlge_main.c 	set_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             4471 drivers/staging/qlge/qlge_main.c 	dev_close(qdev->ndev);
qdev             4492 drivers/staging/qlge/qlge_main.c static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
qdev             4498 drivers/staging/qlge/qlge_main.c 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
qdev             4508 drivers/staging/qlge/qlge_main.c 	if (qdev->func == nic_func1)
qdev             4509 drivers/staging/qlge/qlge_main.c 		qdev->alt_func = nic_func2;
qdev             4510 drivers/staging/qlge/qlge_main.c 	else if (qdev->func == nic_func2)
qdev             4511 drivers/staging/qlge/qlge_main.c 		qdev->alt_func = nic_func1;
qdev             4518 drivers/staging/qlge/qlge_main.c static int ql_get_board_info(struct ql_adapter *qdev)
qdev             4521 drivers/staging/qlge/qlge_main.c 	qdev->func =
qdev             4522 drivers/staging/qlge/qlge_main.c 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
qdev             4523 drivers/staging/qlge/qlge_main.c 	if (qdev->func > 3)
qdev             4526 drivers/staging/qlge/qlge_main.c 	status = ql_get_alt_pcie_func(qdev);
qdev             4530 drivers/staging/qlge/qlge_main.c 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
qdev             4531 drivers/staging/qlge/qlge_main.c 	if (qdev->port) {
qdev             4532 drivers/staging/qlge/qlge_main.c 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
qdev             4533 drivers/staging/qlge/qlge_main.c 		qdev->port_link_up = STS_PL1;
qdev             4534 drivers/staging/qlge/qlge_main.c 		qdev->port_init = STS_PI1;
qdev             4535 drivers/staging/qlge/qlge_main.c 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
qdev             4536 drivers/staging/qlge/qlge_main.c 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
qdev             4538 drivers/staging/qlge/qlge_main.c 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
qdev             4539 drivers/staging/qlge/qlge_main.c 		qdev->port_link_up = STS_PL0;
qdev             4540 drivers/staging/qlge/qlge_main.c 		qdev->port_init = STS_PI0;
qdev             4541 drivers/staging/qlge/qlge_main.c 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
qdev             4542 drivers/staging/qlge/qlge_main.c 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
qdev             4544 drivers/staging/qlge/qlge_main.c 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
qdev             4545 drivers/staging/qlge/qlge_main.c 	qdev->device_id = qdev->pdev->device;
qdev             4546 drivers/staging/qlge/qlge_main.c 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
qdev             4547 drivers/staging/qlge/qlge_main.c 		qdev->nic_ops = &qla8012_nic_ops;
qdev             4548 drivers/staging/qlge/qlge_main.c 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
qdev             4549 drivers/staging/qlge/qlge_main.c 		qdev->nic_ops = &qla8000_nic_ops;
qdev             4556 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4558 drivers/staging/qlge/qlge_main.c 	if (qdev->workqueue) {
qdev             4559 drivers/staging/qlge/qlge_main.c 		destroy_workqueue(qdev->workqueue);
qdev             4560 drivers/staging/qlge/qlge_main.c 		qdev->workqueue = NULL;
qdev             4563 drivers/staging/qlge/qlge_main.c 	if (qdev->reg_base)
qdev             4564 drivers/staging/qlge/qlge_main.c 		iounmap(qdev->reg_base);
qdev             4565 drivers/staging/qlge/qlge_main.c 	if (qdev->doorbell_area)
qdev             4566 drivers/staging/qlge/qlge_main.c 		iounmap(qdev->doorbell_area);
qdev             4567 drivers/staging/qlge/qlge_main.c 	vfree(qdev->mpi_coredump);
qdev             4574 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4577 drivers/staging/qlge/qlge_main.c 	memset((void *)qdev, 0, sizeof(*qdev));
qdev             4584 drivers/staging/qlge/qlge_main.c 	qdev->ndev = ndev;
qdev             4585 drivers/staging/qlge/qlge_main.c 	qdev->pdev = pdev;
qdev             4603 drivers/staging/qlge/qlge_main.c 		set_bit(QL_DMA64, &qdev->flags);
qdev             4619 drivers/staging/qlge/qlge_main.c 	qdev->reg_base =
qdev             4622 drivers/staging/qlge/qlge_main.c 	if (!qdev->reg_base) {
qdev             4628 drivers/staging/qlge/qlge_main.c 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
qdev             4629 drivers/staging/qlge/qlge_main.c 	qdev->doorbell_area =
qdev             4632 drivers/staging/qlge/qlge_main.c 	if (!qdev->doorbell_area) {
qdev             4638 drivers/staging/qlge/qlge_main.c 	err = ql_get_board_info(qdev);
qdev             4644 drivers/staging/qlge/qlge_main.c 	qdev->msg_enable = netif_msg_init(debug, default_msg);
qdev             4645 drivers/staging/qlge/qlge_main.c 	spin_lock_init(&qdev->hw_lock);
qdev             4646 drivers/staging/qlge/qlge_main.c 	spin_lock_init(&qdev->stats_lock);
qdev             4649 drivers/staging/qlge/qlge_main.c 		qdev->mpi_coredump =
qdev             4651 drivers/staging/qlge/qlge_main.c 		if (qdev->mpi_coredump == NULL) {
qdev             4656 drivers/staging/qlge/qlge_main.c 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
qdev             4659 drivers/staging/qlge/qlge_main.c 	err = qdev->nic_ops->get_flash(qdev);
qdev             4666 drivers/staging/qlge/qlge_main.c 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
qdev             4669 drivers/staging/qlge/qlge_main.c 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
qdev             4670 drivers/staging/qlge/qlge_main.c 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
qdev             4673 drivers/staging/qlge/qlge_main.c 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
qdev             4674 drivers/staging/qlge/qlge_main.c 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
qdev             4675 drivers/staging/qlge/qlge_main.c 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
qdev             4676 drivers/staging/qlge/qlge_main.c 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
qdev             4681 drivers/staging/qlge/qlge_main.c 	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
qdev             4683 drivers/staging/qlge/qlge_main.c 	if (!qdev->workqueue) {
qdev             4688 drivers/staging/qlge/qlge_main.c 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
qdev             4689 drivers/staging/qlge/qlge_main.c 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
qdev             4690 drivers/staging/qlge/qlge_main.c 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
qdev             4691 drivers/staging/qlge/qlge_main.c 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
qdev             4692 drivers/staging/qlge/qlge_main.c 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
qdev             4693 drivers/staging/qlge/qlge_main.c 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
qdev             4694 drivers/staging/qlge/qlge_main.c 	init_completion(&qdev->ide_completion);
qdev             4695 drivers/staging/qlge/qlge_main.c 	mutex_init(&qdev->mpi_mutex);
qdev             4727 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = from_timer(qdev, t, timer);
qdev             4730 drivers/staging/qlge/qlge_main.c 	var = ql_read32(qdev, STS);
qdev             4731 drivers/staging/qlge/qlge_main.c 	if (pci_channel_offline(qdev->pdev)) {
qdev             4732 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
qdev             4736 drivers/staging/qlge/qlge_main.c 	mod_timer(&qdev->timer, jiffies + (5*HZ));
qdev             4743 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = NULL;
qdev             4758 drivers/staging/qlge/qlge_main.c 	qdev = netdev_priv(ndev);
qdev             4775 drivers/staging/qlge/qlge_main.c 	if (test_bit(QL_DMA64, &qdev->flags))
qdev             4781 drivers/staging/qlge/qlge_main.c 	ndev->tx_queue_len = qdev->tx_ring_size;
qdev             4806 drivers/staging/qlge/qlge_main.c 	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
qdev             4807 drivers/staging/qlge/qlge_main.c 	mod_timer(&qdev->timer, jiffies + (5*HZ));
qdev             4808 drivers/staging/qlge/qlge_main.c 	ql_link_off(qdev);
qdev             4810 drivers/staging/qlge/qlge_main.c 	atomic_set(&qdev->lb_count, 0);
qdev             4828 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4829 drivers/staging/qlge/qlge_main.c 	del_timer_sync(&qdev->timer);
qdev             4830 drivers/staging/qlge/qlge_main.c 	ql_cancel_all_work_sync(qdev);
qdev             4841 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4849 drivers/staging/qlge/qlge_main.c 	ql_cancel_all_work_sync(qdev);
qdev             4851 drivers/staging/qlge/qlge_main.c 	for (i = 0; i < qdev->rss_ring_count; i++)
qdev             4852 drivers/staging/qlge/qlge_main.c 		netif_napi_del(&qdev->rx_ring[i].napi);
qdev             4854 drivers/staging/qlge/qlge_main.c 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
qdev             4855 drivers/staging/qlge/qlge_main.c 	ql_tx_ring_clean(qdev);
qdev             4856 drivers/staging/qlge/qlge_main.c 	ql_free_rx_buffers(qdev);
qdev             4857 drivers/staging/qlge/qlge_main.c 	ql_release_adapter_resources(qdev);
qdev             4868 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4875 drivers/staging/qlge/qlge_main.c 		del_timer_sync(&qdev->timer);
qdev             4883 drivers/staging/qlge/qlge_main.c 		del_timer_sync(&qdev->timer);
qdev             4885 drivers/staging/qlge/qlge_main.c 		set_bit(QL_EEH_FATAL, &qdev->flags);
qdev             4902 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4908 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             4914 drivers/staging/qlge/qlge_main.c 	if (ql_adapter_reset(qdev)) {
qdev             4915 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
qdev             4916 drivers/staging/qlge/qlge_main.c 		set_bit(QL_EEH_FATAL, &qdev->flags);
qdev             4926 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4932 drivers/staging/qlge/qlge_main.c 			netif_err(qdev, ifup, qdev->ndev,
qdev             4937 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev,
qdev             4940 drivers/staging/qlge/qlge_main.c 	mod_timer(&qdev->timer, jiffies + (5*HZ));
qdev             4953 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4957 drivers/staging/qlge/qlge_main.c 	del_timer_sync(&qdev->timer);
qdev             4960 drivers/staging/qlge/qlge_main.c 		err = ql_adapter_down(qdev);
qdev             4965 drivers/staging/qlge/qlge_main.c 	ql_wol(qdev);
qdev             4981 drivers/staging/qlge/qlge_main.c 	struct ql_adapter *qdev = netdev_priv(ndev);
qdev             4988 drivers/staging/qlge/qlge_main.c 		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
qdev             4997 drivers/staging/qlge/qlge_main.c 		err = ql_adapter_up(qdev);
qdev             5002 drivers/staging/qlge/qlge_main.c 	mod_timer(&qdev->timer, jiffies + (5*HZ));
qdev                4 drivers/staging/qlge/qlge_mpi.c int ql_unpause_mpi_risc(struct ql_adapter *qdev)
qdev                9 drivers/staging/qlge/qlge_mpi.c 	tmp = ql_read32(qdev, CSR);
qdev               13 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
qdev               17 drivers/staging/qlge/qlge_mpi.c int ql_pause_mpi_risc(struct ql_adapter *qdev)
qdev               23 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
qdev               25 drivers/staging/qlge/qlge_mpi.c 		tmp = ql_read32(qdev, CSR);
qdev               34 drivers/staging/qlge/qlge_mpi.c int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
qdev               40 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
qdev               42 drivers/staging/qlge/qlge_mpi.c 		tmp = ql_read32(qdev, CSR);
qdev               44 drivers/staging/qlge/qlge_mpi.c 			ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
qdev               53 drivers/staging/qlge/qlge_mpi.c int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
qdev               57 drivers/staging/qlge/qlge_mpi.c 	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
qdev               61 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
qdev               63 drivers/staging/qlge/qlge_mpi.c 	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
qdev               67 drivers/staging/qlge/qlge_mpi.c 	*data = ql_read32(qdev, PROC_DATA);
qdev               72 drivers/staging/qlge/qlge_mpi.c int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
qdev               76 drivers/staging/qlge/qlge_mpi.c 	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
qdev               80 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, PROC_DATA, data);
qdev               82 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, PROC_ADDR, reg);
qdev               84 drivers/staging/qlge/qlge_mpi.c 	status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
qdev               91 drivers/staging/qlge/qlge_mpi.c int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
qdev               94 drivers/staging/qlge/qlge_mpi.c 	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
qdev              103 drivers/staging/qlge/qlge_mpi.c int ql_own_firmware(struct ql_adapter *qdev)
qdev              111 drivers/staging/qlge/qlge_mpi.c 	if (qdev->func < qdev->alt_func)
qdev              119 drivers/staging/qlge/qlge_mpi.c 	temp =  ql_read32(qdev, STS);
qdev              120 drivers/staging/qlge/qlge_mpi.c 	if (!(temp & (1 << (8 + qdev->alt_func))))
qdev              127 drivers/staging/qlge/qlge_mpi.c static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              131 drivers/staging/qlge/qlge_mpi.c 	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
qdev              136 drivers/staging/qlge/qlge_mpi.c 		    ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
qdev              139 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
qdev              143 drivers/staging/qlge/qlge_mpi.c 	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);	/* does flush too */
qdev              150 drivers/staging/qlge/qlge_mpi.c static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
qdev              156 drivers/staging/qlge/qlge_mpi.c 		value = ql_read32(qdev, STS);
qdev              167 drivers/staging/qlge/qlge_mpi.c static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              175 drivers/staging/qlge/qlge_mpi.c 	if (ql_read32(qdev, CSR) & CSR_HRI)
qdev              178 drivers/staging/qlge/qlge_mpi.c 	status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
qdev              186 drivers/staging/qlge/qlge_mpi.c 		status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
qdev              194 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
qdev              196 drivers/staging/qlge/qlge_mpi.c 	ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
qdev              207 drivers/staging/qlge/qlge_mpi.c static int ql_idc_req_aen(struct ql_adapter *qdev)
qdev              210 drivers/staging/qlge/qlge_mpi.c 	struct mbox_params *mbcp = &qdev->idc_mbc;
qdev              212 drivers/staging/qlge/qlge_mpi.c 	netif_err(qdev, drv, qdev->ndev, "Enter!\n");
qdev              217 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              219 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              221 drivers/staging/qlge/qlge_mpi.c 		ql_queue_asic_error(qdev);
qdev              227 drivers/staging/qlge/qlge_mpi.c 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qdev              228 drivers/staging/qlge/qlge_mpi.c 		queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
qdev              236 drivers/staging/qlge/qlge_mpi.c static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
qdev              239 drivers/staging/qlge/qlge_mpi.c 	struct mbox_params *mbcp = &qdev->idc_mbc;
qdev              241 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              243 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              245 drivers/staging/qlge/qlge_mpi.c 		ql_queue_fw_error(qdev);
qdev              250 drivers/staging/qlge/qlge_mpi.c 		complete(&qdev->ide_completion);
qdev              255 drivers/staging/qlge/qlge_mpi.c static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              260 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              262 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              267 drivers/staging/qlge/qlge_mpi.c 	qdev->link_status = mbcp->mbox_out[1];
qdev              268 drivers/staging/qlge/qlge_mpi.c 	netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
qdev              273 drivers/staging/qlge/qlge_mpi.c 	if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
qdev              274 drivers/staging/qlge/qlge_mpi.c 		status = ql_cam_route_initialize(qdev);
qdev              276 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, ifup, qdev->ndev,
qdev              280 drivers/staging/qlge/qlge_mpi.c 			clear_bit(QL_CAM_RT_SET, &qdev->flags);
qdev              287 drivers/staging/qlge/qlge_mpi.c 	if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
qdev              288 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
qdev              289 drivers/staging/qlge/qlge_mpi.c 		set_bit(QL_PORT_CFG, &qdev->flags);
qdev              294 drivers/staging/qlge/qlge_mpi.c 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qdev              295 drivers/staging/qlge/qlge_mpi.c 		queue_delayed_work(qdev->workqueue,
qdev              296 drivers/staging/qlge/qlge_mpi.c 				&qdev->mpi_port_cfg_work, 0);
qdev              299 drivers/staging/qlge/qlge_mpi.c 	ql_link_on(qdev);
qdev              302 drivers/staging/qlge/qlge_mpi.c static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              308 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              310 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
qdev              312 drivers/staging/qlge/qlge_mpi.c 	ql_link_off(qdev);
qdev              315 drivers/staging/qlge/qlge_mpi.c static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              321 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              323 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
qdev              325 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
qdev              330 drivers/staging/qlge/qlge_mpi.c static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              336 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              338 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
qdev              340 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
qdev              345 drivers/staging/qlge/qlge_mpi.c static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              351 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              353 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
qdev              356 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
qdev              358 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
qdev              366 drivers/staging/qlge/qlge_mpi.c static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              372 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              374 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
qdev              376 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Firmware Revision  = 0x%.08x.\n",
qdev              378 drivers/staging/qlge/qlge_mpi.c 		qdev->fw_rev_id = mbcp->mbox_out[1];
qdev              379 drivers/staging/qlge/qlge_mpi.c 		status = ql_cam_route_initialize(qdev);
qdev              381 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, ifup, qdev->ndev,
qdev              392 drivers/staging/qlge/qlge_mpi.c static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              399 drivers/staging/qlge/qlge_mpi.c 	status = ql_get_mb_sts(qdev, mbcp);
qdev              401 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              403 drivers/staging/qlge/qlge_mpi.c 		ql_queue_asic_error(qdev);
qdev              427 drivers/staging/qlge/qlge_mpi.c 		status = ql_get_mb_sts(qdev, mbcp);
qdev              436 drivers/staging/qlge/qlge_mpi.c 		status = ql_idc_req_aen(qdev);
qdev              446 drivers/staging/qlge/qlge_mpi.c 		status = ql_idc_cmplt_aen(qdev);
qdev              450 drivers/staging/qlge/qlge_mpi.c 		ql_link_up(qdev, mbcp);
qdev              454 drivers/staging/qlge/qlge_mpi.c 		ql_link_down(qdev, mbcp);
qdev              463 drivers/staging/qlge/qlge_mpi.c 			status = ql_get_mb_sts(qdev, mbcp);
qdev              467 drivers/staging/qlge/qlge_mpi.c 		ql_init_fw_done(qdev, mbcp);
qdev              471 drivers/staging/qlge/qlge_mpi.c 		ql_sfp_in(qdev, mbcp);
qdev              475 drivers/staging/qlge/qlge_mpi.c 		ql_sfp_out(qdev, mbcp);
qdev              487 drivers/staging/qlge/qlge_mpi.c 			status = ql_get_mb_sts(qdev, mbcp);
qdev              491 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              494 drivers/staging/qlge/qlge_mpi.c 		ql_queue_fw_error(qdev);
qdev              498 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "System Error.\n");
qdev              499 drivers/staging/qlge/qlge_mpi.c 		ql_queue_fw_error(qdev);
qdev              504 drivers/staging/qlge/qlge_mpi.c 		ql_aen_lost(qdev, mbcp);
qdev              511 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              516 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
qdev              532 drivers/staging/qlge/qlge_mpi.c static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
qdev              537 drivers/staging/qlge/qlge_mpi.c 	mutex_lock(&qdev->mpi_mutex);
qdev              540 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qdev              543 drivers/staging/qlge/qlge_mpi.c 	status = ql_exec_mb_cmd(qdev, mbcp);
qdev              562 drivers/staging/qlge/qlge_mpi.c 		status = ql_wait_mbx_cmd_cmplt(qdev);
qdev              571 drivers/staging/qlge/qlge_mpi.c 		status = ql_mpi_handler(qdev, mbcp);
qdev              586 drivers/staging/qlge/qlge_mpi.c 	netif_err(qdev, drv, qdev->ndev,
qdev              596 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
qdev              606 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
qdev              607 drivers/staging/qlge/qlge_mpi.c 	mutex_unlock(&qdev->mpi_mutex);
qdev              615 drivers/staging/qlge/qlge_mpi.c int ql_mb_about_fw(struct ql_adapter *qdev)
qdev              628 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              633 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              639 drivers/staging/qlge/qlge_mpi.c 	qdev->fw_rev_id = mbcp->mbox_out[1];
qdev              647 drivers/staging/qlge/qlge_mpi.c int ql_mb_get_fw_state(struct ql_adapter *qdev)
qdev              660 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              665 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              675 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              686 drivers/staging/qlge/qlge_mpi.c static int ql_mb_idc_ack(struct ql_adapter *qdev)
qdev              698 drivers/staging/qlge/qlge_mpi.c 	mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
qdev              699 drivers/staging/qlge/qlge_mpi.c 	mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
qdev              700 drivers/staging/qlge/qlge_mpi.c 	mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
qdev              701 drivers/staging/qlge/qlge_mpi.c 	mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
qdev              703 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              708 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
qdev              718 drivers/staging/qlge/qlge_mpi.c int ql_mb_set_port_cfg(struct ql_adapter *qdev)
qdev              730 drivers/staging/qlge/qlge_mpi.c 	mbcp->mbox_in[1] = qdev->link_config;
qdev              731 drivers/staging/qlge/qlge_mpi.c 	mbcp->mbox_in[2] = qdev->max_frame_size;
qdev              734 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              739 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              742 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              749 drivers/staging/qlge/qlge_mpi.c static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
qdev              772 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              777 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
qdev              784 drivers/staging/qlge/qlge_mpi.c int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
qdev              791 drivers/staging/qlge/qlge_mpi.c 	my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
qdev              796 drivers/staging/qlge/qlge_mpi.c 	status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
qdev              800 drivers/staging/qlge/qlge_mpi.c 	pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
qdev              809 drivers/staging/qlge/qlge_mpi.c int ql_mb_get_port_cfg(struct ql_adapter *qdev)
qdev              822 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              827 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              831 drivers/staging/qlge/qlge_mpi.c 		netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
qdev              833 drivers/staging/qlge/qlge_mpi.c 		qdev->link_config = mbcp->mbox_out[1];
qdev              834 drivers/staging/qlge/qlge_mpi.c 		qdev->max_frame_size = mbcp->mbox_out[2];
qdev              839 drivers/staging/qlge/qlge_mpi.c int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
qdev              854 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              859 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
qdev              865 drivers/staging/qlge/qlge_mpi.c int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
qdev              870 drivers/staging/qlge/qlge_mpi.c 	u8 *addr = qdev->ndev->dev_addr;
qdev              896 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              901 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
qdev              914 drivers/staging/qlge/qlge_mpi.c static int ql_idc_wait(struct ql_adapter *qdev)
qdev              918 drivers/staging/qlge/qlge_mpi.c 	struct mbox_params *mbcp = &qdev->idc_mbc;
qdev              924 drivers/staging/qlge/qlge_mpi.c 			wait_for_completion_timeout(&qdev->ide_completion,
qdev              927 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
qdev              935 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, drv, qdev->ndev,
qdev              939 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
qdev              943 drivers/staging/qlge/qlge_mpi.c 			netif_err(qdev, drv, qdev->ndev,
qdev              954 drivers/staging/qlge/qlge_mpi.c int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
qdev              969 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev              974 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev              982 drivers/staging/qlge/qlge_mpi.c int ql_mb_get_led_cfg(struct ql_adapter *qdev)
qdev              995 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev             1000 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1004 drivers/staging/qlge/qlge_mpi.c 		qdev->led_config = mbcp->mbox_out[1];
qdev             1009 drivers/staging/qlge/qlge_mpi.c int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
qdev             1023 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev             1031 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1039 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1046 drivers/staging/qlge/qlge_mpi.c static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
qdev             1060 drivers/staging/qlge/qlge_mpi.c 	status = ql_mailbox_command(qdev, mbcp);
qdev             1070 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1074 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1081 drivers/staging/qlge/qlge_mpi.c int ql_wait_fifo_empty(struct ql_adapter *qdev)
qdev             1088 drivers/staging/qlge/qlge_mpi.c 		nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
qdev             1089 drivers/staging/qlge/qlge_mpi.c 		ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
qdev             1101 drivers/staging/qlge/qlge_mpi.c static int ql_set_port_cfg(struct ql_adapter *qdev)
qdev             1104 drivers/staging/qlge/qlge_mpi.c 	status = ql_mb_set_port_cfg(qdev);
qdev             1107 drivers/staging/qlge/qlge_mpi.c 	status = ql_idc_wait(qdev);
qdev             1121 drivers/staging/qlge/qlge_mpi.c 	struct ql_adapter *qdev =
qdev             1125 drivers/staging/qlge/qlge_mpi.c 	status = ql_mb_get_port_cfg(qdev);
qdev             1127 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1132 drivers/staging/qlge/qlge_mpi.c 	if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
qdev             1133 drivers/staging/qlge/qlge_mpi.c 			qdev->max_frame_size ==
qdev             1137 drivers/staging/qlge/qlge_mpi.c 	qdev->link_config |=	CFG_JUMBO_FRAME_SIZE;
qdev             1138 drivers/staging/qlge/qlge_mpi.c 	qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
qdev             1139 drivers/staging/qlge/qlge_mpi.c 	status = ql_set_port_cfg(qdev);
qdev             1141 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1146 drivers/staging/qlge/qlge_mpi.c 	clear_bit(QL_PORT_CFG, &qdev->flags);
qdev             1149 drivers/staging/qlge/qlge_mpi.c 	ql_queue_fw_error(qdev);
qdev             1161 drivers/staging/qlge/qlge_mpi.c 	struct ql_adapter *qdev =
qdev             1164 drivers/staging/qlge/qlge_mpi.c 	struct mbox_params *mbcp = &qdev->idc_mbc;
qdev             1173 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev,
qdev             1178 drivers/staging/qlge/qlge_mpi.c 		ql_link_off(qdev);
qdev             1185 drivers/staging/qlge/qlge_mpi.c 		set_bit(QL_CAM_RT_SET, &qdev->flags);
qdev             1188 drivers/staging/qlge/qlge_mpi.c 			status = ql_mb_idc_ack(qdev);
qdev             1190 drivers/staging/qlge/qlge_mpi.c 				netif_err(qdev, drv, qdev->ndev,
qdev             1193 drivers/staging/qlge/qlge_mpi.c 			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
qdev             1210 drivers/staging/qlge/qlge_mpi.c 		ql_link_off(qdev);
qdev             1211 drivers/staging/qlge/qlge_mpi.c 		set_bit(QL_CAM_RT_SET, &qdev->flags);
qdev             1221 drivers/staging/qlge/qlge_mpi.c 			status = ql_mb_idc_ack(qdev);
qdev             1223 drivers/staging/qlge/qlge_mpi.c 				netif_err(qdev, drv, qdev->ndev,
qdev             1226 drivers/staging/qlge/qlge_mpi.c 			netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
qdev             1236 drivers/staging/qlge/qlge_mpi.c 	struct ql_adapter *qdev =
qdev             1242 drivers/staging/qlge/qlge_mpi.c 	mutex_lock(&qdev->mpi_mutex);
qdev             1244 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
qdev             1246 drivers/staging/qlge/qlge_mpi.c 	while (ql_read32(qdev, STS) & STS_PI) {
qdev             1252 drivers/staging/qlge/qlge_mpi.c 		err = ql_mpi_handler(qdev, mbcp);
qdev             1258 drivers/staging/qlge/qlge_mpi.c 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
qdev             1259 drivers/staging/qlge/qlge_mpi.c 	mutex_unlock(&qdev->mpi_mutex);
qdev             1260 drivers/staging/qlge/qlge_mpi.c 	ql_enable_completion_interrupt(qdev, 0);
qdev             1265 drivers/staging/qlge/qlge_mpi.c 	struct ql_adapter *qdev =
qdev             1267 drivers/staging/qlge/qlge_mpi.c 	cancel_delayed_work_sync(&qdev->mpi_work);
qdev             1268 drivers/staging/qlge/qlge_mpi.c 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
qdev             1269 drivers/staging/qlge/qlge_mpi.c 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
qdev             1273 drivers/staging/qlge/qlge_mpi.c 	if (!ql_own_firmware(qdev)) {
qdev             1274 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
qdev             1278 drivers/staging/qlge/qlge_mpi.c 	if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
qdev             1279 drivers/staging/qlge/qlge_mpi.c 		netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev             1280 drivers/staging/qlge/qlge_mpi.c 		qdev->core_is_dumped = 1;
qdev             1281 drivers/staging/qlge/qlge_mpi.c 		queue_delayed_work(qdev->workqueue,
qdev             1282 drivers/staging/qlge/qlge_mpi.c 			&qdev->mpi_core_to_log, 5 * HZ);
qdev             1284 drivers/staging/qlge/qlge_mpi.c 	ql_soft_reset_mpi_risc(qdev);
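For orientation on the qlge_mpi.c hits above: the mailbox helpers referenced in the listing (ql_exec_mb_cmd, ql_wait_mbx_cmd_cmplt, ql_get_mb_sts, ql_mailbox_command) follow a simple handshake — write the request words into the inbound mailbox registers, raise the host-to-RISC interrupt, wait for the processor-interrupt status, then read the outbound mailbox registers back and clear the interrupt. The sketch below is a minimal, self-contained user-space model of that handshake only; struct qdev_model, sim_raise_h2r and model_mailbox_command are invented names for illustration and are not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define MBOX_WORDS 5            /* model only; the real driver sizes this per command */
#define STS_PI     (1u << 0)    /* "processor interrupt" flag in this model */

/* Toy device: inbound/outbound mailboxes plus a status word. */
struct qdev_model {
	uint32_t mailbox_in[MBOX_WORDS];
	uint32_t mailbox_out[MBOX_WORDS];
	uint32_t sts;
};

/* Stand-in for the doorbell write: the fake "firmware" echoes the
 * request into the outbound mailboxes and raises the status bit. */
static void sim_raise_h2r(struct qdev_model *q)
{
	for (int i = 0; i < MBOX_WORDS; i++)
		q->mailbox_out[i] = q->mailbox_in[i] ^ 0xA5A5A5A5u;
	q->sts |= STS_PI;
}

/* Mirrors the shape of the mailbox flow in the listing: load inputs,
 * ring the doorbell, check for the interrupt status, copy outputs,
 * then clear the interrupt. */
static int model_mailbox_command(struct qdev_model *q,
				 const uint32_t *in, uint32_t *out)
{
	for (int i = 0; i < MBOX_WORDS; i++)
		q->mailbox_in[i] = in[i];

	sim_raise_h2r(q);

	if (!(q->sts & STS_PI))
		return -1;              /* the real code polls with a timeout */

	for (int i = 0; i < MBOX_WORDS; i++)
		out[i] = q->mailbox_out[i];
	q->sts &= ~STS_PI;              /* analogous to clearing the R2PCI interrupt */
	return 0;
}

int main(void)
{
	struct qdev_model q = { { 0 }, { 0 }, 0 };
	uint32_t req[MBOX_WORDS] = { 0x08, 0, 0, 0, 0 };  /* arbitrary request word */
	uint32_t rsp[MBOX_WORDS];

	if (model_mailbox_command(&q, req, rsp) == 0)
		printf("mbox_out[0] = 0x%08x\n", rsp[0]);
	return 0;
}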
qdev               23 net/qrtr/smd.c 	struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
qdev               26 net/qrtr/smd.c 	if (!qdev)
qdev               29 net/qrtr/smd.c 	rc = qrtr_endpoint_post(&qdev->ep, data, len);
qdev               31 net/qrtr/smd.c 		dev_err(qdev->dev, "invalid ipcrouter packet\n");
qdev               42 net/qrtr/smd.c 	struct qrtr_smd_dev *qdev = container_of(ep, struct qrtr_smd_dev, ep);
qdev               49 net/qrtr/smd.c 	rc = rpmsg_send(qdev->channel, skb->data, skb->len);
qdev               61 net/qrtr/smd.c 	struct qrtr_smd_dev *qdev;
qdev               64 net/qrtr/smd.c 	qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL);
qdev               65 net/qrtr/smd.c 	if (!qdev)
qdev               68 net/qrtr/smd.c 	qdev->channel = rpdev->ept;
qdev               69 net/qrtr/smd.c 	qdev->dev = &rpdev->dev;
qdev               70 net/qrtr/smd.c 	qdev->ep.xmit = qcom_smd_qrtr_send;
qdev               72 net/qrtr/smd.c 	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
qdev               76 net/qrtr/smd.c 	dev_set_drvdata(&rpdev->dev, qdev);
qdev               85 net/qrtr/smd.c 	struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev);
qdev               87 net/qrtr/smd.c 	qrtr_endpoint_unregister(&qdev->ep);
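The net/qrtr/smd.c hits above show the transport glue pattern: probe allocates a small per-channel object pairing the rpmsg channel with a QRTR endpoint, sets the endpoint's xmit hook, registers the endpoint, and stashes the object in drvdata so the receive callback can find it, while the send path recovers the owning object from the endpoint pointer. Below is a minimal, self-contained C model of that ownership pattern only; struct smd_dev_model, fake_endpoint and model_send are invented names and do not correspond to the real qrtr or rpmsg API.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Toy "endpoint" with an xmit hook, loosely mirroring the role of
 * qrtr_endpoint in the listing. */
struct fake_endpoint {
	int (*xmit)(struct fake_endpoint *ep, const void *data, size_t len);
};

/* Per-channel glue object: owns the endpoint and remembers which
 * channel to transmit on (analogous role to the listed qdev). */
struct smd_dev_model {
	struct fake_endpoint ep;
	const char *channel_name;
};

static int model_send(struct fake_endpoint *ep, const void *data, size_t len)
{
	/* container_of-style recovery of the owning glue object */
	struct smd_dev_model *qdev = (struct smd_dev_model *)
		((char *)ep - offsetof(struct smd_dev_model, ep));
	(void)data;
	printf("xmit %zu bytes on %s\n", len, qdev->channel_name);
	return 0;
}

int main(void)
{
	/* "probe": allocate the glue, wire the callback, keep it around
	 * the way drvdata does in the real driver */
	struct smd_dev_model *qdev = calloc(1, sizeof(*qdev));
	if (!qdev)
		return 1;
	qdev->channel_name = "IPCRTR";
	qdev->ep.xmit = model_send;

	/* outbound path goes through the endpoint's xmit hook */
	const char msg[] = "hello";
	qdev->ep.xmit(&qdev->ep, msg, sizeof(msg));

	/* "remove": drop the endpoint and free the glue */
	free(qdev);
	return 0;
}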
qdev              336 net/sched/sch_cbs.c 	struct net_device *qdev;
qdev              346 net/sched/sch_cbs.c 		qdev = qdisc_dev(q->qdisc);
qdev              347 net/sched/sch_cbs.c 		if (qdev == dev) {
qdev             1072 net/sched/sch_taprio.c 	struct net_device *qdev;
qdev             1083 net/sched/sch_taprio.c 		qdev = qdisc_dev(q->root);
qdev             1084 net/sched/sch_taprio.c 		if (qdev == dev) {