[PATCH 475/524] staging:ti dspbridge: remove DSP_SUCCEEDED macro from core

Greg Kroah-Hartman gregkh at suse.de
Thu Aug 5 22:24:18 UTC 2010


From: Ernesto Ramos <ernesto at ti.com>

Since a successful status is now 0, the DSP_SUCCEEDED macro
is no longer necessary.

Signed-off-by: Ernesto Ramos <ernesto at ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh at suse.de>
---
 drivers/staging/tidspbridge/core/chnl_sm.c        |   22 ++++----
 drivers/staging/tidspbridge/core/dsp-clock.c      |    4 +-
 drivers/staging/tidspbridge/core/io_sm.c          |   47 ++++++++---------
 drivers/staging/tidspbridge/core/msg_sm.c         |   22 ++++----
 drivers/staging/tidspbridge/core/tiomap3430.c     |   57 +++++++++------------
 drivers/staging/tidspbridge/core/tiomap3430_pwr.c |    6 +-
 drivers/staging/tidspbridge/core/tiomap_io.c      |   41 +++++++--------
 7 files changed, 91 insertions(+), 108 deletions(-)
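
For reference, the conversion pattern applied throughout these files is
sketched below. This is illustration only and not part of the patch: the
DSP_SUCCEEDED definition shown is an assumed approximation (a
"non-negative status" check), and do_work()/main() are placeholder
functions. Once success is always exactly 0 and failures are negative
errno values, the macro collapses to a plain zero test.

	/* sketch.c -- standalone illustration, not part of the patch */
	#include <stdio.h>

	/* Assumed old helper; see the dspbridge headers for the exact
	 * definition. */
	#define DSP_SUCCEEDED(status)	((int)(status) >= 0)

	static int do_work(int status)
	{
		/* Old style: relies on "success is non-negative". */
		if (DSP_SUCCEEDED(status))
			printf("old check: success\n");

		/* New style (this patch): success is exactly 0,
		 * failures are negative errno values. */
		if (!status)
			printf("new check: success\n");

		return status;
	}

	int main(void)
	{
		do_work(0);	/* success: both checks pass */
		do_work(-5);	/* error (e.g. -EIO): neither check passes */
		return 0;
	}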

diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c
index 69c4784..1b23141 100644
--- a/drivers/staging/tidspbridge/core/chnl_sm.c
+++ b/drivers/staging/tidspbridge/core/chnl_sm.c
@@ -170,7 +170,7 @@ func_cont:
 	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
 	if (pchnl->chnl_type == CHNL_PCPY) {
 		/* This is a processor-copy channel. */
-		if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+		if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
 			/* Check buffer size on output channels for fit. */
 			if (byte_size >
 			    io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
@@ -178,7 +178,7 @@ func_cont:
 
 		}
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Get a free chirp: */
 		chnl_packet_obj =
 		    (struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
@@ -186,7 +186,7 @@ func_cont:
 			status = -EIO;
 
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Enqueue the chirp on the chnl's IORequest queue: */
 		chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
 		    host_buf;
@@ -330,7 +330,7 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
 		status = bridge_chnl_cancel_io(chnl_obj);
 	}
 func_cont:
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Assert I/O on this channel is now cancelled: Protects
 		 * from io_dpc. */
 		DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
@@ -420,8 +420,7 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
 			chnl_mgr_obj->dw_output_mask = 0;
 			chnl_mgr_obj->dw_last_output = 0;
 			chnl_mgr_obj->hdev_obj = hdev_obj;
-			if (DSP_SUCCEEDED(status))
-				spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
+			spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
 		} else {
 			status = -ENOMEM;
 		}
@@ -499,7 +498,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
 	} else {
 		status = -EFAULT;
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Note: Currently, if another thread continues to add IO
 		 * requests to this channel, this function will continue to
 		 * flush all such queued IO requests. */
@@ -507,8 +506,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
 		    && (pchnl->chnl_type == CHNL_PCPY)) {
 			/* Wait for IO completions, up to the specified
 			 * timeout: */
-			while (!LST_IS_EMPTY(pchnl->pio_requests) &&
-			       DSP_SUCCEEDED(status)) {
+			while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
 				status = bridge_chnl_get_ioc(chnl_obj,
 						timeout, &chnl_ioc_obj);
 				if (DSP_FAILED(status))
@@ -833,7 +831,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
 	else
 		status = -ENOMEM;
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
 							GFP_KERNEL);
 		if (pchnl->ntfy_obj)
@@ -842,7 +840,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
 			status = -ENOMEM;
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		if (pchnl->pio_completions && pchnl->pio_requests &&
 		    pchnl->free_packets_list) {
 			/* Initialize CHNL object fields: */
@@ -897,7 +895,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
 		*chnl = pchnl;
 	}
 func_end:
-	DBC_ENSURE((DSP_SUCCEEDED(status) && pchnl) || (*chnl == NULL));
+	DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
 	return status;
 }
 
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index b474e83..5b1a0c5 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -285,7 +285,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
 		status = -EPERM;
 	}
 
-	if (DSP_SUCCEEDED(status))
+	if (!status)
 		set_dsp_clk_active(&dsp_clocks, clk_id);
 
 out:
@@ -354,7 +354,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
 		status = -EPERM;
 	}
 
-	if (DSP_SUCCEEDED(status))
+	if (!status)
 		set_dsp_clk_inactive(&dsp_clocks, clk_id);
 
 out:
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index e5e3684..1d433a9 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -230,11 +230,10 @@ int bridge_io_create(struct io_mgr **io_man,
 
 		spin_lock_init(&pio_mgr->dpc_lock);
 
-		if (DSP_SUCCEEDED(status))
-			status = dev_get_dev_node(hdev_obj, &dev_node_obj);
+		status = dev_get_dev_node(hdev_obj, &dev_node_obj);
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		pio_mgr->hbridge_context = hbridge_context;
 		pio_mgr->shared_irq = mgr_attrts->irq_shared;
 		if (dsp_wdt_init())
@@ -378,15 +377,13 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
 		__func__, (ul_shm_length - sizeof(struct shm)));
 
-	if (DSP_SUCCEEDED(status)) {
-		/* Get start and length of message part of shared memory */
-		status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
+	/* Get start and length of message part of shared memory */
+	status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
 					   &ul_msg_base);
-	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
 					   &ul_msg_limit);
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			if (ul_msg_limit <= ul_msg_base) {
 				status = -EINVAL;
 			} else {
@@ -409,7 +406,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	} else {
 		status = -EFAULT;
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
 		status =
 		    cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
@@ -420,18 +417,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		if (DSP_FAILED(status))
 			status = -EFAULT;
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		status =
 		    cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
 		if (DSP_FAILED(status))
 			status = -EFAULT;
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
 		if (DSP_FAILED(status))
 			status = -EFAULT;
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Get memory reserved in host resources */
 		(void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
 					      &hio_mgr->ext_proc_info,
@@ -1551,7 +1548,7 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
 		goto func_end;
 	}
 	/* Get end of 1st SM Heap region */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Get start and length of message part of shared memory */
 		status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
 					   &shm0_end);
@@ -1561,7 +1558,7 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
 		}
 	}
 	/* Start of Gpp reserved region */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Get start and length of message part of shared memory */
 		status =
 		    cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
@@ -1572,15 +1569,15 @@ static int register_shm_segs(struct io_mgr *hio_mgr,
 		}
 	}
 	/* Register with CMM */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
 							   CMM_ALLSEGMENTS);
 		}
 	}
 	/* Register new SM region(s) */
-	if (DSP_SUCCEEDED(status) && (shm0_end - ul_shm0_base) > 0) {
+	if (!status && (shm0_end - ul_shm0_base) > 0) {
 		/* Calc size (bytes) of SM the GPP can alloc from */
 		ul_rsrvd_size =
 		    (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
@@ -1843,11 +1840,11 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
 	} else {
 		status = -EFAULT;
 	}
-	if (DSP_SUCCEEDED(status))
+	if (!status)
 		status =
 		    cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
 
-	if (DSP_SUCCEEDED(status))
+	if (!status)
 		/* trace_cur_pos will hold the address of a DSP pointer */
 		status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
 							&trace_cur_pos);
@@ -2013,7 +2010,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 		status = -EFAULT;
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		status = dev_get_node_manager(dev_object, &node_mgr);
 		if (!node_mgr) {
 			pr_debug("%s: Failed on dev_get_node_manager.\n",
@@ -2022,7 +2019,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 		}
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Look for SYS_PUTCBEG/SYS_PUTCEND: */
 		status =
 			cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
@@ -2032,7 +2029,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 			pr_debug("%s: Failed on cod_get_sym_value.\n",
 								__func__);
 	}
-	if (DSP_SUCCEEDED(status))
+	if (!status)
 		status = dev_get_intf_fxns(dev_object, &intf_fxns);
 	/*
 	 * Check for the "magic number" in the trace buffer.  If it has
@@ -2041,7 +2038,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 	 */
 	mmu_fault_dbg_info.head[0] = 0;
 	mmu_fault_dbg_info.head[1] = 0;
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		poll_cnt = 0;
 		while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
 			mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
@@ -2066,7 +2063,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 		}
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		total_size = mmu_fault_dbg_info.size;
 		/* Limit the size in case DSP went crazy */
 		if (total_size > MAX_MMU_DBGBUFF)
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c
index 7f44294..85ca448 100644
--- a/drivers/staging/tidspbridge/core/msg_sm.c
+++ b/drivers/staging/tidspbridge/core/msg_sm.c
@@ -102,7 +102,7 @@ int bridge_msg_create(struct msg_mgr **msg_man,
 		else
 			sync_init_event(msg_mgr_obj->sync_event);
 
-		if (DSP_SUCCEEDED(status))
+		if (!status)
 			*msg_man = msg_mgr_obj;
 		else
 			delete_msg_mgr(msg_mgr_obj);
@@ -157,7 +157,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 
 	/*  Create event that will be signalled when a message from
 	 *  the DSP is available. */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
 							GFP_KERNEL);
 		if (msg_q->sync_event)
@@ -167,7 +167,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 	}
 
 	/* Create a notification list for message ready notification. */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
 							GFP_KERNEL);
 		if (msg_q->ntfy_obj)
@@ -181,7 +181,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 	 *  unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
 	 *  will be set by the unblocked thread to signal that it
 	 *  is unblocked and will no longer reference the object. */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
 							GFP_KERNEL);
 		if (msg_q->sync_done)
@@ -190,7 +190,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 			status = -ENOMEM;
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
 							GFP_KERNEL);
 		if (msg_q->sync_done_ack)
@@ -199,13 +199,13 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
 			status = -ENOMEM;
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Enter critical section */
 		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
 		/* Initialize message frames and put in appropriate queues */
-		for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
+		for (i = 0; i < max_msgs && !status; i++) {
 			status = add_new_msg(hmsg_mgr->msg_free_list);
-			if (DSP_SUCCEEDED(status)) {
+			if (!status) {
 				num_allocated++;
 				status = add_new_msg(msg_q->msg_free_list);
 			}
@@ -330,7 +330,7 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
 	}
 	/* Exit critical section */
 	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
-	if (DSP_SUCCEEDED(status) && !got_msg) {
+	if (!status && !got_msg) {
 		/*  Wait til message is available, timeout, or done. We don't
 		 *  have to schedule the DPC, since the DSP will send messages
 		 *  when they are available. */
@@ -349,7 +349,7 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
 			(void)sync_set_event(msg_queue_obj->sync_done_ack);
 			status = -EPERM;
 		} else {
-			if (DSP_SUCCEEDED(status)) {
+			if (!status) {
 				DBC_ASSERT(!LST_IS_EMPTY
 					   (msg_queue_obj->msg_used_list));
 				/* Get msg from used list */
@@ -432,7 +432,7 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
 
 		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
 	}
-	if (DSP_SUCCEEDED(status) && !put_msg) {
+	if (!status && !put_msg) {
 		/* Wait til a free message frame is available, timeout,
 		 * or done */
 		syncs[0] = hmsg_mgr->sync_event;
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index ae1f394..8f25a05 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -262,7 +262,6 @@ void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
  */
 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
 {
-	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
 	u32 temp;
 	struct dspbridge_platform_data *pdata =
@@ -291,11 +290,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 	dsp_clk_enable(DSP_CLK_IVA2);
 
-	if (DSP_SUCCEEDED(status)) {
-		/* set the device state to IDLE */
-		dev_context->dw_brd_state = BRD_IDLE;
-	}
-	return status;
+	/* set the device state to IDLE */
+	dev_context->dw_brd_state = BRD_IDLE;
+
+	return 0;
 }
 
 /*
@@ -406,13 +404,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 	} else
 		__raw_writel(0xffffffff, dw_sync_addr);
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		resources = dev_context->resources;
 		if (!resources)
 			status = -EPERM;
 
 		/* Assert RST1 i.e only the RST only for DSP megacell */
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
 					OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
 					OMAP2_RM_RSTCTRL);
@@ -428,7 +426,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 					OMAP343X_CONTROL_IVA2_BOOTMOD));
 		}
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
 		 * IVA2 SYSC register */
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
@@ -476,7 +474,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 
 	/* Lock the above TLB entries and get the BIOS and load monitor timer
 	 * information */
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
 		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
 		hw_mmu_ttb_set(resources->dw_dmmu_base,
@@ -499,7 +497,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 				     &ul_load_monitor_timer);
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		if (ul_load_monitor_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_load_monitor_timer;
@@ -510,7 +508,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 		}
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		if (ul_bios_gp_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_bios_gp_timer;
@@ -521,7 +519,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 		}
 	}
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		/* Set the DSP clock rate */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -551,7 +549,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
 		}
 
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
 
 /*PM_IVA2GRPSEL_PER = 0xC0;*/
@@ -908,7 +906,7 @@ static int bridge_dev_create(struct bridge_dev_context
 	else
 		status = -ENOMEM;
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
 
@@ -918,7 +916,7 @@ static int bridge_dev_create(struct bridge_dev_context
 		 * resources struct */
 		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_STOPPED;
@@ -1111,13 +1109,13 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
 	u32 total_bytes = ul_num_bytes;
 	u8 host_buf[BUFFERSIZE];
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	while ((total_bytes > 0) && DSP_SUCCEEDED(status)) {
+	while (total_bytes > 0 && !status) {
 		copy_bytes =
 		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
 		/* Read from External memory */
 		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
 					   copy_bytes, mem_type);
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			if (dest_addr < (dev_context->dw_dsp_start_add +
 					 dev_context->dw_internal_size)) {
 				/* Write to Internal memory */
@@ -1149,7 +1147,7 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
 	u32 ul_remain_bytes = 0;
 	u32 ul_bytes = 0;
 	ul_remain_bytes = ul_num_bytes;
-	while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) {
+	while (ul_remain_bytes > 0 && !status) {
 		ul_bytes =
 		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
 		if (dsp_addr < (dev_context->dw_dsp_start_add +
@@ -1369,9 +1367,7 @@ static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
 	}
 	up_read(&mm->mmap_sem);
 func_cont:
-	if (DSP_SUCCEEDED(status)) {
-		status = 0;
-	} else {
+	if (status) {
 		/*
 		 * Roll out the mapped pages incase it failed in middle of
 		 * mapping
@@ -1433,7 +1429,7 @@ static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
 		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
 		ul_num_bytes, l1_base_va, pte_addr_l1);
 
-	while (rem_bytes && (DSP_SUCCEEDED(status))) {
+	while (rem_bytes && !status) {
 		u32 va_curr_orig = va_curr;
 		/* Find whether the L1 PTE points to a valid L2 PT */
 		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
@@ -1472,7 +1468,7 @@ static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
 		 * entry. Similar checking is done for L1 PTEs too
 		 * below
 		 */
-		while (rem_bytes_l2 && (DSP_SUCCEEDED(status))) {
+		while (rem_bytes_l2 && !status) {
 			pte_val = *(u32 *) pte_addr_l2;
 			pte_size = hw_mmu_pte_size_l2(pte_val);
 			/* va_curr aligned to pte_size? */
@@ -1639,7 +1635,7 @@ static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
 		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
 	};
 
-	while (num_bytes && DSP_SUCCEEDED(status)) {
+	while (num_bytes && !status) {
 		/* To find the max. page size with which both PA & VA are
 		 * aligned */
 		all_bits = pa_curr | va_curr;
@@ -1736,7 +1732,7 @@ static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
 			 * Should not overwrite it. */
 			status = -EPERM;
 		}
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			pg_tbl_va = l2_base_va;
 			if (size == HW_PAGE_SIZE64KB)
 				pt->pg_info[l2_page_num].num_entries += 16;
@@ -1749,7 +1745,7 @@ static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
 		}
 		spin_unlock(&pt->pg_lock);
 	}
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
 			pg_tbl_va, pa, va, size);
 		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
@@ -1789,7 +1785,7 @@ static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
 	va_curr = ul_mpu_addr;
 	page[0] = vmalloc_to_page((void *)va_curr);
 	pa_next = page_to_phys(page[0]);
-	while (DSP_SUCCEEDED(status) && (i < num_pages)) {
+	while (!status && (i < num_pages)) {
 		/*
 		 * Reuse pa_next from the previous iteraion to avoid
 		 * an extra va2pa call
@@ -1827,11 +1823,6 @@ static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
 				    hw_attrs);
 		va_curr += size_curr;
 	}
-	if (DSP_SUCCEEDED(status))
-		status = 0;
-	else
-		status = -EPERM;
-
 	/*
 	 * In any case, flush the TLB
 	 * This is called from here instead from pte_update to avoid unnecessary
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index dc63b3a..d938645 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -112,7 +112,7 @@ int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
 		/* Disable wdt on hibernation. */
 		dsp_wdt_enable(false);
 
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			/* Update the Bridger Driver state */
 			dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
 #ifdef CONFIG_TIDSPBRIDGE_DVFS
@@ -310,7 +310,7 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
 		status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
 		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
 					  false);
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			(dev_context->dsp_per_clks) &=
 				(~((u32) (1 << bpwr_clks[clk_id_index].clk)));
 		}
@@ -318,7 +318,7 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
 	case BPWR_ENABLE_CLOCK:
 		status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
 		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
-		if (DSP_SUCCEEDED(status))
+		if (!status)
 			(dev_context->dsp_per_clks) |=
 				(1 << bpwr_clks[clk_id_index].clk);
 		break;
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 742da05..190c028 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -70,19 +70,19 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
 	DBC_ASSERT(ul_shm_base_virt != 0);
 
 	/* Check if it is a read of Trace section */
-	if (DSP_SUCCEEDED(status) && !ul_trace_sec_beg) {
+	if (!status && !ul_trace_sec_beg) {
 		status = dev_get_symbol(dev_context->hdev_obj,
 					DSP_TRACESEC_BEG, &ul_trace_sec_beg);
 	}
 	DBC_ASSERT(ul_trace_sec_beg != 0);
 
-	if (DSP_SUCCEEDED(status) && !ul_trace_sec_end) {
+	if (!status && !ul_trace_sec_end) {
 		status = dev_get_symbol(dev_context->hdev_obj,
 					DSP_TRACESEC_END, &ul_trace_sec_end);
 	}
 	DBC_ASSERT(ul_trace_sec_end != 0);
 
-	if (DSP_SUCCEEDED(status)) {
+	if (!status) {
 		if ((dsp_addr <= ul_trace_sec_end) &&
 		    (dsp_addr >= ul_trace_sec_beg))
 			trace_read = true;
@@ -100,19 +100,19 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
 		ul_ext_end = 0;
 
 		/* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
-		if (DSP_SUCCEEDED(status) && !ul_dyn_ext_base) {
+		if (!status && !ul_dyn_ext_base) {
 			status = dev_get_symbol(dev_context->hdev_obj,
 						DYNEXTBASE, &ul_dyn_ext_base);
 		}
 		DBC_ASSERT(ul_dyn_ext_base != 0);
 
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			status = dev_get_symbol(dev_context->hdev_obj,
 						EXTBASE, &ul_ext_base);
 		}
 		DBC_ASSERT(ul_ext_base != 0);
 
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			status = dev_get_symbol(dev_context->hdev_obj,
 						EXTEND, &ul_ext_end);
 		}
@@ -131,7 +131,7 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
 		if (ul_ext_end < ul_ext_base)
 			status = -EPERM;
 
-		if (DSP_SUCCEEDED(status)) {
+		if (!status) {
 			ul_tlb_base_virt =
 			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
@@ -167,7 +167,7 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
 
 	offset = dsp_addr - ul_ext_base;
 
-	if (DSP_SUCCEEDED(status))
+	if (!status)
 		memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes);
 
 	return status;
@@ -247,12 +247,12 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 		/* Check if it is a load to Trace section */
 		ret = dev_get_symbol(dev_context->hdev_obj,
 				     DSP_TRACESEC_BEG, &ul_trace_sec_beg);
-		if (DSP_SUCCEEDED(ret))
+		if (!ret)
 			ret = dev_get_symbol(dev_context->hdev_obj,
 					     DSP_TRACESEC_END,
 					     &ul_trace_sec_end);
 	}
-	if (DSP_SUCCEEDED(ret)) {
+	if (!ret) {
 		if ((dsp_addr <= ul_trace_sec_end) &&
 		    (dsp_addr >= ul_trace_sec_beg))
 			trace_load = true;
@@ -272,7 +272,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 					     SHMBASENAME, &ul_shm_base_virt);
 		DBC_ASSERT(ul_shm_base_virt != 0);
 		if (dynamic_load) {
-			if (DSP_SUCCEEDED(ret)) {
+			if (!ret) {
 				if (symbols_reloaded)
 					ret =
 					    dev_get_symbol
@@ -280,7 +280,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 					     &ul_ext_base);
 			}
 			DBC_ASSERT(ul_ext_base != 0);
-			if (DSP_SUCCEEDED(ret)) {
+			if (!ret) {
 				/* DR  OMAPS00013235 : DLModules array may be
 				 * in EXTMEM. It is expected that DYNEXTMEM and
 				 * EXTMEM are contiguous, so checking for the
@@ -293,13 +293,13 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 			}
 		} else {
 			if (symbols_reloaded) {
-				if (DSP_SUCCEEDED(ret))
+				if (!ret)
 					ret =
 					    dev_get_symbol
 					    (dev_context->hdev_obj, EXTBASE,
 					     &ul_ext_base);
 				DBC_ASSERT(ul_ext_base != 0);
-				if (DSP_SUCCEEDED(ret))
+				if (!ret)
 					ret =
 					    dev_get_symbol
 					    (dev_context->hdev_obj, EXTEND,
@@ -316,19 +316,16 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 		if (ul_ext_end < ul_ext_base)
 			ret = -EPERM;
 
-		if (DSP_SUCCEEDED(ret)) {
+		if (!ret) {
 			ul_tlb_base_virt =
 			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 
 			if (symbols_reloaded) {
-				if (DSP_SUCCEEDED(ret)) {
-					ret =
-					    dev_get_symbol
+				ret = dev_get_symbol
 					    (dev_context->hdev_obj,
 					     DSP_TRACESEC_END, &shm0_end);
-				}
-				if (DSP_SUCCEEDED(ret)) {
+				if (!ret) {
 					ret =
 					    dev_get_symbol
 					    (dev_context->hdev_obj, DYNEXTBASE,
@@ -360,7 +357,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 	if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
 		ret = -EPERM;
 
-	if (DSP_SUCCEEDED(ret)) {
+	if (!ret) {
 		for (i = 0; i < 4; i++)
 			remain_byte[i] = 0x0;
 
@@ -369,7 +366,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
 		if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
 			ret = -EPERM;
 	}
-	if (DSP_SUCCEEDED(ret)) {
+	if (!ret) {
 		if (ul_num_bytes)
 			memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
 			       ul_num_bytes);
-- 
1.7.1



