qcacld-2.0: reduce htt descriptor memory footprint
When allocating HTT descriptors, allocate multiple small (page-sized)
chunks of memory instead of one large contiguous chunk. This removes
the need for an order-5 allocation and reduces the chance of
out-of-memory failures.
Change-Id: Ib9d4a3f10adbc0656e1418cf3a67429322bb7164
CRs-fixed: 845666
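
To make the scheme easier to follow, here is a minimal standalone sketch of
what the new htt_tx_attach() path does: the descriptor pool is carved into
page-sized chunks and the freelist is threaded across page boundaries, so
nothing larger than one page is ever requested; a lookup helper then maps a
descriptor's virtual address back to its bus address the way the new
htt_tx_get_paddr() does. This is illustrative only, not driver code: it uses
malloc()/free() and a fake bus address in place of adf_os_mem_alloc() /
adf_os_mem_alloc_consistent(), assumes the descriptor size is between
sizeof(void *) and one page, and the names desc_page_t, paged_pool_alloc and
paged_pool_paddr are hypothetical.

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096      /* matches the fallback the patch adds */

struct desc_page_t {
    char      *v_start;     /* first descriptor slot in the page        */
    char      *v_end;       /* one past the last descriptor slot        */
    uintptr_t  p_addr;      /* bus address of the page (stand-in here)  */
};

/*
 * Allocate 'elems' descriptors of 'size' bytes each as page-sized chunks
 * and thread them into a single freelist: the last slot of each page
 * points at the first slot of the next page, so no allocation larger
 * than one page is requested. Returns the freelist head, or NULL.
 */
static uint32_t *paged_pool_alloc(struct desc_page_t **pages_out,
                                  int *num_pages_out, int elems, int size)
{
    int desc_per_page = PAGE_SIZE / size;
    int num_pages = (elems + desc_per_page - 1) / desc_per_page;
    struct desc_page_t *pages;
    uint32_t **p;
    int i, j, linked = 0;

    pages = calloc(num_pages, sizeof(*pages));
    if (!pages)
        return NULL;

    for (i = 0; i < num_pages; i++) {
        pages[i].v_start = malloc(PAGE_SIZE);          /* order-0 allocation */
        if (!pages[i].v_start) {
            while (--i >= 0)
                free(pages[i].v_start);
            free(pages);
            return NULL;
        }
        pages[i].v_end  = pages[i].v_start + desc_per_page * size;
        pages[i].p_addr = (uintptr_t)pages[i].v_start; /* no real DMA here */
    }

    /* Link the descriptors into a freelist that crosses page boundaries. */
    p = (uint32_t **)pages[0].v_start;
    for (i = 0; i < num_pages && linked < elems - 1; i++) {
        for (j = 0; j < desc_per_page && linked < elems - 1; j++) {
            if (j == desc_per_page - 1)
                *p = (uint32_t *)pages[i + 1].v_start; /* hop to next page  */
            else
                *p = (uint32_t *)((char *)p + size);   /* next slot in page */
            linked++;
            p = (uint32_t **)*p;
        }
    }
    *p = NULL;                                         /* terminate list */

    *pages_out = pages;
    *num_pages_out = num_pages;
    return (uint32_t *)pages[0].v_start;
}

/*
 * Translate a descriptor virtual address back to a bus address: find the
 * page containing the address, then add the offset within that page.
 */
static uintptr_t paged_pool_paddr(struct desc_page_t *pages, int num_pages,
                                  char *vaddr)
{
    int i;

    for (i = 0; i < num_pages; i++)
        if (vaddr >= pages[i].v_start && vaddr < pages[i].v_end)
            return pages[i].p_addr + (uintptr_t)(vaddr - pages[i].v_start);

    return 0;   /* not part of the pool */
}

Compared with the old single allocation of pool_elems * size bytes, the
largest request the kernel ever sees here is one page, which avoids the
order-5 allocation mentioned above.
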
diff --git a/CORE/CLD_TXRX/HTT/htt_tx.c b/CORE/CLD_TXRX/HTT/htt_tx.c
index 1ffd4b5..350bfe1 100644
--- a/CORE/CLD_TXRX/HTT/htt_tx.c
+++ b/CORE/CLD_TXRX/HTT/htt_tx.c
@@ -76,9 +76,12 @@
int
htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
- int i, pool_size;
- u_int32_t **p;
+ int i, i_int, pool_size;
+ uint32_t **p;
adf_os_dma_addr_t pool_paddr = {0};
+ struct htt_tx_desc_page_t *page_info;
+ unsigned int num_link = 0;
+ uint32_t page_size;
if (pdev->cfg.is_high_latency) {
pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
@@ -104,41 +107,93 @@
* It should be, but round up just to be sure.
*/
pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
-
pdev->tx_descs.pool_elems = desc_pool_elems;
pdev->tx_descs.alloc_cnt = 0;
pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
- if (pdev->cfg.is_high_latency)
- pdev->tx_descs.pool_vaddr = adf_os_mem_alloc(pdev->osdev, pool_size);
- else
- pdev->tx_descs.pool_vaddr =
- adf_os_mem_alloc_consistent( pdev->osdev, pool_size, &pool_paddr,
- adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
+ /* Calculate required page count first */
+ page_size = adf_os_mem_get_page_size();
+ pdev->num_pages = pool_size / page_size;
+ if (pool_size % page_size)
+ pdev->num_pages++;
- pdev->tx_descs.pool_paddr = pool_paddr;
+ /* Pack as many descriptors as possible into each page: */
+ /* calculate how many descriptors fit in a single page */
+ pdev->num_desc_per_page = page_size / pdev->tx_descs.size;
- if (!pdev->tx_descs.pool_vaddr) {
- return 1; /* failure */
+ /* Storage for per-page bookkeeping information */
+ pdev->desc_pages = (struct htt_tx_desc_page_t *)adf_os_mem_alloc(
+ pdev->osdev, pdev->num_pages * sizeof(struct htt_tx_desc_page_t));
+ if (!pdev->desc_pages) {
+ adf_os_print("HTT Attach, desc page alloc fail");
+ goto fail1;
}
- adf_os_print("%s:htt_desc_start:0x%p htt_desc_end:0x%p\n", __func__,
- pdev->tx_descs.pool_vaddr,
- (u_int32_t *) (pdev->tx_descs.pool_vaddr + pool_size));
+ page_info = pdev->desc_pages;
+ p = (uint32_t **) pdev->tx_descs.freelist;
+ /* Allocate the required memory as multiple pages */
+ for(i = 0; i < pdev->num_pages; i++) {
+ if (pdev->cfg.is_high_latency) {
+ page_info->page_v_addr_start = adf_os_mem_alloc(
+ pdev->osdev, page_size);
+ page_info->page_p_addr = pool_paddr;
+ if (!page_info->page_v_addr_start) {
+ page_info = pdev->desc_pages;
+ for (i_int = 0 ; i_int < i; i_int++) {
+ page_info = pdev->desc_pages + i_int;
+ adf_os_mem_free(page_info->page_v_addr_start);
+ }
+ goto fail2;
+ }
+ } else {
+ page_info->page_v_addr_start = adf_os_mem_alloc_consistent(
+ pdev->osdev,
+ page_size,
+ &page_info->page_p_addr,
+ adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
+ if (!page_info->page_v_addr_start) {
+ page_info = pdev->desc_pages;
+ for (i_int = 0 ; i_int < i; i_int++) {
+ page_info = pdev->desc_pages + i_int;
+ adf_os_mem_free_consistent(
+ pdev->osdev,
+ pdev->num_desc_per_page * pdev->tx_descs.size,
+ page_info->page_v_addr_start,
+ page_info->page_p_addr,
+ adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
+ }
+ goto fail2;
+ }
+ }
+ page_info->page_v_addr_end = page_info->page_v_addr_start +
+ pdev->num_desc_per_page * pdev->tx_descs.size;
+ page_info++;
+ }
-#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
- g_dbg_htt_desc_end_addr = (u_int32_t *)
- (pdev->tx_descs.pool_vaddr + pool_size);
- g_dbg_htt_desc_start_addr = (u_int32_t *) pdev->tx_descs.pool_vaddr;
-#endif
-
- /* link tx descriptors into a freelist */
- pdev->tx_descs.freelist = (u_int32_t *) pdev->tx_descs.pool_vaddr;
- p = (u_int32_t **) pdev->tx_descs.freelist;
- for (i = 0; i < desc_pool_elems - 1; i++) {
- *p = (u_int32_t *) (((char *) p) + pdev->tx_descs.size);
- p = (u_int32_t **) *p;
+ page_info = pdev->desc_pages;
+ pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
+ p = (uint32_t **) pdev->tx_descs.freelist;
+ for(i = 0; i < pdev->num_pages; i++) {
+ for (i_int = 0; i_int < pdev->num_desc_per_page; i_int++) {
+ if (i_int == (pdev->num_desc_per_page - 1)) {
+ /* Last element on this page; it should point to the next page */
+ if (!page_info->page_v_addr_start) {
+ adf_os_print("overflow, num_link %d\n", num_link);
+ goto fail3;
+ }
+ page_info++;
+ *p = (uint32_t *)page_info->page_v_addr_start;
+ }
+ else {
+ *p = (uint32_t *)(((char *) p) + pdev->tx_descs.size);
+ }
+ num_link++;
+ p = (uint32_t **) *p;
+ /* Last link established, exit the loop */
+ if (num_link == (pdev->tx_descs.pool_elems - 1))
+ break;
+ }
}
*p = NULL;
@@ -148,24 +203,89 @@
adf_os_atomic_add(HTT_MAX_BUS_CREDIT,&pdev->htt_tx_credit.bus_delta);
}
return 0; /* success */
+
+fail3:
+ if (pdev->cfg.is_high_latency) {
+ page_info = pdev->desc_pages;
+ for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
+ page_info = pdev->desc_pages + i_int;
+ adf_os_mem_free(page_info->page_v_addr_start);
+ }
+ } else {
+ page_info = pdev->desc_pages;
+ for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
+ page_info = pdev->desc_pages + i_int;
+ adf_os_mem_free_consistent(
+ pdev->osdev,
+ pdev->num_desc_per_page * pdev->tx_descs.size,
+ page_info->page_v_addr_start,
+ page_info->page_p_addr,
+ adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
+ }
+ }
+
+fail2:
+ adf_os_mem_free(pdev->desc_pages);
+
+fail1:
+ return -1;
}
void
htt_tx_detach(struct htt_pdev_t *pdev)
{
+ unsigned int i;
+ struct htt_tx_desc_page_t *page_info;
+
if (pdev){
- if (pdev->cfg.is_high_latency)
+ if (pdev->cfg.is_high_latency) {
adf_os_mem_free(pdev->tx_descs.pool_vaddr);
- else
- adf_os_mem_free_consistent(
- pdev->osdev,
- pdev->tx_descs.pool_elems * pdev->tx_descs.size, /* pool_size */
- pdev->tx_descs.pool_vaddr,
- pdev->tx_descs.pool_paddr,
- adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
+ for (i = 0; i < pdev->num_pages; i++) {
+ page_info = pdev->desc_pages + i;
+ adf_os_mem_free(page_info->page_v_addr_start);
+ }
+ } else {
+ for (i = 0; i < pdev->num_pages; i++) {
+ page_info = pdev->desc_pages + i;
+ adf_os_mem_free_consistent(
+ pdev->osdev,
+ pdev->num_desc_per_page * pdev->tx_descs.size,
+ page_info->page_v_addr_start,
+ page_info->page_p_addr,
+ adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
+ }
}
+ adf_os_mem_free(pdev->desc_pages);
+ }
}
+/**
+ * htt_tx_get_paddr() - get the physical address of an HTT descriptor
+ *
+ * Translate a descriptor virtual address to its physical address: find
+ * the page that contains it, then add the offset within that page.
+ *
+ * Return: physical address of the descriptor
+ */
+adf_os_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev, char *target_vaddr)
+{
+ unsigned int i;
+ struct htt_tx_desc_page_t *page_info = NULL;
+
+ for (i = 0; i < pdev->num_pages; i++) {
+ page_info = pdev->desc_pages + i;
+ if (!page_info->page_v_addr_start) {
+ adf_os_assert(0);
+ return 0;
+ }
+ if ((target_vaddr >= page_info->page_v_addr_start) &&
+ (target_vaddr <= page_info->page_v_addr_end))
+ break;
+ }
+
+ return page_info->page_p_addr +
+ (adf_os_dma_addr_t)(target_vaddr - page_info->page_v_addr_start);
+}
/*--- descriptor allocation functions ---------------------------------------*/
@@ -204,13 +324,14 @@
* to map it from a virtual/CPU address to a physical/bus address.
*/
*fragmentation_descr_field_ptr =
- HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+ (uint32_t)htt_tx_get_paddr(pdev, (char *)htt_tx_desc) +
+ HTT_TX_DESC_LEN;
}
/*
* Include the headroom for the HTC frame header when specifying the
* physical address for the HTT tx descriptor.
*/
- *paddr_lo = (u_int32_t) HTT_TX_DESC_PADDR(pdev, htt_host_tx_desc);
+ *paddr_lo = (uint32_t)htt_tx_get_paddr(pdev, (char *)htt_host_tx_desc);
/*
* The allocated tx descriptor space includes headroom for a
* HTC frame header. Hide this headroom, so that we don't have
@@ -251,7 +372,7 @@
((u_int32_t *) htt_tx_desc) + HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
if (reset) {
*fragmentation_descr_field_ptr =
- HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+ (uint32_t)htt_tx_get_paddr(pdev, (char *)htt_tx_desc) + HTT_TX_DESC_LEN;
} else {
*fragmentation_descr_field_ptr = paddr;
}
diff --git a/CORE/CLD_TXRX/HTT/htt_types.h b/CORE/CLD_TXRX/HTT/htt_types.h
index 7bdf308..f230b5f 100644
--- a/CORE/CLD_TXRX/HTT/htt_types.h
+++ b/CORE/CLD_TXRX/HTT/htt_types.h
@@ -210,6 +210,12 @@
};
#endif
+struct htt_tx_desc_page_t
+{
+ char* page_v_addr_start;
+ char* page_v_addr_end;
+ adf_os_dma_addr_t page_p_addr;
+};
struct htt_pdev_t {
ol_pdev_handle ctrl_pdev;
@@ -373,6 +379,9 @@
int rx_buff_index;
#endif
+ int num_pages;
+ int num_desc_per_page;
+ struct htt_tx_desc_page_t *desc_pages;
};
#endif /* _HTT_TYPES__H_ */
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_desc.c b/CORE/CLD_TXRX/TXRX/ol_tx_desc.c
index c24f476..60dc045 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_desc.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_desc.c
@@ -66,7 +66,7 @@
adf_os_spin_lock_bh(&pdev->tx_mutex);
if (pdev->tx_desc.freelist) {
pdev->tx_desc.num_free--;
- tx_desc = &pdev->tx_desc.freelist->tx_desc;
+ tx_desc = pdev->tx_desc.freelist->tx_desc;
pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
if (tx_desc->pkt_type != 0xff
@@ -126,7 +126,7 @@
struct ol_tx_desc_t *
ol_tx_desc_find(struct ol_txrx_pdev_t *pdev, u_int16_t tx_desc_id)
{
- return &pdev->tx_desc.array[tx_desc_id].tx_desc;
+ return pdev->tx_desc.array[tx_desc_id].tx_desc;
}
void
@@ -139,8 +139,9 @@
tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
- ((union ol_tx_desc_list_elem_t *) tx_desc)->next = pdev->tx_desc.freelist;
- pdev->tx_desc.freelist = (union ol_tx_desc_list_elem_t *) tx_desc;
+ ((struct ol_tx_desc_list_elem_t *)(tx_desc->p_link))->next =
+ pdev->tx_desc.freelist;
+ pdev->tx_desc.freelist = tx_desc->p_link;
pdev->tx_desc.num_free++;
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
#ifdef QCA_LL_TX_FLOW_CT
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_desc.h b/CORE/CLD_TXRX/TXRX/ol_tx_desc.h
index a310954..8e7ebb7 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_desc.h
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_desc.h
@@ -148,11 +148,8 @@
static inline u_int16_t
ol_tx_desc_id(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
- TXRX_ASSERT2(
- ((union ol_tx_desc_list_elem_t *) tx_desc - pdev->tx_desc.array) <
- pdev->tx_desc.pool_size);
- return (u_int16_t)
- ((union ol_tx_desc_list_elem_t *)tx_desc - pdev->tx_desc.array);
+ TXRX_ASSERT2(tx_desc->id < pdev->tx_desc.pool_size);
+ return tx_desc->id;
}
/*
* @brief Retrieves the beacon headr for the vdev
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_send.c b/CORE/CLD_TXRX/TXRX/ol_tx_send.c
index 320252a..bf0ef24 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_send.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_send.c
@@ -419,11 +419,11 @@
OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); /* restore orginal hdr offset */ \
adf_nbuf_unmap((_pdev)->osdev, (_netbuf), ADF_OS_DMA_TO_DEVICE); \
adf_nbuf_free((_netbuf)); \
- ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = (_lcl_freelist); \
+ ((struct ol_tx_desc_list_elem_t *)(_tx_desc))->next = (_lcl_freelist); \
if (adf_os_unlikely(!lcl_freelist)) { \
- (_tx_desc_last) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
+ (_tx_desc_last) = (struct ol_tx_desc_list_elem_t *)(_tx_desc); \
} \
- (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
+ (_lcl_freelist) = (struct ol_tx_desc_list_elem_t *)(_tx_desc); \
} while (0)
#else /*!ATH_11AC_TXCOMPACT*/
@@ -432,11 +432,11 @@
OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); /* restore orginal hdr offset */ \
adf_nbuf_unmap((_pdev)->osdev, (_netbuf), ADF_OS_DMA_TO_DEVICE); \
adf_nbuf_free((_netbuf)); \
- ((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = (_lcl_freelist); \
+ ((struct ol_tx_desc_list_elem_t *)(_tx_desc))->next = (_lcl_freelist); \
if (adf_os_unlikely(!lcl_freelist)) { \
- (_tx_desc_last) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
+ (_tx_desc_last) = (struct ol_tx_desc_list_elem_t *)(_tx_desc); \
} \
- (_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
+ (_lcl_freelist) = (struct ol_tx_desc_list_elem_t *)(_tx_desc); \
} while (0)
@@ -493,12 +493,12 @@
* been given to the target to transmit, for which the
* target has never provided a response.
*/
- if (adf_os_atomic_read(&pdev->tx_desc.array[i].tx_desc.ref_cnt)) {
+ if (adf_os_atomic_read(&pdev->tx_desc.array[i].tx_desc->ref_cnt)) {
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
"Warning: freeing tx frame "
"(no tx completion from the target)\n");
ol_tx_desc_frame_free_nonstd(
- pdev, &pdev->tx_desc.array[i].tx_desc, 1);
+ pdev, pdev->tx_desc.array[i].tx_desc, 1);
}
}
}
@@ -531,11 +531,11 @@
char *trace_str;
uint32_t byte_cnt = 0;
- union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
+ struct ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
adf_nbuf_t netbuf;
- union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
- union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
+ struct ol_tx_desc_list_elem_t *lcl_freelist = NULL;
+ struct ol_tx_desc_list_elem_t *tx_desc_last = NULL;
ol_tx_desc_list tx_descs;
TAILQ_INIT(&tx_descs);
@@ -544,7 +544,7 @@
trace_str = (status) ? "OT:C:F:" : "OT:C:S:";
for (i = 0; i < num_msdus; i++) {
tx_desc_id = desc_ids[i];
- tx_desc = &td_array[tx_desc_id].tx_desc;
+ tx_desc = td_array[tx_desc_id].tx_desc;
tx_desc->status = status;
netbuf = tx_desc->netbuf;
@@ -609,7 +609,7 @@
uint8_t i, is_member;
uint16_t vdev_id_mask;
struct ol_tx_desc_t *tx_desc;
- union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
+ struct ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
tx_desc = &td_array[tx_desc_id].tx_desc;
@@ -735,10 +735,10 @@
u_int16_t tx_desc_id)
{
struct ol_tx_desc_t *tx_desc;
- union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
+ struct ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
adf_nbuf_t netbuf;
- tx_desc = &td_array[tx_desc_id].tx_desc;
+ tx_desc = td_array[tx_desc_id].tx_desc;
tx_desc->status = status;
netbuf = tx_desc->netbuf;
@@ -782,16 +782,16 @@
u_int16_t *desc_ids = (u_int16_t *)tx_desc_id_iterator;
u_int16_t tx_desc_id;
struct ol_tx_desc_t *tx_desc;
- union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
- union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
- union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
+ struct ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
+ struct ol_tx_desc_list_elem_t *lcl_freelist = NULL;
+ struct ol_tx_desc_list_elem_t *tx_desc_last = NULL;
adf_nbuf_t netbuf;
ol_tx_desc_list tx_descs;
TAILQ_INIT(&tx_descs);
for (i = 0; i < num_msdus; i++) {
tx_desc_id = desc_ids[i];
- tx_desc = &td_array[tx_desc_id].tx_desc;
+ tx_desc = td_array[tx_desc_id].tx_desc;
netbuf = tx_desc->netbuf;
/* find the "vdev" this tx_desc belongs to */
@@ -1011,7 +1011,7 @@
ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, u_int16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
- struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.array[msdu_id].tx_desc;
+ struct ol_tx_desc_t *tx_desc = pdev->tx_desc.array[msdu_id].tx_desc;
u_int8_t tid;
adf_nbuf_t msdu = tx_desc->netbuf;
@@ -1104,7 +1104,7 @@
for (i = 0; i < num_msdus; i++) {
u_int16_t id = desc_ids[i];
- struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.array[id].tx_desc;
+ struct ol_tx_desc_t *tx_desc = pdev->tx_desc.array[id].tx_desc;
int bin;
tx_delay_queue_ticks = now_ticks - tx_desc->entry_timestamp_ticks;
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.c b/CORE/CLD_TXRX/TXRX/ol_txrx.c
index e06a666..5a84435 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx.c
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx.c
@@ -326,7 +326,11 @@
#ifdef WDI_EVENT_ENABLE
A_STATUS ret;
#endif
- u_int16_t desc_pool_size;
+ uint16_t desc_pool_size;
+ uint32_t page_size;
+ void **desc_pages = NULL;
+ unsigned int pages_idx;
+ unsigned int descs_idx;
pdev = adf_os_mem_alloc(osdev, sizeof(*pdev));
if (!pdev) {
@@ -420,13 +424,37 @@
#endif /* IPA_UC_OFFLOAD */
pdev->tx_desc.array = adf_os_mem_alloc(
- osdev, desc_pool_size * sizeof(union ol_tx_desc_list_elem_t));
+ osdev, desc_pool_size * sizeof(struct ol_tx_desc_list_elem_t));
if (!pdev->tx_desc.array) {
goto fail3;
}
adf_os_mem_set(
pdev->tx_desc.array, 0,
- desc_pool_size * sizeof(union ol_tx_desc_list_elem_t));
+ desc_pool_size * sizeof(struct ol_tx_desc_list_elem_t));
+
+ pdev->desc_mem_size = desc_pool_size * sizeof(struct ol_tx_desc_t);
+ page_size = adf_os_mem_get_page_size();
+ pdev->num_desc_pages = pdev->desc_mem_size / page_size;
+ if (pdev->desc_mem_size % page_size)
+ pdev->num_desc_pages++;
+ pdev->num_descs_per_page = page_size / sizeof(struct ol_tx_desc_t);
+
+ /* Allocate host descriptor resources */
+ desc_pages = adf_os_mem_alloc(
+ pdev->osdev, pdev->num_desc_pages * sizeof(char *));
+ if (!desc_pages)
+ goto fail3;
+
+ for (pages_idx = 0; pages_idx < pdev->num_desc_pages; pages_idx++) {
+ desc_pages[pages_idx] = adf_os_mem_alloc(pdev->osdev, page_size);
+ if (!desc_pages[pages_idx]) {
+ for (i = 0; i < pages_idx; i++)
+ adf_os_mem_free(desc_pages[i]);
+ adf_os_mem_free(desc_pages);
+ goto fail3;
+ }
+ }
+ pdev->desc_pages = desc_pages;
/*
* Each SW tx desc (used only within the tx datapath SW) has a
@@ -435,9 +463,23 @@
* desc now, to avoid doing it during time-critical transmit.
*/
pdev->tx_desc.pool_size = desc_pool_size;
+
+ pages_idx = 0;
+ descs_idx = 0;
for (i = 0; i < desc_pool_size; i++) {
void *htt_tx_desc;
u_int32_t paddr_lo;
+
+ pdev->tx_desc.array[i].tx_desc =
+ (struct ol_tx_desc_t *)(desc_pages[pages_idx] +
+ descs_idx * sizeof(struct ol_tx_desc_t));
+ descs_idx++;
+ if (pdev->num_descs_per_page == descs_idx) {
+ /* Next page */
+ pages_idx++;
+ descs_idx = 0;
+ }
+
htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr_lo);
if (! htt_tx_desc) {
VOS_TRACE(VOS_MODULE_ID_TXRX, VOS_TRACE_LEVEL_FATAL,
@@ -446,18 +488,20 @@
while (--i >= 0) {
htt_tx_desc_free(
pdev->htt_pdev,
- pdev->tx_desc.array[i].tx_desc.htt_tx_desc);
+ pdev->tx_desc.array[i].tx_desc->htt_tx_desc);
}
goto fail4;
}
- pdev->tx_desc.array[i].tx_desc.htt_tx_desc = htt_tx_desc;
- pdev->tx_desc.array[i].tx_desc.htt_tx_desc_paddr = paddr_lo;
+ pdev->tx_desc.array[i].tx_desc->htt_tx_desc = htt_tx_desc;
+ pdev->tx_desc.array[i].tx_desc->htt_tx_desc_paddr = paddr_lo;
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
- pdev->tx_desc.array[i].tx_desc.pkt_type = 0xff;
+ pdev->tx_desc.array[i].tx_desc->pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
- pdev->tx_desc.array[i].tx_desc.entry_timestamp_ticks = 0xffffffff;
+ pdev->tx_desc.array[i].tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
+ pdev->tx_desc.array[i].tx_desc->p_link = (void *)&pdev->tx_desc.array[i];
+ pdev->tx_desc.array[i].tx_desc->id = i;
}
/* link SW tx descs into a freelist */
@@ -789,10 +833,14 @@
fail5:
for (i = 0; i < desc_pool_size; i++) {
htt_tx_desc_free(
- pdev->htt_pdev, pdev->tx_desc.array[i].tx_desc.htt_tx_desc);
+ pdev->htt_pdev, pdev->tx_desc.array[i].tx_desc->htt_tx_desc);
}
fail4:
+ for (i = 0; i < pages_idx; i++)
+ adf_os_mem_free(desc_pages[i]);
+ adf_os_mem_free(desc_pages);
+
adf_os_mem_free(pdev->tx_desc.array);
#ifdef IPA_UC_OFFLOAD
if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
@@ -822,6 +870,8 @@
ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
{
int i;
+ unsigned int page_idx;
+
/*checking to ensure txrx pdev structure is not NULL */
if (!pdev) {
TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "NULL pdev passed to %s\n", __func__);
@@ -877,17 +927,23 @@
* been given to the target to transmit, for which the
* target has never provided a response.
*/
- if (adf_os_atomic_read(&pdev->tx_desc.array[i].tx_desc.ref_cnt)) {
+ if (adf_os_atomic_read(&pdev->tx_desc.array[i].tx_desc->ref_cnt)) {
TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
"Warning: freeing tx frame "
"(no tx completion from the target)\n");
ol_tx_desc_frame_free_nonstd(
- pdev, &pdev->tx_desc.array[i].tx_desc, 1);
+ pdev, pdev->tx_desc.array[i].tx_desc, 1);
}
- htt_tx_desc = pdev->tx_desc.array[i].tx_desc.htt_tx_desc;
+ htt_tx_desc = pdev->tx_desc.array[i].tx_desc->htt_tx_desc;
htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
}
+
+ for (page_idx = 0; page_idx < pdev->num_desc_pages; page_idx++) {
+ adf_os_mem_free(pdev->desc_pages[page_idx]);
+ }
+ adf_os_mem_free(pdev->desc_pages);
+
adf_os_mem_free(pdev->tx_desc.array);
#ifdef IPA_UC_OFFLOAD
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
index ae81627..998b899 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
@@ -195,13 +195,15 @@
struct ol_txrx_vdev_t* vdev;
#endif
void *txq;
+ void *p_link;
+ uint16_t id;
};
typedef TAILQ_HEAD(, ol_tx_desc_t) ol_tx_desc_list;
-union ol_tx_desc_list_elem_t {
- union ol_tx_desc_list_elem_t *next;
- struct ol_tx_desc_t tx_desc;
+struct ol_tx_desc_list_elem_t {
+ struct ol_tx_desc_list_elem_t *next;
+ struct ol_tx_desc_t *tx_desc;
};
union ol_txrx_align_mac_addr_t {
@@ -590,8 +592,8 @@
struct {
u_int16_t pool_size;
u_int16_t num_free;
- union ol_tx_desc_list_elem_t *array;
- union ol_tx_desc_list_elem_t *freelist;
+ struct ol_tx_desc_list_elem_t *array;
+ struct ol_tx_desc_list_elem_t *freelist;
} tx_desc;
struct {
@@ -827,6 +829,11 @@
struct ol_txrx_peer_t *ocb_peer;
int tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES];
+ unsigned int page_size;
+ unsigned int desc_mem_size;
+ unsigned int num_desc_pages;
+ unsigned int num_descs_per_page;
+ void **desc_pages;
};
struct ol_txrx_ocb_chan_info {
diff --git a/CORE/SERVICES/COMMON/adf/adf_os_mem.h b/CORE/SERVICES/COMMON/adf/adf_os_mem.h
index bc9e413..fc14518 100644
--- a/CORE/SERVICES/COMMON/adf/adf_os_mem.h
+++ b/CORE/SERVICES/COMMON/adf/adf_os_mem.h
@@ -233,5 +233,15 @@
return (a_int32_t)__adf_os_str_len(str);
}
+/**
+ * @brief Returns the system default page size
+ *
+ * @retval system default page size
+ */
+static inline a_int32_t
+adf_os_mem_get_page_size(void)
+{
+ return __adf_os_mem_get_page_size();
+}
#endif
diff --git a/CORE/SERVICES/COMMON/adf/linux/adf_os_mem_pvt.h b/CORE/SERVICES/COMMON/adf/linux/adf_os_mem_pvt.h
index b0ed660..d1205dd 100644
--- a/CORE/SERVICES/COMMON/adf/linux/adf_os_mem_pvt.h
+++ b/CORE/SERVICES/COMMON/adf/linux/adf_os_mem_pvt.h
@@ -53,6 +53,10 @@
#define pci_alloc_consistent(dev, size, paddr) NULL
#endif /* __KERNEL__ */
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif /* PAGE_SIZE */
+
static inline void *
__adf_os_mem_alloc(adf_os_device_t osdev, size_t size)
{
@@ -189,4 +193,15 @@
return strlen(str);
}
+/**
+ * @brief Returns the system default page size
+ *
+ * @retval system default page size
+ */
+static inline a_int32_t
+__adf_os_mem_get_page_size(void)
+{
+ return PAGE_SIZE;
+}
+
#endif /*ADF_OS_MEM_PVT_H*/