00001
00013 #include "ipc_emb.h"
00014 #include "dma.h"
00015 #include "macif.h"
00016 #include "ps.h"
00017 #include "rxl_cntrl.h"
00018 #if NX_UMAC_PRESENT
00019 #include "rxu_cntrl.h"
00020 #endif
00021 #include "txl_buffer.h"
00022
00023
/* Maximum number of TX confirmation batches that can be pending upload to the
 * host. NOTE(review): must remain a power of two for CFM_IDX_MSK below to be
 * a valid wrap-around index mask. */
#define CFM_PENDING_MAX 16
/* Mask converting a monotonically increasing LLI count into a user_cfm[] index. */
#define CFM_IDX_MSK (CFM_PENDING_MAX - 1)

/* Byte offset of the status field inside struct txl_buffer_control; added to a
 * host packet address to locate where the TX status must be DMAed. */
#define CFM_STATUS_OFFSET offsetof_b(struct txl_buffer_control, status)

/* Number of confirmations accumulated before the chain is pushed to DMA. */
#define CFM_CNT_THRESHOLD 8

/* Pattern written into the uploaded RX vector so the host can recognize it as valid. */
#define UF_RX_VEC_VALID_PATTERN 0x0000C0DE

/* DMA LLI control word for TX confirmation uploads: enable the CFM_TX LLI
 * counter and its completion IRQ so upload completion can be detected. */
#define CFM_LLICTRL (IPC_DMA_LLI_COUNTER_EN | (IPC_DMA_LLI_CFM_TX << IPC_DMA_LLI_COUNTER_POS) | \
                     IPC_DMA_LLI_IRQ_EN | (IPC_DMA_LLI_CFM_TX << IPC_DMA_LLI_IRQ_POS))
00042
00043
00044
00045
00046
00047
/// Bookkeeping for TX confirmation uploads to the host
struct macif_tx_cfm_tag
{
    /// First DMA descriptor of the confirmation chain currently being built
    struct dma_desc *first;
    /// Last DMA descriptor appended to the current chain
    struct dma_desc *curr;
    /// Indication bitfield accumulated for the current chain (AC or per-user bits)
    uint32_t cfm_ind;
    /// Number of confirmations accumulated in the current chain
    int cfm_cnt;
    /// Pending indication values, indexed by (LLI counter & CFM_IDX_MSK)
    uint32_t user_cfm[CFM_PENDING_MAX];
    /// DMA LLI counter value expected for the next completed batch
    uint16_t lli_cnt;
    /// Write index into user_cfm[] for the next pushed batch
    uint8_t in_idx;
};
00066
/// MAC interface (IPC flavor) environment
struct macif_ipc_env_tag
{
    /// TX confirmation upload state
    struct macif_tx_cfm_tag tx_cfm;
#if NX_UF_EN
    /// Free pool of unsupported RX vector descriptors awaiting upload
    struct co_list unsup_rx_vec_free_list;
#endif //NX_UF_EN
};
00077
00078
00079
00080
00081
/// Single global instance of the MAC interface environment
struct macif_ipc_env_tag macif_ipc_env;
00084
00085
00086
00087
00088
#if NX_UF_EN

/**
 * DMA-completion callback for an unsupported RX vector upload.
 *
 * Returns the descriptor to the free pool and notifies the host through IPC.
 *
 * @param[in] env       The struct unsup_rx_vector_desc that was just uploaded
 *                      (registered as callback environment in macif_init()).
 * @param[in] dma_type  DMA type/channel identifier (unused here).
 */
static void macif_ipc_uf_dma_handler(void *env, int dma_type)
{
    struct unsup_rx_vector_desc *desc = (struct unsup_rx_vector_desc*)env;

    /* Push the descriptor back to the free list inside a critical section,
       since the list is also accessed from other execution contexts. */
    GLOBAL_INT_DISABLE();
    co_list_push_back(&macif_ipc_env.unsup_rx_vec_free_list, &desc->list_hdr);
    GLOBAL_INT_RESTORE();

    /* Signal the host that a new unsupported RX vector has been uploaded. */
    ipc_emb_unsup_rx_vec_event_ind();
}
#endif //NX_UF_EN
00110
/**
 * Initialize the MAC interface module.
 *
 * Resets the local environment, resynchronizes the TX confirmation bookkeeping
 * with the current hardware DMA LLI counter and, when unsupported-frame
 * reporting is enabled, builds the free pool of RX vector upload descriptors.
 *
 * @return 0 (always succeeds).
 */
int macif_init(void)
{
#if NX_UF_EN
    int i;
#endif //NX_UF_EN

    /* Start from a clean environment. */
    memset(&macif_ipc_env, 0, sizeof(macif_ipc_env));

    /* The next confirmation batch completes when the HW LLI counter reaches
       one past its current value; derive the matching user_cfm[] slot. */
    macif_ipc_env.tx_cfm.lli_cnt = dma_lli_counter_get(IPC_DMA_LLI_CFM_TX) + 1;
    macif_ipc_env.tx_cfm.in_idx = macif_ipc_env.tx_cfm.lli_cnt & CFM_IDX_MSK;

#if NX_UF_EN
    /* Build the free pool of unsupported RX vector descriptors. */
    co_list_init(&macif_ipc_env.unsup_rx_vec_free_list);

    for (i = 0; i < UNSUP_RX_VECT_MAX; i++)
    {
        struct unsup_rx_vector_desc *desc = &rx_vector_desc_array[i];

        /* Upload DMA always sources from the local copy of the RX vector. */
        desc->dma_desc.src = CPU2HW(&desc->rx_vector);
        desc->dma_desc.length = sizeof_b(desc->rx_vector);

        /* Completion callback returns the descriptor to the free pool. */
        desc->gp_dma_desc.dma_desc = &desc->dma_desc;
        desc->gp_dma_desc.cb = macif_ipc_uf_dma_handler;
        desc->gp_dma_desc.env = desc;

        co_list_push_back(&macif_ipc_env.unsup_rx_vec_free_list, &desc->list_hdr);
    }
#endif //NX_UF_EN

    return 0;
}
00145
/**
 * Forward the message event to the IPC layer.
 *
 * @param[in] dummy  Event parameter, passed through unchanged.
 */
void macif_msg_evt(int dummy)
{
    /* Fix: 'return <void expr>;' in a void function is a C constraint
       violation (C11 6.8.6.4); just call and fall through. */
    ipc_emb_msg_evt(dummy);
}
00150
/**
 * Forward a kernel message to the host via the IPC layer.
 *
 * @param[in] ke_msg  Kernel message to forward (not modified).
 */
void macif_kmsg_fwd(const struct ke_msg *ke_msg)
{
    /* Fix: 'return <void expr>;' in a void function is a C constraint
       violation (C11 6.8.6.4); just call and fall through. */
    ipc_emb_kmsg_fwd(ke_msg);
}
00155
/**
 * Indicate a primary TBTT to the host via the IPC layer.
 */
void macif_prim_tbtt_ind(void)
{
    /* Fix: 'return <void expr>;' in a void function is a C constraint
       violation (C11 6.8.6.4); just call and fall through. */
    ipc_emb_prim_tbtt_ind();
}
00160
/**
 * Indicate a secondary TBTT to the host via the IPC layer.
 */
void macif_sec_tbtt_ind(void)
{
    /* Fix: 'return <void expr>;' in a void function is a C constraint
       violation (C11 6.8.6.4); just call and fall through. */
    ipc_emb_sec_tbtt_ind();
}
00165
00166 uint8_t macif_rx_get_packet_threshold(void)
00167 {
00168 return (IPC_RXBUF_CNT / 4);
00169 }
00170
/**
 * Notify the host, through the IPC layer, that RX data is available.
 */
void macif_rx_data_ind(void)
{
    /* Simple pass-through to the embedded IPC RX-data indication. */
    ipc_emb_rxdata_ind();
}
00175
00176 bool macif_rx_buf_check(void)
00177 {
00178 return ipc_emb_hostrxbuf_check();
00179 }
00180
#if NX_UMAC_PRESENT
/**
 * Get the next host RX buffer from the IPC layer.
 *
 * @param[out] host_id  Filled by the IPC layer with the host-side identifier
 *                      of the returned buffer.
 * @return Buffer value returned by the IPC layer.
 */
uint32_t macif_rx_buf_get(uint32_t *host_id)
{
    return ipc_emb_hostrxbuf_get(host_id);
}
#else
/**
 * Get the next host RX buffer from the IPC layer.
 *
 * @return Buffer value returned by the IPC layer.
 */
uint32_t macif_rx_buf_get(void)
{
    return ipc_emb_hostrxbuf_get();
}
#endif //(NX_UMAC_PRESENT)
00192
#if NX_UMAC_PRESENT
/**
 * Upload a list of RX status descriptors to the host.
 *
 * Pops descriptors from @p desc_list while host RX descriptor slots are
 * available, chains their DMA descriptors, registers each one as pending on
 * the RX upload control, and finally starts the chained DMA transfer.
 * Descriptors remaining in @p desc_list when the host runs out of slots are
 * left untouched for a later call.
 *
 * @param[in] desc_list  List of struct rxu_stat_desc awaiting upload.
 */
void macif_rx_desc_upload(struct co_list *desc_list)
{
    /* Peek (do not pop yet) the first descriptor of the list. */
    struct rxu_stat_desc *rx_stat_desc = (struct rxu_stat_desc *)co_list_pick(desc_list);
    /* Head of the DMA chain being built (NULL until first slot is found). */
    struct dma_desc *first_dma_desc= NULL;
    /* Tail of the DMA chain being built. */
    struct dma_desc *last_dma_desc = NULL;

    while (rx_stat_desc)
    {
        uint32_t dma_addr;

        PROF_IPCDESC_TRANSFER_SET();

        /* Stop chaining when the host has no free RX descriptor slot. */
        if (!ipc_emb_hostrxdesc_check())
        {
            break;
        }

        /* Host address where this status descriptor must be copied. */
        dma_addr = ipc_emb_hostrxdesc_get();

        if (!first_dma_desc)
        {
            first_dma_desc = &rx_stat_desc->dma_desc;
        }

        rx_stat_desc->dma_desc.dest = dma_addr;

        /* Link this descriptor onto the tail of the chain. */
        if (last_dma_desc)
        {
            last_dma_desc->next = CPU2HW(&rx_stat_desc->dma_desc);
        }

        last_dma_desc = &rx_stat_desc->dma_desc;

        /* Only now remove it from the input list (a host slot is secured). */
        co_list_pop_front(desc_list);

        /* Track the descriptor as pending upload. */
        rxl_upload_cntrl_push_pending(&rx_stat_desc->upload_cntrl);

        PROF_IPCDESC_TRANSFER_CLR();

        /* Move on to the next descriptor, if any. */
        rx_stat_desc = (struct rxu_stat_desc *)co_list_pick(desc_list);
    }

    if (first_dma_desc)
    {
        /* Terminate the chain before handing it to the DMA engine. */
        last_dma_desc->next = 0;

        dma_push(first_dma_desc, last_dma_desc, RX_DATA_UPLOAD_CHAN);
    }
}
#endif
00252
#if NX_UF_EN
/**
 * Upload an unsupported RX vector to the host.
 *
 * Takes a descriptor from the free pool and a buffer from the host; if either
 * is unavailable the indication is silently dropped (best effort). Otherwise
 * the RX vector is copied locally, tagged with the valid pattern, and pushed
 * on the upload DMA channel; the completion callback returns the descriptor
 * to the pool.
 *
 * @param[in] rx_vector  RX vector to upload (copied before DMA).
 */
void macif_uf_ind(struct rx_vector_desc *rx_vector)
{
    struct unsup_rx_vector_desc *desc;
    uint32_t hostbuf;

    /* Grab a free descriptor; drop the indication if the pool is empty. */
    desc = (struct unsup_rx_vector_desc *)co_list_pop_front(&macif_ipc_env.unsup_rx_vec_free_list);
    if (desc == NULL)
        return;

    /* Grab a host buffer; on failure put the descriptor back and drop. */
    hostbuf = ipc_emb_hostunsuprxvectbuf_get();
    if (hostbuf == 0)
    {
        co_list_push_back(&macif_ipc_env.unsup_rx_vec_free_list, &desc->list_hdr);
        return;
    }

    /* Copy the vector locally (DMA sources from desc->rx_vector, see
       macif_init()) and mark it valid for the host. */
    desc->dma_desc.dest = hostbuf;
    desc->rx_vector = *rx_vector;
    desc->rx_vector.pattern = UF_RX_VEC_VALID_PATTERN;

    /* Start the upload; macif_ipc_uf_dma_handler() runs on completion. */
    hal_dma_push(&desc->gp_dma_desc, DMA_UL);
}
#endif //NX_UF_EN
00281
/**
 * Forward a TX queue event to the IPC layer.
 *
 * @param[in] queue_idx  Index of the TX queue the event relates to.
 */
void macif_tx_evt(int queue_idx)
{
    /* Fix: 'return <void expr>;' in a void function is a C constraint
       violation (C11 6.8.6.4); just call and fall through. */
    ipc_emb_tx_evt(queue_idx);
}
00286
00287 bool macif_tx_q_has_data(int queue_idx)
00288 {
00289 return ipc_emb_tx_q_has_data(queue_idx);
00290 }
00291
00292 uint8_t macif_tx_q_len(int queue_idx, int vif_idx)
00293 {
00294 return ipc_emb_tx_q_len(queue_idx, vif_idx);
00295 }
00296
00297 uint32_t macif_tx_pattern_addr_get(void)
00298 {
00299 return ipc_emb_tx_pattern_addr_get();
00300 }
00301
00302 uint32_t macif_buffered_get(uint8_t sta, uint8_t tid)
00303 {
00304 return ipc_emb_buffered_get(sta, tid);
00305 }
00306
#if RW_MUMIMO_TX_EN
/**
 * Enable MU-MIMO users on a TX queue via the IPC layer.
 *
 * @param[in] queue_idx     Index of the TX queue.
 * @param[in] active_users  Bitfield of users to activate.
 */
void macif_tx_enable_users(int queue_idx, uint8_t active_users)
{
    /* Fix: 'return <void expr>;' in a void function is a C constraint
       violation (C11 6.8.6.4); just call and fall through. */
    ipc_emb_enable_users(queue_idx, active_users);
}
#endif
00313
/**
 * Check whether the TX confirmation DMA batch tagged @p next_lli_cnt is done.
 *
 * Wrap-safe comparison on the 16-bit LLI counter: the unsigned difference
 * (hw_counter - next_lli_cnt) is below half the uint16_t range iff the
 * hardware counter has reached or passed next_lli_cnt, even across wrap.
 *
 * @param[in] next_lli_cnt  Expected counter value for the batch.
 * @return true if the hardware LLI counter has reached @p next_lli_cnt.
 */
static bool macif_tx_cfm_lli_done(uint16_t next_lli_cnt)
{
    return (((uint16_t)(dma_lli_counter_get(IPC_DMA_LLI_CFM_TX) - (next_lli_cnt)))
                < (((uint16_t)-1) / 2));
}
00330
/**
 * Start accumulating a new TX confirmation batch.
 *
 * Resets the DMA chain pointers and the confirmation count, and seeds the
 * indication bitfield for the batch.
 *
 * @param[in] access_category  Access category of the batch (only used in the
 *                             non-MU-MIMO build, where the indication is
 *                             simply the AC bit; with MU-MIMO the per-user
 *                             bits are accumulated in macif_tx_cfm_push()).
 */
void macif_tx_cfm_start(uint8_t access_category)
{
    macif_ipc_env.tx_cfm.first = NULL;
    macif_ipc_env.tx_cfm.curr = NULL;
#if RW_MUMIMO_TX_EN
    /* Bits set later, one per (user, AC) pair, in macif_tx_cfm_push(). */
    macif_ipc_env.tx_cfm.cfm_ind = 0;
#else
    /* Single-user: the indication carries only the access category bit. */
    macif_ipc_env.tx_cfm.cfm_ind = CO_BIT(access_category);
#endif
    macif_ipc_env.tx_cfm.cfm_cnt = 0;
}
00351
/**
 * Append a TX descriptor's confirmation to the batch being built.
 *
 * Programs the descriptor's DMA destination with the host address where the
 * status must be written, links it onto the current confirmation chain, and
 * pushes the chain to DMA early (via macif_tx_cfm_done()) once
 * CFM_CNT_THRESHOLD confirmations have accumulated.
 *
 * Must be called between macif_tx_cfm_start() and macif_tx_cfm_done().
 *
 * @param[in] access_category  Access category of the confirmed descriptor.
 * @param[in] txdesc           Descriptor whose confirmation is pushed.
 */
void macif_tx_cfm_push(uint8_t access_category, struct txdesc *txdesc)
{
    struct dma_desc *desc = &txdesc->lmac.hw_desc->dma_desc;
    struct macif_tx_cfm_tag *tx_cfm = &macif_ipc_env.tx_cfm;

#if NX_UMAC_PRESENT
    /* UMAC build: host gave a dedicated status descriptor address. */
    desc->dest = txdesc->host.status_desc_addr;
#else
    /* LMAC-only build: status lives at a fixed offset in the host packet. */
#if NX_AMSDU_TX
    desc->dest = txdesc->host.packet_addr[0] + CFM_STATUS_OFFSET;
#else
    desc->dest = txdesc->host.packet_addr + CFM_STATUS_OFFSET;
#endif
#endif
    /* ctrl stays 0 for intermediate links; only the chain tail gets
       CFM_LLICTRL, set in macif_tx_cfm_done(). */
    desc->ctrl = 0;

#if (RW_MUMIMO_TX_EN)
    /* Record which (user, AC) pair this confirmation belongs to. */
    tx_cfm->cfm_ind |= CO_BIT(get_user_pos(txdesc) + access_category * RW_USER_MAX);
#endif

    /* Link the descriptor at the tail of the chain (or start a new chain). */
    if (tx_cfm->first == NULL)
    {
        tx_cfm->first = desc;
    }
    else
    {
        tx_cfm->curr->next = CPU2HW(desc);
    }

    tx_cfm->curr = desc;

    tx_cfm->cfm_cnt++;

    /* Flush early so a long burst does not delay host confirmations. */
    if (tx_cfm->cfm_cnt >= CFM_CNT_THRESHOLD)
    {
        /* Push the chain built so far (no polling). */
        macif_tx_cfm_done(access_category, false);

        /* Restart an empty chain for the remaining confirmations. */
        tx_cfm->cfm_cnt = 0;
        tx_cfm->first = NULL;
        tx_cfm->curr = NULL;
    }

#if NX_POWERSAVE
    /* One less packet in flight for the power-save accounting. */
    txl_cntrl_env.pck_cnt--;
#endif
}
00408
/**
 * Close the current TX confirmation batch and push it to DMA.
 *
 * Tags the chain tail with CFM_LLICTRL (LLI counter + IRQ on completion),
 * records the batch's indication bitfield in the next user_cfm[] slot, and
 * starts the DMA. If @p poll is true, busy-waits for the transfer and runs
 * the completion handler inline instead of waiting for the IRQ.
 *
 * No-op when the batch is empty.
 *
 * @param[in] access_category  Access category of the batch (currently unused
 *                             here; kept for interface symmetry).
 * @param[in] poll             true to poll for DMA completion synchronously.
 */
void macif_tx_cfm_done(uint8_t access_category, bool poll)
{
    struct macif_tx_cfm_tag *tx_cfm = &macif_ipc_env.tx_cfm;

    if (tx_cfm->first != NULL)
    {
        /* Only the tail raises the LLI counter/IRQ on completion. */
        tx_cfm->curr->ctrl = CFM_LLICTRL;

#if NX_POWERSAVE
        GLOBAL_INT_DISABLE();
        /* Keep the system awake until the upload completes. */
        ps_env.prevent_sleep |= PS_TX_CFM_UPLOADING;
#endif

        /* Store the indication for this batch; the slot must have been
           consumed (zeroed) by the DMA interrupt handler already. */
        ASSERT_ERR(tx_cfm->user_cfm[tx_cfm->in_idx] == 0);
        tx_cfm->user_cfm[tx_cfm->in_idx] = tx_cfm->cfm_ind;
        tx_cfm->in_idx = (tx_cfm->in_idx + 1) & CFM_IDX_MSK;

        /* Start the upload of the whole chain. */
        dma_push(tx_cfm->first, tx_cfm->curr, RX_DATA_UPLOAD_CHAN);

        if (poll)
        {
            /* Busy-wait for the LLI transfer to complete... */
            dma_lli_poll(IPC_DMA_LLI_CFM_TX);

            /* ...and handle the completion inline. */
            macif_tx_cfm_dma_int_handler();
        }

#if NX_POWERSAVE
        GLOBAL_INT_RESTORE();
#endif
    }
}
00445
/**
 * TX confirmation DMA interrupt handler.
 *
 * Acknowledges the CFM DMA interrupt(s), then forwards to the host every
 * confirmation batch whose LLI counter value has been reached, consuming the
 * matching user_cfm[] slots in order. Finally clears the power-save upload
 * guard set by macif_tx_cfm_done().
 *
 * Also called inline from macif_tx_cfm_done() in polling mode.
 */
void macif_tx_cfm_dma_int_handler(void)
{
    struct macif_tx_cfm_tag *tx_cfm = &macif_ipc_env.tx_cfm;

    /* Snapshot the pending CFM interrupt bits. */
    uint32_t irqstatus = dma_int_status_get() & IPC_DMA_LLI_CFM_MASK;

    PROF_TX_CFM_DMA_IRQ_SET();

    /* Acknowledge what was pending on entry. */
    dma_int_ack_clear(irqstatus);

    /* Drain every batch whose LLI counter has been reached. */
    while (macif_tx_cfm_lli_done(tx_cfm->lli_cnt))
    {
        /* Slot holding the indication recorded for this batch. */
        uint8_t out_idx = tx_cfm->lli_cnt & CFM_IDX_MSK;

        /* NOTE(review): re-acknowledges with the status read on entry, not a
           fresh read — presumably to clear interrupts raised while looping;
           confirm this is intentional. */
        dma_int_ack_clear(irqstatus);

        /* Forward the batch indication to the host and free the slot. */
        ipc_emb_txcfm_ind(tx_cfm->user_cfm[out_idx]);
        tx_cfm->user_cfm[out_idx] = 0;

        /* Expect the next batch. */
        tx_cfm->lli_cnt++;
    }

#if NX_POWERSAVE
    /* All uploads handled: allow the system to sleep again. */
    ps_env.prevent_sleep &= ~PS_TX_CFM_UPLOADING;
#endif

    PROF_TX_CFM_DMA_IRQ_CLR();
}
00483
00484