00001
00020
00021
00022
00023
00024 #include "co_int.h"
00025 #include "co_bool.h"
00026 #include <string.h>
00027
00028 #include "dbg_assert.h"
00029 #include "mac.h"
00030 #include "mac_frame.h"
00031 #include "mm.h"
00032 #include "macif.h"
00033 #include "rxl_hwdesc.h"
00034 #include "txl_buffer.h"
00035 #if NX_TX_FRAME
00036 #include "txl_frame.h"
00037 #endif
00038 #include "tx_swdesc.h"
00039 #include "txl_cfm.h"
00040 #include "txl_cntrl.h"
00041 #include "rxl_cntrl.h"
00042 #include "reg_mac_pl.h"
00043 #include "reg_mac_core.h"
00044 #include "dbg.h"
00045 #include "ps.h"
00046 #if (NX_P2P)
00047 #include "p2p.h"
00048 #endif //(NX_P2P)
00049 #if (NX_TD)
00050 #include "td.h"
00051 #endif //(NX_TD)
00052 #if NX_UMAC_PRESENT
00053 #include "txu_cntrl.h"
00054 #include "apm.h"
00055 #endif
00056 #if NX_MFP
00057 #include "mfp.h"
00058 #endif
00059 #if (RW_BFMER_EN)
00060 #include "bfr.h"
00061 #endif //(RW_BFMER_EN)
00062 #if NX_AMPDU_TX
00063 #include "txl_agg.h"
00064 #endif
00065 #if NX_MAC_HE
00066 #include "txl_he.h"
00067 #endif
00068
00069
00070
00071
00072
00074 #if NX_BEACONING
00075 #define TX_IRQ_BITS ( NXMAC_AC_0_TX_TRIGGER_BIT | NXMAC_AC_1_TX_TRIGGER_BIT | \
00076 NXMAC_AC_2_TX_TRIGGER_BIT | NXMAC_AC_3_TX_TRIGGER_BIT | \
00077 NXMAC_BCN_TX_TRIGGER_BIT )
00078 #else
00079 #define TX_IRQ_BITS ( NXMAC_AC_0_TX_TRIGGER_BIT | NXMAC_AC_1_TX_TRIGGER_BIT | \
00080 NXMAC_AC_2_TX_TRIGGER_BIT | NXMAC_AC_3_TX_TRIGGER_BIT)
00081 #endif
00082
00083 #if RW_MUMIMO_TX_EN
00085 #define TX_SEC_IRQ_BITS_MERGED ( NXMAC_SEC_U_3AC_3_TX_TRIGGER_BIT | \
00086 NXMAC_SEC_U_3AC_2_TX_TRIGGER_BIT | \
00087 NXMAC_SEC_U_3AC_1_TX_TRIGGER_BIT | \
00088 NXMAC_SEC_U_3AC_0_TX_TRIGGER_BIT | \
00089 NXMAC_SEC_U_2AC_3_TX_TRIGGER_BIT | \
00090 NXMAC_SEC_U_2AC_2_TX_TRIGGER_BIT | \
00091 NXMAC_SEC_U_2AC_1_TX_TRIGGER_BIT | \
00092 NXMAC_SEC_U_2AC_0_TX_TRIGGER_BIT | \
00093 NXMAC_SEC_U_1AC_3_TX_TRIGGER_BIT | \
00094 NXMAC_SEC_U_1AC_2_TX_TRIGGER_BIT | \
00095 NXMAC_SEC_U_1AC_1_TX_TRIGGER_BIT | \
00096 NXMAC_SEC_U_1AC_0_TX_TRIGGER_BIT )
00097 #endif
00098
00099 #if NX_BW_LEN_ADAPT
00101 #define TX_BW_DROP_IRQ (NXMAC_AC_0BW_DROP_TRIGGER_BIT | NXMAC_AC_1BW_DROP_TRIGGER_BIT | \
00102 NXMAC_AC_2BW_DROP_TRIGGER_BIT | NXMAC_AC_3BW_DROP_TRIGGER_BIT)
00103 #endif
00104
00105 #if NX_MAC_HE
00107 #define TB_PROT_TRIGGER_BIT NXMAC_TB_PROT_TRIGGER_BIT
00109 #define TB_TX_TRIGGER_BIT (NXMAC_TB_TX_TRIGGER_BIT | NXMAC_TB_TX_BUF_TRIGGER_BIT | NXMAC_TB_TX_CANCELLED_BIT)
00110 #else
00112 #define TB_PROT_TRIGGER_BIT 0
00114 #define TB_TX_TRIGGER_BIT 0
00115 #endif
00116
00118 #if RW_MUMIMO_TX_EN
00119 #define SEC_USER_TX_TRIGGER_BIT NXMAC_SEC_USER_TX_TRIGGER_BIT
00120 #else
00121 #define SEC_USER_TX_TRIGGER_BIT 0
00122 #endif
00123
00125 #define TX_PROT_IRQ ( NXMAC_AC_0BW_DROP_TRIGGER_BIT | \
00126 NXMAC_AC_1BW_DROP_TRIGGER_BIT | \
00127 NXMAC_AC_2BW_DROP_TRIGGER_BIT | \
00128 NXMAC_AC_3BW_DROP_TRIGGER_BIT | \
00129 TB_PROT_TRIGGER_BIT | \
00130 NXMAC_AC_0_PROT_TRIGGER_BIT | \
00131 NXMAC_AC_1_PROT_TRIGGER_BIT | \
00132 NXMAC_AC_2_PROT_TRIGGER_BIT | \
00133 NXMAC_AC_3_PROT_TRIGGER_BIT )
00134
00136 #define TX_TRANSMIT_IRQ ( SEC_USER_TX_TRIGGER_BIT | \
00137 TB_TX_TRIGGER_BIT | \
00138 NXMAC_AC_0_TX_TRIGGER_BIT | \
00139 NXMAC_AC_1_TX_TRIGGER_BIT | \
00140 NXMAC_AC_2_TX_TRIGGER_BIT | \
00141 NXMAC_AC_3_TX_TRIGGER_BIT | \
00142 NXMAC_BCN_TX_TRIGGER_BIT | \
00143 NXMAC_AC_0_TX_BUF_TRIGGER_BIT | \
00144 NXMAC_AC_1_TX_BUF_TRIGGER_BIT | \
00145 NXMAC_AC_2_TX_BUF_TRIGGER_BIT | \
00146 NXMAC_AC_3_TX_BUF_TRIGGER_BIT | \
00147 NXMAC_BCN_TX_BUF_TRIGGER_BIT )
00148
00149
00151 #define FRAME_OK (DESC_DONE_TX_BIT | FRAME_SUCCESSFUL_TX_BIT)
00152
/// Per-queue TX timeout values, indexed by access category (AC0..AC3, plus
/// the beacon queue when beaconing support is compiled in). Used to program
/// the absolute HW timers in txl_timer_start()/txl_timer_move().
const uint32_t TX_TIMEOUT[NX_TXQ_CNT] =
{
    TX_AC0_TIMEOUT,
    TX_AC1_TIMEOUT,
    TX_AC2_TIMEOUT,
    TX_AC3_TIMEOUT,
    #if (NX_BEACONING)
    TX_BCN_TIMEOUT
    #endif
};
00164
00165
00166
00167
00168
/// TX control module environment: per-AC queue state (txlist), the global TX
/// sequence number (seqnbr) and, depending on features, packet/MU-MIMO PPDU
/// counters used elsewhere in this file.
struct txl_cntrl_env_tag txl_cntrl_env;
00170
00171
00172
00173
00174
00184 __INLINE void txl_smoothing_set(struct tx_hd *thd, struct tx_policy_tbl *pt,
00185 uint32_t smoothing)
00186 {
00187 #if !NX_MAC_HE
00188 thd->phyctrlinfo |= smoothing;
00189 #else
00190 pt->phycntrlinfo1 |= smoothing;
00191 #endif
00192 }
00193
00202 __INLINE void txl_timer_start(uint8_t ac)
00203 {
00204 int timer_id = TX_AC2TIMER(ac);
00205 uint32_t timer_bit = CO_BIT(timer_id);
00206 uint32_t curr_time = hal_machw_time();
00207
00208
00209 nxmac_abs_timer_set(timer_id, curr_time + TX_TIMEOUT[ac]);
00210
00211
00212 nxmac_timers_int_event_clear(timer_bit);
00213 nxmac_timers_int_un_mask_set(nxmac_timers_int_un_mask_get() | timer_bit);
00214 }
00215
00224 __INLINE void txl_timer_move(uint8_t ac)
00225 {
00226 uint32_t curr_time = hal_machw_time();
00227
00228
00229 nxmac_abs_timer_set(TX_AC2TIMER(ac), curr_time + TX_TIMEOUT[ac]);
00230 }
00231
00240 __INLINE void txl_timer_clear(uint8_t ac)
00241 {
00242 uint32_t timer_msk = nxmac_timers_int_un_mask_get();
00243 uint32_t timer_bit = CO_BIT(TX_AC2TIMER(ac));
00244
00245 nxmac_timers_int_un_mask_set(timer_msk & ~timer_bit);
00246 nxmac_timers_int_event_clear(timer_bit);
00247 }
00248
00249 #if !NX_FULLY_HOSTED
00250 #if NX_TX_FRAME
00251
/**
 * Emulate the payload download for an internally generated frame.
 *
 * No real payload DMA is needed for internal frames; only the TBD pattern
 * word is DMA'd in, which marks the buffer as downloaded for the rest of the
 * TX path (presumably because the payload is already resident in local
 * memory — TODO confirm).
 *
 * @param txdesc          Descriptor of the internal frame
 * @param access_category Queue the frame belongs to
 */
static void txl_int_fake_transfer(struct txdesc *txdesc, uint8_t access_category)
{
    // Buffer pre-attached to this internal frame descriptor
    struct txl_buffer_tag *buf = txl_buffer_get(txdesc);
    struct tx_pbd *tbd = &buf->tbd;
    struct dma_desc *dma_desc_pat = &buf->dma_desc_pat;

    // Program a minimal DMA transfer writing only the TBD pattern word
    dma_desc_pat->src = macif_tx_pattern_addr_get();
    dma_desc_pat->dest = CPU2HW(&tbd->upatterntx);
    dma_desc_pat->length = sizeof_b(tbd->upatterntx);
    dma_desc_pat->ctrl = TX_LLICTRL(access_category, 1);

    // Link the buffer back to its descriptor
    buf->txdesc = txdesc;
#if NX_MAC_HE
    // Singleton is not ready until descriptor configuration completes
    buf->flags &= ~BUF_SINGLETON_READY;
#endif

    // Queue the buffer on the AC and kick the TX data DMA channel
    txl_buffer_push(access_category, buf);
    dma_push(dma_desc_pat, dma_desc_pat, IPC_DMA_CHANNEL_DATA_TX);
}
00282 #endif
00283
/**
 * Start the payload download of one frame (or one A-MSDU sub-frame).
 *
 * Allocates a TX buffer for the descriptor and programs the transfer; when
 * no buffer is available the descriptor is remembered as the next one to
 * download for this user.
 *
 * @param txdesc          Descriptor of the frame to download
 * @param access_category Queue the frame belongs to
 * @param user_idx        User index (MU-MIMO); 0 otherwise
 *
 * @return true if the buffer pool is full (download could not start),
 *         false otherwise
 */
static bool txl_payload_transfer(struct txdesc *txdesc, uint8_t access_category,
                                 uint8_t user_idx)
{
    struct txl_buffer_tag *buffer = NULL;
    bool buffer_full = true;
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];
#if NX_AMSDU_TX
    uint8_t pkt_idx;
#endif

#if NX_TX_FRAME
    // Internal frames need no real payload transfer: fake it and report
    // the buffer pool as not full
    if (is_int_frame(txdesc))
    {
        txl_int_fake_transfer(txdesc, access_category);
        return (false);
    }
#endif

#if NX_AMSDU_TX
#if NX_UMAC_PRESENT
    if (txl_buffer_is_amsdu_single_buf(txdesc))
    {
        // Whole A-MSDU downloaded in one buffer: must be the first sub-frame
        ASSERT_ERR(txlist->dwnld_index[user_idx] == 0);
        // 0xFF tells the allocator to cover the complete A-MSDU
        pkt_idx = 0xFF;
    }
    else
#endif
    {
        // Per-sub-frame download: use the current sub-frame index
        pkt_idx = txlist->dwnld_index[user_idx];
    }
    buffer = txl_buffer_alloc(txdesc, access_category, user_idx, pkt_idx);
#else
    buffer = txl_buffer_alloc(txdesc, access_category, user_idx);
#endif
    if (buffer != NULL)
    {
        // Attach the buffer to the descriptor (per sub-frame for A-MSDU)
#if NX_AMSDU_TX
        txdesc->lmac.buffer[txlist->dwnld_index[user_idx]] = buffer;
#else
        txdesc->lmac.buffer = buffer;
#endif

        buffer->txdesc = txdesc;

#if NX_AMSDU_TX
        if (txlist->dwnld_index[user_idx] > 0)
            // Subsequent sub-frames only update their payload buffer descriptor
            txl_buffer_update_tbd(txdesc, access_category, txlist->dwnld_index[user_idx]);
        else
#endif
            // First (or only) part: update the TX header descriptor
            txl_buffer_update_thd(txdesc, access_category);

#if NX_AMSDU_TX
        // Advance the download index for this user
#if NX_UMAC_PRESENT
        if (pkt_idx == 0xFF)
            // Single-buffer A-MSDU: all sub-frames are covered at once
            txlist->dwnld_index[user_idx] = txdesc->host.packet_cnt;
        else
#endif
            txlist->dwnld_index[user_idx]++;
#endif

        // Download successfully started
        buffer_full = false;
    }
    else
    {
        // No buffer: remember this descriptor to resume the download later
        txlist->first_to_download[user_idx] = txdesc;
    }

    return (buffer_full);
}
00374
/**
 * Keep the download pipeline of a queue fed.
 *
 * Starts payload transfers for pending descriptors until either enough
 * buffers are in flight (TX_MIN_DOWNLOAD_CNT), no descriptor is left to
 * download, or the buffer pool becomes full.
 *
 * @param access_category Queue to prepare
 * @param user_idx        User index (MU-MIMO); 0 otherwise
 */
static void txl_transmit_prep(int access_category, uint8_t user_idx)
{
    bool buffer_full = false;
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];

    // Loop until one of the stop conditions below is reached
    while (1)
    {
        // Next descriptor waiting for its payload download
        struct txdesc *txdesc = txlist->first_to_download[user_idx];

        // Enough buffers already queued for this AC/user
        if (txl_buffer_count(access_category, user_idx) >= TX_MIN_DOWNLOAD_CNT)
            break;

        if (txdesc == NULL)
            // Nothing left to download
            break;

        // Try to start the transfer of the next frame (or A-MSDU sub-frame)
        buffer_full = txl_payload_transfer(txdesc, access_category, user_idx);

        if (buffer_full)
            // Buffer pool exhausted: resume on next buffer free event
            break;

#if NX_AMSDU_TX
        // Move to the next descriptor only once all sub-frames are downloading
        if (txlist->dwnld_index[user_idx] >= txdesc->host.packet_cnt)
#endif
        {
            // Advance to the next descriptor in the list
            txlist->first_to_download[user_idx] = tx_desc_next(txdesc);
#if NX_AMSDU_TX
            // Restart the sub-frame index for the next A-MSDU
            txlist->dwnld_index[user_idx] = 0;
#endif
        }
    }
}
00425 #endif
00426
00427
/**
 * Initialize the TX header descriptor of a frame before payload download.
 *
 * Computes the on-air frame length (payload + UMAC-added header/tail + FCS)
 * and resets every HW-owned field of the THD to a safe default; the fields
 * that depend on the downloaded payload are filled later in
 * txl_hwdesc_config_post().
 *
 * @param txdesc          Descriptor to initialize
 * @param access_category Queue the frame belongs to (currently unused here)
 */
static void txl_hwdesc_config_pre(struct txdesc *txdesc, int access_category)
{
    struct tx_hd *txhd = &txdesc->lmac.hw_desc->thd;
#if NX_UMAC_PRESENT
    // Extra bytes the UMAC prepends/appends (e.g. security header/MIC)
    int add_len = txdesc->umac.head_len + txdesc->umac.tail_len;
#else
    int add_len = 0;
#endif

    // Total MPDU length, FCS included
#if NX_AMSDU_TX
    txhd->frmlen = add_len + MAC_FCS_LEN;
    // Sum the lengths of all A-MSDU sub-frames
    for (int i = 0; i < txdesc->host.packet_cnt; i++)
    {
        txhd->frmlen += txdesc->host.packet_len[i];
    }
#else
    txhd->frmlen = txdesc->host.packet_len + add_len + MAC_FCS_LEN;
#endif

    // No optional (BW fallback) lengths by default
    txhd->optlen[0] = 0;
    txhd->optlen[1] = 0;
    txhd->optlen[2] = 0;

    // Pattern checked by HW to validate the descriptor
    txhd->upatterntx = TX_HEADER_DESC_PATTERN;

    // Not chained to another MPDU (A-MPDU linking done elsewhere)
    txhd->nextmpdudesc_ptr = 0;

    // Not chained to a following frame exchange yet
    txhd->nextfrmexseq_ptr = 0;

#if !NX_FULLY_HOSTED
    // Payload buffer descriptors attached after download
    txhd->first_pbd_ptr = 0;

    // Direct data pointers unused in split-MAC mode
    txhd->datastartptr = 0;
    txhd->dataendptr = 0;
#endif

    // Policy table attached later (txl_hwdesc_config_post)
    txhd->policyentryaddr = 0;

    // Default descriptor type: plain unfragmented MSDU
    txhd->macctrlinfo2 = WHICHDESC_UNFRAGMENTED_MSDU;

    // No lifetime limit
    txhd->frmlifetime = 0;

    // Clear the HW status word before handing the descriptor over
    txhd->statinfo = 0;
}
00494
00495
/**
 * Finalize the HW descriptors of a frame once its payload is downloaded.
 *
 * Attaches the policy table, fills the MAC/PHY control words and handles the
 * feature-specific cases: MU-MIMO secondary users (compressed policy table),
 * A-MPDU first MPDU (aggregate THD configuration), protected management
 * frames (MFP), beamformed transmissions and HE specifics.
 *
 * @param txdesc          Descriptor of the downloaded frame
 * @param access_category Queue the frame belongs to
 */
static void txl_hwdesc_config_post(struct txdesc *txdesc, uint8_t access_category)
{
    struct txl_buffer_control *bufctrl = txl_buffer_control_get(txdesc);
    struct tx_hd *txhd = &txdesc->lmac.hw_desc->thd;
    struct tx_policy_tbl *pt = &bufctrl->policy_tbl;
    // Smoothing recommended by default; cleared below for beamformed TX
    uint32_t smoothing = SMOOTHING_TX_BIT;

#if NX_AMPDU_TX
#if RW_MUMIMO_TX_EN
    // --- MU-MIMO PPDU handling ---
    if (is_in_mumimo_ppdu(txdesc))
    {
        // Primary user carries the full policy table of the PPDU
        if (is_primary_user(txdesc))
        {
            // Only the first MPDU of the primary A-MPDU configures the A-THD
            if (is_mpdu_first(txdesc))
            {
                struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;
                struct tx_hd *a_thd = &agg_desc->a_thd;
                uint8_t smm_index;

                // MU-MIMO TX requires a beamforming report node
                ASSERT_ERR(txdesc->lmac.bfr_node);

                // Spatial mapping matrix index from the beamformer
                smm_index = txdesc->lmac.bfr_node->smm_index;

                // Flag the aggregate as having its first MPDU downloaded
                agg_desc->status |= AGG_FIRST_DOWNLOADED;

                // The aggregate THD points to the (full) policy table
                a_thd->policyentryaddr = CPU2HW(pt);

                // Merge per-frame PHY control, stripping the fields that are
                // owned by the MU PPDU itself (PAID/GID/MU flag)
                a_thd->phyctrlinfo |= bufctrl->phy_control_info &
                                      ~(PAID_TX_MASK | GID_TX_MASK | USE_MUMIMO_TX_BIT);

                // NSS is encoded elsewhere for MU — must be 0 here
                ASSERT_ERR((txdesc->umac.phy_flags & VHT_NSS_MASK) == 0);

#if NX_MAC_HE
                if (txl_he_is_he_su(txdesc->umac.phy_flags))
                {
                    txl_he_ltf_type_set(txdesc->umac.phy_flags, &pt->powercntrlinfo[0]);
                }
#endif

                // Configure protection for the A-MPDU
                txl_agg_set_ampdu_protection(&txdesc->umac.phy_flags);

                // Rate information of the whole PPDU
                pt->ratecntrlinfo[0] = txdesc->umac.phy_flags;

                // STBC not used for MU transmissions
                pt->phycntrlinfo1 &= ~STBC_PT_MASK;

                // Program the beamforming spatial mapping matrix index
                pt->phycntrlinfo2 &= ~SMM_INDEX_PT_MASK;
                pt->phycntrlinfo2 |= BMFED_BIT | (smm_index << SMM_INDEX_PT_OFT);

#if NX_MAC_HE
                // Keep a copy of the policy table in the aggregate descriptor
                memcpy(&agg_desc->pol_tbl, pt, sizeof(agg_desc->pol_tbl));
#endif
            }
        }
        // Secondary users: only a compressed policy table is needed, set up
        // once per user (singleton, or first MPDU of the user's A-MPDU)
        else if (!is_mpdu_agg(txdesc) || is_mpdu_first(txdesc))
        {
            struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;
            struct tx_compressed_policy_tbl *cpt = &bufctrl->comp_pol_tbl;
            // Extract the fields the compressed table carries from the full one
            uint32_t hw_key_idx = (pt->maccntrlinfo1 & KEYSRAM_INDEX_MASK) >> KEYSRAM_INDEX_OFT;
            uint32_t fec_coding = (pt->phycntrlinfo1 & FEC_CODING_PT_BIT) >> FEC_CODING_PT_OFT;
            uint32_t mcs_idx = (txdesc->umac.phy_flags & MCS_INDEX_TX_RCX_MASK)
                                                       >> MCS_INDEX_TX_RCX_OFT;
            uint32_t smm_index;

            // MU-MIMO TX requires a beamforming report node
            ASSERT_ERR(txdesc->lmac.bfr_node);

            smm_index = txdesc->lmac.bfr_node->smm_index;

            // Aggregated user: configure the aggregate THD instead of the MPDU THD
            txhd = is_mpdu_agg(txdesc) ? &agg_desc->a_thd : txhd;

            // Point the descriptor to the compressed policy table
            txhd->policyentryaddr = CPU2HW(cpt);

            cpt->upatterntx = POLICY_TABLE_PATTERN;
            cpt->sec_user_control = (mcs_idx << MCS_IDX_TX_CPT_OFT) |
                                    (fec_coding << FEC_CODING_CPT_OFT) |
                                    (smm_index << SMM_INDEX_CPT_OFT) |
                                    (hw_key_idx << KEYSRAM_INDEX_CPT_OFT);
        }
    }
    else
#endif
    // --- Single-user A-MPDU handling ---
    if (is_mpdu_agg(txdesc))
    {
        // Only the first MPDU of the aggregate configures the A-THD
        if (is_mpdu_first(txdesc))
        {
            struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;
            struct tx_hd *a_thd = &agg_desc->a_thd;

            // Aggregate THD uses this MPDU's policy table
            a_thd->policyentryaddr = CPU2HW(pt);

#if NX_MAC_HE
            if (txl_he_is_he_su(txdesc->umac.phy_flags))
            {
                txl_he_ltf_type_set(txdesc->umac.phy_flags, &pt->powercntrlinfo[0]);
            }
#endif

            // Configure protection for the A-MPDU
            txl_agg_set_ampdu_protection(&txdesc->umac.phy_flags);

            // Rate information of the PPDU
            pt->ratecntrlinfo[0] = txdesc->umac.phy_flags;

#if RW_BFMER_EN
            // Beamformed aggregate: program the spatial mapping index and
            // disable the smoothing recommendation
            if (txdesc->lmac.bfr_node)
            {
                uint8_t smm_index = txdesc->lmac.bfr_node->smm_index;

                pt->phycntrlinfo2 &= ~SMM_INDEX_PT_MASK;
                pt->phycntrlinfo2 |= BMFED_BIT | (smm_index << SMM_INDEX_PT_OFT);

                smoothing = 0;
            }
#endif

            // Flag the aggregate as having its first MPDU downloaded
            agg_desc->status |= AGG_FIRST_DOWNLOADED;

            // PHY control comes from the buffer control structure
            a_thd->phyctrlinfo = bufctrl->phy_control_info;

            // A-MPDUs always expect a compressed BlockAck
            a_thd->macctrlinfo1 = bufctrl->mac_control_info;
            a_thd->macctrlinfo1 &= (~EXPECTED_ACK_MSK);
            a_thd->macctrlinfo1 |= EXPECTED_ACK_COMPRESSED_BLOCK_ACK;

            // Apply the smoothing recommendation (location depends on MAC gen)
            txl_smoothing_set(a_thd, pt, smoothing);

#if NX_MAC_HE
            // The buffer can now be transmitted as a singleton if needed
            txl_buffer_get(txdesc)->flags |= BUF_SINGLETON_READY;

            // Keep a copy of the policy table in the aggregate descriptor
            memcpy(&agg_desc->pol_tbl, pt, sizeof(agg_desc->pol_tbl));
#endif
        }
    }
    else
#endif
    // --- Singleton MPDU handling ---
    {
#if (NX_UMAC_PRESENT)
        if (txdesc->host.flags & TXU_CNTRL_MGMT)
        {
            uint32_t mac_hdr_addr = txl_buffer_machdr_get(txdesc);
            struct mac_hdr *hdr = (struct mac_hdr *)(HW2CPU(mac_hdr_addr));

#if NX_MFP
            // Robust management frame: apply protection (encrypt) or MMIC
            if (txdesc->host.flags & TXU_CNTRL_MGMT_ROBUST)
            {
                if (txdesc->umac.head_len)
                {
                    // Unicast protected frame: encrypt the frame body
                    txu_cntrl_protect_mgmt_frame(txdesc, hdr, MAC_SHORT_MAC_HDR_LEN);
                }
                else if (txdesc->umac.tail_len)
                {
#if NX_AMSDU_TX
                    int len = txdesc->host.packet_len[0];
#else
                    int len = txdesc->host.packet_len;
#endif
                    uint32_t mmic_addr = 0;

#if NX_FULLY_HOSTED
                    // Append a PBD for the management MIC element
                    mmic_addr = txu_cntrl_mgmt_mic_pbd_append(txdesc);
#endif

                    // Group-addressed protected frame: add the BIP MMIC
                    mfp_add_mgmt_mic(txdesc, mac_hdr_addr, len, mmic_addr);
                }
            }
#endif

            // Management frames are never under BA agreement
            txhd->macctrlinfo2 &= ~(WHICHDESC_MSK | UNDER_BA_SETUP_BIT);
            // Group-addressed frames are not acknowledged
            if (MAC_ADDR_GROUP(&hdr->addr1))
                txhd->macctrlinfo1 = EXPECTED_ACK_NO_ACK;
            else
                txhd->macctrlinfo1 = EXPECTED_ACK_NORMAL_ACK;

#if NX_FULLY_HOSTED
            // Request an interrupt on completion of this frame
            txhd->macctrlinfo2 |= INTERRUPT_EN_TX;
#endif

            txhd->statinfo = 0;

            // Attach the policy table
            txhd->policyentryaddr = CPU2HW(&bufctrl->policy_tbl);
        }
        else
#endif //(NX_UMAC_PRESENT)
        {
#if NX_UMAC_PRESENT && !NX_FULLY_HOSTED
            // Append the TKIP MIC to the payload when required
            txu_cntrl_tkip_mic_append(txdesc, access_category);
#endif //NX_UMAC_PRESENT && !NX_FULLY_HOSTED

            // MAC control comes from the buffer control structure
            txhd->macctrlinfo1 = bufctrl->mac_control_info;
#if NX_AMSDU_TX
            // Multi-buffer A-MSDU cannot be retried by HW: clear retry limits
            if (txl_buffer_is_amsdu_multi_buf(txdesc))
            {
                pt->maccntrlinfo2 &= ~(LONG_RETRY_LIMIT_MASK | SHORT_RETRY_LIMIT_MASK);
            }
#endif

#if NX_FULLY_HOSTED
            // Request an interrupt on completion of this frame
            txhd->macctrlinfo2 |= INTERRUPT_EN_TX;
#endif

#if RW_BFMER_EN
            // Beamformed singleton: program the spatial mapping index and
            // disable the smoothing recommendation
            if (txdesc->lmac.bfr_node && bfr_is_bfmed_sglt_allowed(txdesc))
            {
                uint8_t smm_index = txdesc->lmac.bfr_node->smm_index;

                pt->phycntrlinfo2 &= ~SMM_INDEX_PT_MASK;
                pt->phycntrlinfo2 |= BMFED_BIT | (smm_index << SMM_INDEX_PT_OFT);

                smoothing = 0;
            }
#endif
#if NX_MAC_HE
            // Strip the HTC field if not allowed for this singleton
            if (!is_htc_sglt_allowed(txdesc))
                txl_buffer_remove_htc(txhd);
#endif

            // Attach the policy table
            txhd->policyentryaddr = CPU2HW(pt);
        }

        // PHY control comes from the buffer control structure
        txhd->phyctrlinfo = bufctrl->phy_control_info;

        // Apply the smoothing recommendation (location depends on MAC gen)
        txl_smoothing_set(txhd, pt, smoothing);

#if NX_MAC_HE
        // Check TXOP duration / RTS-CTS threshold for this MPDU
        txl_he_txop_dur_rtscts_thres_mpdu_check(txdesc);

        // The buffer is now ready for singleton transmission
        txl_buffer_get(txdesc)->flags |= BUF_SINGLETON_READY;
#endif
    }
}
00787
00788
/**
 * Write the sequence control field of a MAC header about to be transmitted.
 *
 * A new sequence number is allocated from the global counter when the frame
 * is the first (or only) fragment of an MSDU; otherwise the fragment keeps
 * the current sequence number.
 *
 * @param machdrptr HW address of the MAC header to update
 */
static void txl_machdr_format(uint32_t machdrptr)
{
    // Current fragment number of the MPDU.
    // NOTE(review): only one byte of the 16-bit sequence-control field is read
    // here while a 16-bit write is done below. This is equivalent to a 16-bit
    // read on little-endian targets because the fragment nibble sits in the
    // first byte — confirm this asymmetry is intentional.
    uint16_t seq_ctrl = co_read8p(machdrptr + MAC_HEAD_CTRL_OFT) & MAC_SEQCTRL_FRAG_MSK;

    // First fragment (fragment number 0): allocate a new sequence number
    if (seq_ctrl == 0)
    {
        txl_cntrl_env.seqnbr++;
    }
    // Merge the sequence number with the fragment number
    seq_ctrl |= txl_cntrl_env.seqnbr << MAC_SEQCTRL_NUM_OFT;

    // Write the updated sequence control back into the frame header
    co_write16p(machdrptr + MAC_HEAD_CTRL_OFT, seq_ctrl);
}
00814
00815
00824 static void txl_cntrl_newtail(uint8_t access_category)
00825 {
00826
00827 switch (access_category)
00828 {
00829 #if NX_BEACONING
00830 case AC_BCN:
00831 nxmac_dma_cntrl_set(NXMAC_TX_BCN_NEW_TAIL_BIT);
00832 break;
00833 #endif
00834 case AC_VO:
00835 nxmac_dma_cntrl_set(NXMAC_TX_AC_3_NEW_TAIL_BIT);
00836 break;
00837 case AC_VI:
00838 nxmac_dma_cntrl_set(NXMAC_TX_AC_2_NEW_TAIL_BIT);
00839 break;
00840 case AC_BE:
00841 nxmac_dma_cntrl_set(NXMAC_TX_AC_1_NEW_TAIL_BIT);
00842 break;
00843 case AC_BK:
00844 nxmac_dma_cntrl_set(NXMAC_TX_AC_0_NEW_TAIL_BIT);
00845 break;
00846 default:
00847 ASSERT_ERR(0);
00848 break;
00849 }
00850 }
00851
/**
 * Process TX completion for one access category (called from TX IRQ path).
 *
 * Walks the transmitting list, confirming every frame whose status shows
 * DESC_DONE_TX_BIT, moving/clearing the per-queue timeout timer, and — with
 * A-MPDU support — alternating between checking MPDU THDs and the BlockAck
 * Request THD of the aggregate (txlist->chk_state state machine).
 *
 * @param access_category Queue to process
 */
static void txl_frame_exchange_done(uint8_t access_category)
{
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];

    // Loop until a frame still owned by the HW is found
    while (1)
    {
        struct txdesc * txdesc = NULL;

#if NX_AMPDU_TX
        // THD_CHK_STATE: check regular MPDU descriptors
        if (txlist->chk_state == THD_CHK_STATE)
        {
#endif
            // Oldest frame currently under transmission
            txdesc = (struct txdesc *)co_list_pick(&(txlist->transmitting[0]));

            if (txdesc != NULL)
            {
                struct tx_hd *txhd = &txdesc->lmac.hw_desc->thd;
                uint32_t txstatus = txhd->statinfo;
                struct tx_cfm_tag *cfm = txl_cfm_tag_get(txdesc);

                // HW has finished handling this descriptor
                if (txstatus & DESC_DONE_TX_BIT)
                {
                    PROF_AGG_SMPDU_DONETX_SET();

#if (RW_MUMIMO_TX_EN)
                    PROF_MU_USER_POS_IRQ_SET(get_user_pos(txdesc));
#endif

#if !NX_FULLY_HOSTED
                    // Release the payload buffers of this MPDU
                    txl_free_done_mpdu(txdesc, access_category, 0);
#endif

                    // Record the HW status for the confirmation path
                    cfm->status = txstatus;

#if NX_AMPDU_TX
                    txl_agg_check_saved_agg_desc(access_category);

                    // The chaining check below applies to singletons only
                    if (!is_mpdu_agg(txdesc))
#endif
                    {
                        // Next frame exchange chained after this one
                        struct tx_hd *nextdesc = HW2CPU(txhd->nextfrmexseq_ptr);

                        if (nextdesc != NULL)
                        {
#if NX_AMSDU_TX
                            struct tx_pbd *next_pbd;
#endif
#if NX_AMPDU_TX
                            struct tx_hd *a_thd = NULL;

                            // Skip an aggregate THD to reach the first MPDU THD
                            if ((nextdesc->macctrlinfo2 & WHICHDESC_MSK) == WHICHDESC_AMPDU_EXTRA)
                            {
                                a_thd = nextdesc;

                                nextdesc = HW2CPU(nextdesc->nextmpdudesc_ptr);

                                // An aggregate always has at least one MPDU
                                ASSERT_ERR(nextdesc != NULL);
                            }
#endif

                            // If the next exchange has not started/completed,
                            // re-arm the timeout and stop processing
#if NX_AMSDU_TX
                            next_pbd = HW2CPU(nextdesc->first_pbd_ptr);
                            if ((!next_pbd || !(next_pbd->bufctrlinfo & TBD_DONE_HW)) &&
                                !(nextdesc->statinfo & DESC_DONE_TX_BIT))
#else
                            if (!(nextdesc->statinfo & DESC_DONE_TX_BIT))
#endif
                            {
#if NX_AMPDU_TX
                                if (a_thd != NULL)
                                    // Verify the RTS/CTS retry limit on the aggregate
                                    txl_agg_check_rtscts_retry_limit(a_thd, access_category);
#endif

                                PROF_AGG_SMPDU_DONETX_CLR();

                                // Push the timeout back and exit the loop
                                txl_timer_move(access_category);
                                break;
                            }
                        }
                        else
                        {
                            // Nothing chained: stop the timeout timer
                            txl_timer_clear(access_category);

                            txlist->last_frame_exch = NULL;

#if NX_AMPDU_TX
                            // Last PPDU in flight: close any open aggregate
                            if (txlist->ppdu_cnt == 1)
                            {
#if RW_MUMIMO_TX_EN
                                if (txlist->mumimo.users)
                                {
                                    txl_agg_mumimo_close(access_category);
                                }
                                else
#endif
                                if (txlist->agg[0].desc != NULL && !macif_tx_q_has_data(access_category))
                                {
                                    // No more data pending: finalize the aggregate
                                    txl_agg_finish(access_category);
                                }
                            }
#endif
                        }
                    }

                    PROF_AGG_SMPDU_DONETX_CLR();
                }
                // Head frame not done yet: stop here
                else
                {
#if !NX_FULLY_HOSTED && NX_AMSDU_TX
                    // Free the sub-frame buffers already transmitted
                    txl_check_done_amsdu_subframe(txdesc, access_category, 0);
#endif

#if NX_AMPDU_TX
                    if (is_mpdu_first(txdesc))
                    {
                        // Verify the RTS/CTS retry limit on the aggregate
                        txl_agg_check_rtscts_retry_limit(&txdesc->lmac.agg_desc->a_thd,
                                                         access_category);
                    }
#endif
                    break;
                }

#if NX_AMPDU_TX
                // Last MPDU of an aggregate: switch to BAR THD checking
                if (is_mpdu_last(txdesc))
                {
                    txlist->agg_desc = txdesc->lmac.agg_desc;
                    txlist->chk_state = BAR_THD_CHK_STATE;
                }
#endif

                // Remove the confirmed frame from the transmitting list
                co_list_pop_front(&(txlist->transmitting[0]));

#if NX_AMPDU_TX
                if (!is_mpdu_agg(txdesc))
                {
                    // A singleton counts as a full PPDU
                    txlist->ppdu_cnt--;
                }
#endif

#if NX_TX_FRAME
                // Internal frames are confirmed locally
                if (is_int_frame(txdesc))
                {
                    txl_frame_cfm(txdesc);
                }
                else
#endif
                {
                    // Push the frame on the confirmation queue
                    txl_cfm_push(txdesc, txstatus, access_category);
                }

                // Re-arm the timeout for the remaining frames
                txl_timer_move(access_category);

            }
            else
            {
                // Transmitting list empty: reset chaining and stop the timer
                txlist->last_frame_exch = NULL;

                txl_timer_clear(access_category);

                break;
            }

#if NX_AMPDU_TX
        }
        // BAR_THD_CHK_STATE: check the BlockAck Request of the aggregate
        else
        {
            struct tx_agg_desc *agg_desc = txlist->agg_desc;
            struct tx_hd *bar_thd = &agg_desc->bar_thd;
            uint32_t bar_thd_status = bar_thd->statinfo;

#if RW_MUMIMO_TX_EN
            // Intermediate MU aggregates have no chaining of their own
            if (!(agg_desc->status & AGG_INT))
#endif
            {
                struct tx_hd *nextdesc = HW2CPU(bar_thd->nextfrmexseq_ptr);

                // Last PPDU in flight with nothing chained: close any open aggregate
                if ((nextdesc == NULL) && (txlist->ppdu_cnt == 1))
                {
#if RW_MUMIMO_TX_EN
                    if (txlist->mumimo.users)
                    {
                        txl_agg_mumimo_close(access_category);
                    }
                    else
#endif
                    if (txlist->agg[0].desc != NULL && !macif_tx_q_has_data(access_category))
                    {
                        // No more data pending: finalize the aggregate
                        txl_agg_finish(access_category);
                    }
                }
            }

            // HW has finished the BAR exchange
            if (bar_thd_status & DESC_DONE_TX_BIT)
            {
                PROF_AGG_BAR_DONETX_SET();

                // Re-arm the timeout
                txl_timer_move(access_category);

                if (bar_thd_status & FRAME_SUCCESSFUL_TX_BIT)
                {
                    // BAR acked but BA not yet processed: poll the RX path
                    // a few times to pick up the BA frame
                    if (!(agg_desc->status & AGG_BA_RECEIVED))
                    {
                        int i = 0;

                        do
                        {
                            rxl_immediate_frame_get();
                            i++;
                        } while ((i < 5) && (!(agg_desc->status & AGG_BA_RECEIVED)));
                    }

                    // The BA must have been received by now
                    ASSERT_REC(agg_desc->status & AGG_BA_RECEIVED);
                }
                else
                {
                    // BAR failed: drop the aggregate from the pending list
                    co_list_pop_front(&txlist->aggregates);
                }

                // Mark the aggregate as fully done
                agg_desc->status |= AGG_DONE;

                // Trigger the confirmation event for this queue
                ke_evt_set(txl_cfm_evt_bit[access_category]);

#if RW_MUMIMO_TX_EN
                // Secondary-user aggregate: hand its confirmations over
                if (agg_desc->prim_agg_desc && (agg_desc->prim_agg_desc != agg_desc))
                {
                    ASSERT_ERR(!co_list_is_empty(&agg_desc->cfm));

                    co_list_concat(&txl_cfm_env.cfmlist[access_category], &agg_desc->cfm);
                }

                // Intermediate MU aggregate: move to the next one in the PPDU
                if (agg_desc->status & AGG_INT)
                {
                    txlist->agg_desc = (struct tx_agg_desc *)co_list_next(&agg_desc->list_hdr);

                    ASSERT_ERR(txlist->agg_desc);
                }
                else
#endif
                {
#if RW_MUMIMO_TX_EN
                    // Last aggregate of a MU PPDU: update the global MU count
                    if (agg_desc->prim_agg_desc)
                    {
                        ASSERT_ERR(txl_cntrl_env.mumimo_ppdu_cnt);
                        txl_cntrl_env.mumimo_ppdu_cnt--;

                        // No MU PPDU left: allow BW drop again
                        if (!txl_cntrl_env.mumimo_ppdu_cnt)
                        {
                            nxmac_drop_to_lower_bw_setf(1);
                        }
                    }
#endif

                    // One complete PPDU confirmed
                    txlist->ppdu_cnt--;

                    // Go back to checking regular MPDU descriptors
                    txlist->chk_state = THD_CHK_STATE;
                    txlist->agg_desc = NULL;

                    // Another exchange is chained after the BAR
                    if (bar_thd->nextfrmexseq_ptr != 0)
                    {
                        // Keep the aggregate alive until the chain is unlinked
                        txlist->agg_desc_prev = agg_desc;

                        agg_desc->user_cnt++;
                    }
                    else
                    {
                        // Nothing chained anymore
                        txlist->last_frame_exch = NULL;
                    }
                }

                PROF_AGG_BAR_DONETX_CLR();
            }
            // BAR still owned by HW: stop processing
            else
            {
                break;
            }
        }
#endif
    }
}
01222
01223
/**
 * Chain a downloaded frame (or a completed aggregate) to the HW TX queue.
 *
 * Determines which THD starts and which THD ends the new frame exchange —
 * the frame's own THD for singletons, the aggregate THD + BAR THD for
 * A-MPDUs, the primary aggregate THD + last BAR THD for MU-MIMO PPDUs —
 * then hands them to txl_frame_exchange_chain(). Returns early when an
 * aggregate is not yet complete.
 *
 * @param txdesc          Descriptor of the downloaded frame
 * @param buffer          Buffer the payload was downloaded into
 * @param access_category Queue the frame belongs to
 */
static void txl_frame_exchange_manage(struct txdesc *txdesc,
                                      struct txl_buffer_tag *buffer,
                                      uint8_t access_category)
{
    struct tx_hd *new_hd = NULL;
    struct tx_hd *next_prev_hd = NULL;

#if NX_AMPDU_TX
#if RW_MUMIMO_TX_EN
    if (is_in_mumimo_ppdu(txdesc))
    {
        struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;
        struct tx_agg_desc *prim_agg_desc = agg_desc->prim_agg_desc;

        // Buffer allocation failed: nothing to chain
        if (!(buffer->flags & BUF_ALLOC_OK))
            return;

        // Clear this user's bit in the primary aggregate download bitmap
        prim_agg_desc->download &= ~CO_BIT(buffer->user_idx);

        // Wait until every user of the MU PPDU has downloaded
        if (prim_agg_desc->download)
            return;

        // Chain from the primary aggregate THD...
        new_hd = &prim_agg_desc->a_thd;
        // ...down to the last BAR THD of the PPDU
        next_prev_hd = prim_agg_desc->last_bar_thd;

        // First MU PPDU in flight: disable BW drop while it is pending
        if (!txl_cntrl_env.mumimo_ppdu_cnt)
        {
            nxmac_drop_to_lower_bw_setf(0);
        }

        // One more MU PPDU in flight
        txl_cntrl_env.mumimo_ppdu_cnt++;
    }
    else
#endif
    if (is_mpdu_agg(txdesc))
    {
        struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;

        // Buffer allocation failed: nothing to chain
        if (!(buffer->flags & BUF_ALLOC_OK))
            return;

        // Mark the aggregate payload as downloaded
        agg_desc->status |= AGG_DOWNLOADED;

        // Aggregate not fully formatted yet: it will be chained when it is
        if (!(agg_desc->status & AGG_FORMATTED))
            return;

        // Chain from the aggregate THD to its BAR THD
        new_hd = &agg_desc->a_thd;

        next_prev_hd = &agg_desc->bar_thd;
    }
    else
#endif
    {
        // Singleton: the frame's own THD starts and ends the exchange
        new_hd = &txdesc->lmac.hw_desc->thd;
        next_prev_hd = new_hd;
    }

#if TRACE_COMPO(LMAC)
    if (txdesc->lmac.hw_desc->thd.frmlen)
    {
        struct mac_hdr *mac_hdr = (struct mac_hdr *)HW2CPU(txl_buffer_machdr_get(txdesc));
        if (access_category < AC_MAX)
        {
            TRACE_LMAC(TX, "[AC %d]{VIF-%d} to STA-%d %fc SN=%d",
                       access_category, txdesc->host.vif_idx, txdesc->host.staid,
                       mac_hdr->fctl, (mac_hdr->seq >> MAC_SEQCTRL_NUM_OFT));
        }
        else
        {
            TRACE_AP(BCN, "{VIF-%d} SN=%d (tbtt in %dus)", txdesc->host.vif_idx,
                     (mac_hdr->seq >> MAC_SEQCTRL_NUM_OFT), nxmac_next_tbtt_get() << 5);
        }
    }
    else
    {
        TRACE_LMAC(TX, "[AC %d]{VIF-%d} to STA-%d Null Data Packet",
                   access_category, txdesc->host.vif_idx, txdesc->host.staid);
    }
#endif // TRACE_COMPO(LMAC)

    // Append the new frame exchange to the HW queue
    txl_frame_exchange_chain(new_hd, next_prev_hd, access_category);
}
01333
01334 #if (NX_TX_FRAME)
01335 #if (!NX_UMAC_PRESENT) && (NX_CHNL_CTXT || NX_P2P)
01336
/**
 * Discard an internal frame without transmitting it.
 *
 * The frame is confirmed immediately with a SW-done status so that the
 * confirmation path releases it normally.
 *
 * @param txdesc          Descriptor of the frame to discard
 * @param access_category Queue the frame belongs to
 */
static void txl_cntrl_discard(struct txdesc *txdesc, uint8_t access_category)
{
    PROF_CHAN_CTXT_TX_DISCARD_SET();

#if NX_POWERSAVE
    // Account for the packet so the PS logic stays consistent
    txl_cntrl_env.pck_cnt++;
#endif

    // Flag the confirmation as SW-generated (frame never reached the air)
    txdesc->lmac.hw_desc->cfm.status = DESC_DONE_SW_TX_BIT;

    // Push onto the confirmation list under interrupt protection,
    // as the list is also manipulated from IRQ context
    GLOBAL_INT_DISABLE();
    co_list_push_back(&txl_cfm_env.cfmlist[access_category], (struct co_list_hdr *)txdesc);
    GLOBAL_INT_RESTORE();

    // Trigger the confirmation event for this queue
    ke_evt_set(txl_cfm_evt_bit[access_category]);

    PROF_CHAN_CTXT_TX_DISCARD_CLR();
}
01366 #endif //(!NX_UMAC_PRESENT)
01367
/**
 * Postpone the transmission of an internal frame.
 *
 * The frame is parked on the destination STA's postponed list (e.g. the peer
 * is in power-save — TODO confirm exact trigger from callers) and will be
 * re-queued later.
 *
 * @param txdesc          Descriptor of the frame to postpone
 * @param access_category Queue the frame was meant for (saved in host.tid)
 */
static void txl_cntrl_postpone(struct txdesc *txdesc, uint8_t access_category)
{
    // Destination STA entry
    struct sta_info_tag *sta = &sta_info_tab[txdesc->host.staid];

    // Internal frames embed the txdesc at the start of the frame descriptor
    struct txl_frame_desc_tag *frame_desc = (struct txl_frame_desc_tag *)txdesc;

    // Remember that this frame has been postponed
    frame_desc->postponed = true;

    // Save the AC so the frame goes back to the same queue later
    txdesc->host.tid = access_category;

    // Park the descriptor on the STA's postponed-frames list
    co_list_push_back(&sta->tx_desc_post, &txdesc->list_hdr);

#if (NX_UMAC_PRESENT && NX_BEACONING)
    // AP mode: update PS bookkeeping (e.g. TIM) for the postponed frame
    apm_tx_int_ps_postpone(txdesc, sta);
#endif // (NX_UMAC_PRESENT && NX_BEACONING)
}
01397 #endif //(NX_TX_FRAME)
01398
01399 #if (NX_UMAC_PRESENT)
01400
01409 static bool txl_cntrl_start_pm_mon(struct mac_hdr *mac_hdr)
01410 {
01411 do
01412 {
01413 uint16_t type_subtype = mac_hdr->fctl & MAC_FCTRL_TYPESUBTYPE_MASK;
01414
01415
01416 if ((type_subtype != MAC_FCTRL_ASSOCRSP) && (type_subtype != MAC_FCTRL_REASSOCRSP))
01417 break;
01418
01419
01420 if (co_read16p(CPU2HW(mac_hdr) + MAC_SHORT_MAC_HDR_LEN + MAC_ASSO_RSP_STATUS_OFT) != 0)
01421 break;
01422
01423 return (true);
01424
01425 } while(0);
01426
01427 return (false);
01428 }
01429 #endif //(NX_UMAC_PRESENT)
01430
01431 #if (NX_BCN_AUTONOMOUS_TX)
01432
/**
 * Maintain the broadcast/multicast "more data" state of an AP VIF.
 *
 * Only applies to frames queued on the beacon/BC-MC queue: in LMAC-only
 * builds the MoreData bit is forced on while additional BC/MC frames are
 * pending; in all builds the VIF's BCMC status mirrors the frame's MoreData
 * bit.
 *
 * @param txdesc          Descriptor of the frame being sent
 * @param access_category Queue the frame belongs to
 * @param machdrptr       HW address of the frame's MAC header
 */
static void txl_check_bcmc_status(struct txdesc *txdesc, uint8_t access_category,
                                  uint32_t machdrptr)
{
    struct vif_info_tag *vif;
    uint16_t frame_ctrl;

    // Only frames on the BC/MC (beacon) queue are concerned
    if (access_category != AC_BCN)
        return;

    vif = &vif_info_tab[txdesc->host.vif_idx];

    // Read the frame control field of the outgoing frame
    frame_ctrl = co_read16p(machdrptr + MAC_HEAD_FCTRL_OFT);

#if (!NX_UMAC_PRESENT)
    // More BC/MC frames pending: set MoreData and consume one from the count
    if (vif->u.ap.bc_mc_nb > 1) {
        frame_ctrl |= MAC_FCTRL_MOREDATA;
        vif->u.ap.bc_mc_nb--;

        // Write the updated frame control back into the header
        co_write16p(machdrptr + MAC_HEAD_FCTRL_OFT, frame_ctrl);
    }
#endif

    // Mirror the MoreData bit into the VIF BCMC status
    if (frame_ctrl & MAC_FCTRL_MOREDATA)
        vif->u.ap.bc_mc_status |= VIF_AP_BCMC_MOREDATA;
    else
        vif->u.ap.bc_mc_status &= ~(VIF_AP_BCMC_MOREDATA);
}
01477 #endif // NX_BCN_AUTONOMOUS_TX
01478
/**
 * Post-download handling of a frame payload.
 *
 * For regular (non-internal) frames, updates the BC/MC status, assigns the
 * sequence number to non-QoS frames, arms PM monitoring for (re)association
 * responses, and finalizes the HW descriptors. The frame exchange is then
 * chained to the HW queue in all cases.
 *
 * @param txdesc          Descriptor of the downloaded frame
 * @param buffer          Buffer the payload was downloaded into
 * @param access_category Queue the frame belongs to
 */
static void txl_payload_handle(struct txdesc *txdesc,
                               struct txl_buffer_tag *buffer,
                               uint8_t access_category)
{
#if NX_TX_FRAME
    // Internal frames are pre-formatted: skip header/descriptor handling
    if (!is_int_frame(txdesc))
#endif
    {
#if NX_AMSDU_TX
        // Only the buffer carrying the MAC header needs this processing
        if (!(buffer->flags & BUF_INT_MSDU))
#endif
        {
            uint32_t mac_hdr_addr = txl_buffer_machdr_get(txdesc);

#if (NX_BCN_AUTONOMOUS_TX)
            // Update the AP VIF's BC/MC MoreData bookkeeping
            txl_check_bcmc_status(txdesc, access_category, mac_hdr_addr);
#endif

            // Non-QoS frames get their sequence number from the SW counter
            if (!is_qos_data(txdesc))
            {
#if (NX_UMAC_PRESENT)
                if (txdesc->host.flags & TXU_CNTRL_MGMT)
                {
                    struct mac_hdr *mac_hdr = (struct mac_hdr *)(HW2CPU(mac_hdr_addr));

                    // (Re)association response with SUCCESS: monitor the
                    // peer's PM bit until it acknowledges the association
                    if (txl_cntrl_start_pm_mon(mac_hdr))
                    {
                        txdesc->host.flags |= TXU_CNTRL_MGMT_PM_MON;

                        rxu_cntrl_monitor_pm(&mac_hdr->addr1);
                    }
                }
#endif //(NX_UMAC_PRESENT)
                PROF_TX_PAYL_HDL_SET();
                // Write the sequence control field into the MAC header
                txl_machdr_format(mac_hdr_addr);
                PROF_TX_PAYL_HDL_CLR();
            }

            // Finalize the HW descriptors now that the payload is in place
            PROF_TX_PAYL_HDL_SET();
            txl_hwdesc_config_post(txdesc, access_category);
            PROF_TX_PAYL_HDL_CLR();
        }
    }
#if NX_MAC_HE
    else
    {
        // Internal frame: the buffer is immediately singleton-ready
        txl_buffer_get(txdesc)->flags |= BUF_SINGLETON_READY;
    }
#endif

    // Chain the frame exchange to the HW TX queue
    PROF_TX_PAYL_HDL_SET();
    txl_frame_exchange_manage(txdesc, buffer, access_category);
    PROF_TX_PAYL_HDL_CLR();
}
01552
01553 #if !NX_FULLY_HOSTED
/**
 * Try to start the payload download of a new frame.
 *
 * If the queue already has enough buffers in flight the descriptor is only
 * recorded as the next one to download; otherwise the transfer is started
 * immediately (all sub-frames for an A-MSDU).
 *
 * @param txdesc          Descriptor of the frame to download
 * @param access_category Queue the frame belongs to
 * @param user_idx        User index (MU-MIMO); 0 otherwise
 *
 * @return true when the download was (at least partially) started,
 *         false when it was deferred or the buffer pool is full
 */
bool txl_payload_alloc(struct txdesc *txdesc, uint8_t access_category,
                       uint8_t user_idx)
{
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];
    bool success = false;

#if NX_AMSDU_TX
    // Iterate once per A-MSDU sub-frame
    while (1)
    {
        // Enough buffers queued and no download in progress: just remember
        // this descriptor for later
        if ((txl_buffer_count(access_category, user_idx) >= TX_MIN_DOWNLOAD_CNT) &&
            (txlist->first_to_download[user_idx] == NULL))
        {
            txlist->first_to_download[user_idx] = txdesc;
            break;
        }

        // Start the transfer of the next sub-frame
        success = !txl_payload_transfer(txdesc, access_category, user_idx);
        if (!success)
            // Buffer pool full: stop here
            break;

        // All sub-frames started: reset the index for the next descriptor
        if (txlist->dwnld_index[user_idx] == txdesc->host.packet_cnt)
        {
            txlist->dwnld_index[user_idx] = 0;
            break;
        }
    }
#else
    if (txl_buffer_count(access_category, user_idx) < TX_MIN_DOWNLOAD_CNT)
        // Room in the pipeline: start the transfer right away
        success = !txl_payload_transfer(txdesc, access_category, user_idx);
    else
        // Pipeline full enough: defer this descriptor
        txlist->first_to_download[user_idx] = txdesc;
#endif

    return (success);
}
01590 #endif
01591
/**
 ****************************************************************************************
 * @brief Program a new head descriptor on a MAC HW TX queue and kick the queue.
 *
 * Writes the THD HW address into the per-AC head pointer register, raises the
 * matching "new head" bit in the DMA control register, then (re)starts the TX
 * timeout timer of the queue.
 *
 * @param[in] desc            HW address of the first TX header descriptor to chain
 * @param[in] access_category Queue to kick (AC_BK..AC_VO, plus AC_BCN when beaconing)
 ****************************************************************************************
 */
void txl_cntrl_newhead(uint32_t desc,
                       uint8_t access_category)
{
    switch (access_category)
    {
#if NX_BEACONING
    case AC_BCN:
        // NOTE(review): state value 2 appears to be a state in which programming a
        // new head is illegal - confirm against the MAC HW state machine spec
        ASSERT_REC(nxmac_tx_bcn_state_getf() != 2);
        nxmac_tx_bcn_head_ptr_set(desc);
        nxmac_dma_cntrl_set(NXMAC_TX_BCN_NEW_HEAD_BIT);
        break;
#endif
    case AC_VO:
        ASSERT_REC(nxmac_tx_ac_3_state_getf() != 2);
        nxmac_tx_ac_3_head_ptr_set(desc);
        nxmac_dma_cntrl_set(NXMAC_TX_AC_3_NEW_HEAD_BIT);
        break;
    case AC_VI:
        ASSERT_REC(nxmac_tx_ac_2_state_getf() != 2);
        nxmac_tx_ac_2_head_ptr_set(desc);
        nxmac_dma_cntrl_set(NXMAC_TX_AC_2_NEW_HEAD_BIT);
        break;
    case AC_BE:
        ASSERT_REC(nxmac_tx_ac_1_state_getf() != 2);
        nxmac_tx_ac_1_head_ptr_set(desc);
        nxmac_dma_cntrl_set(NXMAC_TX_AC_1_NEW_HEAD_BIT);
        break;
    case AC_BK:
        ASSERT_REC(nxmac_tx_ac_0_state_getf() != 2);
        nxmac_tx_ac_0_head_ptr_set(desc);
        nxmac_dma_cntrl_set(NXMAC_TX_AC_0_NEW_HEAD_BIT);
        break;
    default:
        // Unknown access category
        ASSERT_ERR(0);
        break;
    }

    // Arm the TX timeout timer for this queue
    txl_timer_start(access_category);
}
01633
/**
 ****************************************************************************************
 * @brief Chain a new frame exchange (sequence of THDs) on a TX queue.
 *
 * If a previous exchange exists, the new one is linked after it and the HW is
 * notified of a new tail; otherwise the new exchange becomes the HW queue head.
 *
 * @param[in] first_thd       First THD of the exchange
 * @param[in] last_thd        Last THD of the exchange (becomes last_frame_exch)
 * @param[in] access_category Queue on which the exchange is chained
 ****************************************************************************************
 */
void txl_frame_exchange_chain(struct tx_hd *first_thd,
                              struct tx_hd *last_thd,
                              uint8_t access_category)
{
    struct tx_hd *prev_hd = txl_cntrl_env.txlist[access_category].last_frame_exch;

    if (prev_hd != NULL)
    {
        PROF_TX_NEW_TAIL_SET();

        // Link the new exchange after the previous one
        prev_hd->nextfrmexseq_ptr = CPU2HW(first_thd);

        // In HE builds the HW is only kicked if EDCA chaining is currently allowed
        // (e.g. not held off by trigger-based operation); otherwise unconditionally
#if NX_MAC_HE
        if (txl_he_tb_can_chain_edca(access_category))
#endif
            txl_cntrl_newtail(access_category);

        PROF_TX_NEW_TAIL_CLR();
    }
    else
    {
        // Queue currently empty: program the exchange as the new HW queue head
#if NX_MAC_HE
        if (txl_he_tb_can_chain_edca(access_category))
#endif
            txl_cntrl_newhead(CPU2HW(first_thd), access_category);
    }

    // Remember the last THD of this exchange for future chaining
    txl_cntrl_env.txlist[access_category].last_frame_exch = last_thd;
}
01670
01671 #if !NX_FULLY_HOSTED
01672 void txl_free_done_mpdu(struct txdesc *txdesc, uint8_t access_category, uint8_t user_idx)
01673 {
01674 #if NX_AMSDU_TX
01675 struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];
01676 #endif
01677
01678 #if NX_TX_FRAME
01679
01680 if (!is_int_frame(txdesc))
01681 #endif
01682 {
01683 #if NX_AMSDU_TX
01684
01685 while (txlist->tx_index[user_idx] < txdesc->host.packet_cnt)
01686 {
01687 struct txl_buffer_tag *buf = txdesc->lmac.buffer[txlist->tx_index[user_idx]];
01688
01689
01690 if (buf != NULL)
01691 {
01692 if (txl_buffer_free(buf, access_category))
01693 txl_transmit_prep(access_category, user_idx);
01694
01695
01696 txdesc->lmac.buffer[txlist->tx_index[user_idx]] = NULL;
01697 }
01698 txlist->tx_index[user_idx]++;
01699 };
01700 #else
01701 if (txdesc->lmac.buffer != NULL)
01702 {
01703
01704 if (txl_buffer_free(txdesc->lmac.buffer, access_category))
01705 txl_transmit_prep(access_category, user_idx);
01706 txdesc->lmac.buffer = NULL;
01707 }
01708 #endif
01709 }
01710
01711 #if NX_AMSDU_TX
01712
01713 txlist->tx_index[0] = 0;
01714 #endif // NX_AMSDU_TX
01715 }
01716
#if NX_AMSDU_TX
/**
 ****************************************************************************************
 * @brief Release the buffers of A-MSDU sub-frames already consumed by the HW.
 *
 * For a split MPDU, walks the sub-frame buffers in order and frees every buffer
 * whose relevant payload buffer descriptor is flagged done by the HW, stopping at
 * the first one still in use. Progress is kept in txlist->tx_index[user_idx].
 *
 * @param[in] txdesc          Descriptor being transmitted
 * @param[in] access_category TX queue of the descriptor
 * @param[in] user_idx        User index (0 outside MU-MIMO builds)
 ****************************************************************************************
 */
void txl_check_done_amsdu_subframe(struct txdesc *txdesc, uint8_t access_category,
                                   uint8_t user_idx)
{
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];

#if NX_UMAC_PRESENT
    if (txl_buffer_is_amsdu_multi_buf(txdesc))
#else
    if (is_mpdu_split(txdesc))
#endif
    {
        while (txlist->tx_index[user_idx] < txdesc->host.packet_cnt)
        {
            struct txl_buffer_tag *buf = txdesc->lmac.buffer[txlist->tx_index[user_idx]];
            struct tx_pbd *tbd;

            // No buffer attached yet for this sub-frame: nothing more to free
            if (buf == NULL)
                break;

            // For split buffers, the TBD to test is the next one in the chain
            tbd = &buf->tbd;
            if (buf->flags & BUF_SPLIT)
                tbd = HW2CPU(tbd->next);

            // Stop at the first sub-frame the HW has not finished reading
            if (!(tbd->bufctrlinfo & TBD_DONE_HW))
                break;

            // Freeing a buffer may unblock a deferred download
            if (txl_buffer_free(buf, access_category))
                txl_transmit_prep(access_category, user_idx);

            txdesc->lmac.buffer[txlist->tx_index[user_idx]] = NULL;

            txlist->tx_index[user_idx]++;
        }
    }
}
#endif // NX_AMSDU_TX
#endif // !NX_FULLY_HOSTED
01761
01762 void txl_cntrl_init(void)
01763 {
01764 int i, j;
01765
01766 #if NX_AMPDU_TX
01767 txl_agg_init();
01768 #endif
01769 tx_txdesc_init();
01770 txl_buffer_init();
01771 txl_cfm_init();
01772 #if NX_TX_FRAME
01773 txl_frame_init(false);
01774 #endif
01775 #if NX_MAC_HE
01776 txl_he_init();
01777 #endif
01778
01779 memset(&txl_cntrl_env, 0, sizeof(txl_cntrl_env));
01780
01781
01782 for (i=0; i<NX_TXQ_CNT; i++)
01783 {
01784 for (j=0; j<RW_USER_MAX; j++)
01785 {
01786 co_list_init(&(txl_cntrl_env.txlist[i].transmitting[j]));
01787 }
01788
01789 #if RW_MUMIMO_TX_EN
01790 txl_cntrl_env.txlist[i].mumimo.open = MU_USER_MASK;
01791 #endif
01792 txl_cntrl_env.txlist[i].last_frame_exch = NULL;
01793 txl_cntrl_env.txlist[i].bridgedmacnt = dma_lli_counter_get(TX_AC2LLI(i));
01794 txl_cntrl_env.txlist[i].chk_state = THD_CHK_STATE;
01795
01796 #if NX_AMPDU_TX
01797 txl_cntrl_env.txlist[i].ppdu_cnt = 0;
01798 txl_cntrl_env.txlist[i].agg_desc = NULL;
01799 txl_cntrl_env.txlist[i].agg_desc_prev = NULL;
01800 co_list_init(&(txl_cntrl_env.txlist[i].aggregates));
01801 #endif
01802 }
01803
01804
01805 txl_cntrl_env.seqnbr = 0;
01806 }
01807
#if (NX_TX_FRAME)
/**
 ****************************************************************************************
 * @brief Check whether transmission is currently allowed on a VIF.
 *
 * @param[in] vif VIF on which a frame is about to be transmitted
 *
 * @return true if TX is allowed, false otherwise
 ****************************************************************************************
 */
bool txl_cntrl_tx_check(struct vif_info_tag *vif)
{
    // No transmission while a TX path reset is in progress
    bool allowed = !txl_cntrl_env.reset;

#if (NX_CHNL_CTXT)
    // The VIF must currently be allowed to transmit on its channel
    allowed = allowed && chan_is_tx_allowed(vif);
#endif //(NX_CHNL_CTXT)

#if (NX_P2P)
    // A P2P interface must be within a presence period
    allowed = allowed && (!vif->p2p || p2p_is_present(vif->p2p_index));
#endif //(NX_P2P)

    return (allowed);
}
#endif //(NX_TX_FRAME)
01835
/**
 ****************************************************************************************
 * @brief Halt a MAC HW TX queue at the next TXOP boundary.
 *
 * Stops the TX timeout timer, sets the per-queue "halt after TXOP" bit, busy-waits
 * until the queue state machine reads back 0, then clears the halt request.
 *
 * NOTE(review): the busy-wait loops have no timeout - a stuck HW state machine
 * would hang the CPU here.
 *
 * @param[in] access_category Queue to halt
 ****************************************************************************************
 */
void txl_cntrl_halt_ac(uint8_t access_category)
{
    // Stop the TX timeout timer of this queue
    txl_timer_clear(access_category);

    switch (access_category)
    {
#if NX_BEACONING
    case AC_BCN:
        nxmac_dma_cntrl_set(NXMAC_HALT_BCN_AFTER_TXOP_BIT);
        while(nxmac_tx_bcn_state_getf() != 0);
        nxmac_dma_cntrl_clear(NXMAC_HALT_BCN_AFTER_TXOP_BIT);
        break;
#endif
    case AC_VO:
        nxmac_dma_cntrl_set(NXMAC_HALT_AC_3_AFTER_TXOP_BIT);
        while(nxmac_tx_ac_3_state_getf() != 0);
        nxmac_dma_cntrl_clear(NXMAC_HALT_AC_3_AFTER_TXOP_BIT);
        break;
    case AC_VI:
        nxmac_dma_cntrl_set(NXMAC_HALT_AC_2_AFTER_TXOP_BIT);
        while(nxmac_tx_ac_2_state_getf() != 0);
        nxmac_dma_cntrl_clear(NXMAC_HALT_AC_2_AFTER_TXOP_BIT);
        break;
    case AC_BE:
        nxmac_dma_cntrl_set(NXMAC_HALT_AC_1_AFTER_TXOP_BIT);
        while(nxmac_tx_ac_1_state_getf() != 0);
        nxmac_dma_cntrl_clear(NXMAC_HALT_AC_1_AFTER_TXOP_BIT);
        break;
    case AC_BK:
        nxmac_dma_cntrl_set(NXMAC_HALT_AC_0_AFTER_TXOP_BIT);
        while(nxmac_tx_ac_0_state_getf() != 0);
        nxmac_dma_cntrl_clear(NXMAC_HALT_AC_0_AFTER_TXOP_BIT);
        break;
    default:
        // Unknown access category
        ASSERT_ERR(0);
        break;
    }
}
01876
01877 void txl_cntrl_flush_ac(uint8_t access_category, uint32_t status)
01878 {
01879 struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];
01880 int i;
01881
01882
01883 txl_cfm_flush(access_category, &txl_cfm_env.cfmlist[access_category], status);
01884 for (i=0; i < RW_USER_MAX; i++)
01885 {
01886 txl_cfm_flush(access_category, &txlist->transmitting[i], status);
01887 }
01888
01889
01890 txlist->last_frame_exch = NULL;
01891 for (i=0; i < RW_USER_MAX; i++)
01892 {
01893 txlist->first_to_download[i] = NULL;
01894 }
01895
01896 #if !NX_FULLY_HOSTED
01897
01898 txl_buffer_reset(access_category);
01899 #endif
01900
01901
01902 txl_timer_clear(access_category);
01903 }
01904
/**
 ****************************************************************************************
 * @brief Push a data TX descriptor coming from the host on a TX queue.
 *
 * Pre-formats the HW descriptors, optionally hands the MPDU to the aggregation
 * module, queues it on the transmitting list (SU path), triggers the payload
 * download when the pipeline is idle, and performs power-save / traffic-detection
 * accounting.
 *
 * @param[in] txdesc          Descriptor to push
 * @param[in] access_category Destination TX queue
 *
 * @return In MU-MIMO builds, true if the push was paused (MU_PAUSED); always
 *         false otherwise.
 ****************************************************************************************
 */
bool txl_cntrl_push(struct txdesc *txdesc, uint8_t access_category)
{
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];
#if RW_MUMIMO_TX_EN
    // NOTE(review): status is only assigned under NX_AMPDU_TX below - this assumes
    // RW_MUMIMO_TX_EN implies NX_AMPDU_TX; confirm in the build configuration
    int status;
#endif

    PROF_TX_AC_BG_SET(access_category);

#if (!NX_UMAC_PRESENT)
#if (NX_CHNL_CTXT || NX_P2P)
    struct vif_info_tag *vif = &vif_info_tab[txdesc->host.vif_idx];

    // Drop the frame immediately if TX is not currently allowed on this VIF
    if (!txl_cntrl_tx_check(vif))
    {
        txl_cntrl_discard(txdesc, access_category);

        return (false);
    }
#endif //(NX_CHNL_CTXT || NX_P2P)
#endif //(!NX_UMAC_PRESENT)

    // Prepare the HW descriptors of the frame
    txl_hwdesc_config_pre(txdesc, access_category);

    GLOBAL_INT_DISABLE();
#if NX_AMPDU_TX
#if RW_MUMIMO_TX_EN
    status =
#endif
        txl_agg_push_mpdu(txdesc, access_category);
#endif

    // Singleton (SU) packets are queued directly; aggregated/MU ones are handled
    // by the aggregation module above
#if RW_MUMIMO_TX_EN
    if (status == SU_PACKET)
#endif
    {
#if !NX_FULLY_HOSTED
        // Start the payload download if the download pipeline is idle
        if (txlist->first_to_download[0] == NULL)
        {
            txl_payload_alloc(txdesc, access_category, 0);
        }
#endif
        co_list_push_back(&txlist->transmitting[0], &txdesc->list_hdr);
    }
    GLOBAL_INT_RESTORE();

#if NX_POWERSAVE
    // One more packet owned by the TX path (power-save accounting)
    txl_cntrl_env.pck_cnt++;
#endif

#if (NX_TD)
    // Traffic detection notification
    td_pck_ind(txdesc->host.vif_idx, txdesc->host.staid, false);
#endif //(NX_TD)

#if (NX_UAPSD || NX_DPSM)
    // Power-save handling for this STA/TID
    ps_check_tx_frame(txdesc->host.staid, txdesc->host.tid);
#endif //(NX_UAPSD || NX_DPSM)

#if NX_FULLY_HOSTED
#if NX_AMPDU_TX
    // For aggregated MPDUs the payload is handled only once, on the first one
    if (!is_mpdu_agg(txdesc) || is_mpdu_first(txdesc))
#endif
    {
        txl_buffer_control_copy(txdesc, txdesc->lmac.buffer);
        GLOBAL_INT_DISABLE();
        txl_payload_handle(txdesc, txdesc->lmac.buffer, access_category);
        GLOBAL_INT_RESTORE();
    }
#endif

#if RW_MUMIMO_TX_EN
    return (status == MU_PAUSED);
#else
    return (false);
#endif
}
01988
#if (NX_UMAC_PRESENT)
/**
 ****************************************************************************************
 * @brief Account one more packet owned by the TX path (power-save bookkeeping).
 ****************************************************************************************
 */
void txl_cntrl_inc_pck_cnt(void)
{
#if NX_POWERSAVE
    // One additional frame is now pending in the TX path
    ++txl_cntrl_env.pck_cnt;
#endif
}
#endif //(NX_UMAC_PRESENT)
01998
#if NX_TX_FRAME
/**
 ****************************************************************************************
 * @brief Push an internally-generated frame on a TX queue.
 *
 * Verifies that TX is currently possible (channel, P2P presence, peer PS state);
 * if not, the frame is either postponed (known STA) or released. Otherwise the
 * frame is queued standalone: any A-MPDU under formation is closed first, the TX
 * interrupt is enabled on its THD, and power-save accounting is updated.
 *
 * @param[in] txdesc          Descriptor of the internal frame
 * @param[in] access_category Destination TX queue
 *
 * @return true if the frame was pushed or postponed, false if it was released.
 ****************************************************************************************
 */
bool txl_cntrl_push_int(struct txdesc *txdesc, uint8_t access_category)
{
    struct txl_list *txlist = &txl_cntrl_env.txlist[access_category];
    struct tx_hd *thd = &txdesc->lmac.hw_desc->thd;

    struct vif_info_tag *vif = &vif_info_tab[txdesc->host.vif_idx];

    PROF_TX_FRAME_PUSH_SET();

    // Check if the frame can be transmitted now
    if (!txl_cntrl_tx_check(vif)
#if (NX_UMAC_PRESENT && NX_BEACONING)
        || !apm_tx_int_ps_check(txdesc)
#endif
#if (RW_UMESH_EN)
        || !mesh_ps_check_peer_presence(vif, txdesc->host.staid)
#endif
       )
    {
        if (txdesc->host.staid != INVALID_STA_IDX)
        {
            // Destination STA known: keep the frame for a later attempt
            txl_cntrl_postpone(txdesc, access_category);

            return (true);
        }
        else
        {
            // No STA attached: give the frame back to its owner, not confirmed
            txl_frame_release(txdesc, false);

            return (false);
        }
    }

    // Request a TX interrupt upon completion of this frame
    thd->macctrlinfo2 |= INTERRUPT_EN_TX;

    GLOBAL_INT_DISABLE();
#if NX_AMPDU_TX
    // Internal frames are sent as standalone PPDUs
    txl_cntrl_env.txlist[access_category].ppdu_cnt++;

    // Close any aggregate currently under formation on this queue
#if RW_MUMIMO_TX_EN
    if (txlist->mumimo.users)
    {
        txl_agg_mumimo_close(access_category);
    }
    else
#endif
    if (txlist->agg[0].desc != NULL)
    {
        txl_agg_finish(access_category);
    }
#endif

#if !NX_FULLY_HOSTED
    // Payload is already in memory: emulate the download if the pipeline is idle
    if (txlist->first_to_download[0] == NULL)
        txl_int_fake_transfer(txdesc, access_category);
#endif

    co_list_push_back(&txlist->transmitting[0], &txdesc->list_hdr);
    GLOBAL_INT_RESTORE();

#if (NX_POWERSAVE)
    // One more packet owned by the TX path (power-save accounting)
    txl_cntrl_env.pck_cnt++;
#endif

#if NX_FULLY_HOSTED
    GLOBAL_INT_DISABLE();
    txl_payload_handle(txdesc, txl_buffer_get(txdesc), access_category);
    GLOBAL_INT_RESTORE();
#endif

    PROF_TX_FRAME_PUSH_CLR();

    return (true);
}
#endif
02086
#if !NX_FULLY_HOSTED
/**
 ****************************************************************************************
 * @brief Interrupt handler for the TX payload download DMA.
 *
 * For every TX LLI channel with a pending interrupt, processes all transfers
 * completed since the last pass (tracked by comparing the SW counter mirror with
 * the HW LLI counter) and hands each downloaded payload to txl_payload_handle().
 ****************************************************************************************
 */
void txl_cntrl_dma_isr(void)
{
    uint32_t irqstatus;

    PROF_TX_DMA_IRQ_SET();

    // Snapshot the pending TX LLI interrupts
    irqstatus = dma_int_status_get() & IPC_DMA_LLI_TX_MASK;

    while (irqstatus)
    {
        uint8_t access_category;
        struct txdesc *txdesc;
        struct txl_list *txlist;
        // Index of the highest-priority pending LLI channel
        uint8_t lliidx = (31 - DMA_LLI_IRQ_LSB) - co_clz(irqstatus);

        access_category = TX_LLI2AC(lliidx);
        txlist = &txl_cntrl_env.txlist[access_category];

        // Acknowledge the interrupt before checking the counter, so that a
        // completion racing with the handling is not lost
        dma_int_ack_clear(CO_BIT(lliidx + DMA_LLI_IRQ_LSB));

        PROF_TX_AC_IRQ_SET(access_category);

        // Process every transfer completed since the last pass
        while (txlist->bridgedmacnt != dma_lli_counter_get(lliidx))
        {
            struct txl_buffer_tag *buffer = txl_buffer_pop(access_category);

            // A completed transfer must always have a buffer attached
            ASSERT_ERR(buffer != NULL);

            txdesc = buffer->txdesc;

            // Re-acknowledge inside the loop for the same race-avoidance reason
            dma_int_ack_clear(CO_BIT(lliidx + DMA_LLI_IRQ_LSB));

            // Payload is now in shared RAM: finalize its descriptors
            txl_payload_handle(txdesc, buffer, access_category);

            txlist->bridgedmacnt++;
        }

        // Re-sample the interrupt status for the next iteration
        irqstatus = dma_int_status_get() & IPC_DMA_LLI_TX_MASK;
    }

    PROF_TX_DMA_IRQ_CLR();
}
#endif // !NX_FULLY_HOSTED
02145
/**
 ****************************************************************************************
 * @brief Handler of the protection-related TX interrupts.
 *
 * Dispatches the HE trigger-based protection interrupt (NX_MAC_HE) and the
 * per-AC bandwidth-drop interrupts (NX_BW_LEN_ADAPT), then acknowledges all
 * sampled protection interrupt bits.
 ****************************************************************************************
 */
void txl_prot_trigger(void)
{
    uint32_t status;

    // Snapshot the pending protection interrupts
    status = nxmac_tx_rx_int_status_get() & TX_PROT_IRQ;

#if NX_MAC_HE
    if (status & NXMAC_TB_PROT_TRIGGER_BIT)
    {
        txl_he_tb_prot_trigger();
    }
#endif

#if NX_BW_LEN_ADAPT
    if (status & TX_BW_DROP_IRQ)
    {
        uint8_t access_category;
        uint32_t bw_drop_status = status & TX_BW_DROP_IRQ;

        PROF_BW_DROP_IRQ_SET();

        // Derive the AC from the position of the highest pending BW-drop bit
        access_category = 31 - co_clz(bw_drop_status) - NXMAC_AC_0BW_DROP_TRIGGER_POS;

        PROF_TX_AC_IRQ_SET(access_category);

        // Adjust the aggregate for the reduced bandwidth
        txl_agg_bw_drop_handle(access_category);

        PROF_BW_DROP_IRQ_CLR();
    }
#endif

    // Acknowledge all the interrupt bits sampled above
    nxmac_tx_rx_int_ack_clear(status);
}
02186
/**
 ****************************************************************************************
 * @brief Handler of the TX trigger interrupts (frame exchange complete).
 *
 * Dispatches the HE trigger-based and MU-MIMO secondary-user triggers first,
 * then handles ONE access category per invocation: the highest pending AC
 * trigger is acknowledged and txl_frame_exchange_done() is called for it.
 * Triggers of other ACs are left pending and serviced on the next interrupt.
 ****************************************************************************************
 */
void txl_transmit_trigger(void)
{
    uint8_t access_category;
    uint32_t status;

    // Snapshot the pending transmit interrupts
    status = nxmac_tx_rx_int_status_get() & TX_TRANSMIT_IRQ;

#if NX_MAC_HE
    // HE trigger-based transmission cancelled by the HW
    if (status & NXMAC_TB_TX_CANCELLED_BIT)
    {
        txl_he_tb_transmit_cancelled();
        nxmac_tx_rx_int_ack_clear(NXMAC_TB_TX_CANCELLED_BIT);
    }

    // HE trigger-based transmission completed
    if (status & (NXMAC_TB_TX_TRIGGER_BIT | NXMAC_TB_TX_BUF_TRIGGER_BIT))
    {
        txl_he_tb_transmit_trigger();
        nxmac_tx_rx_int_ack_clear(NXMAC_TB_TX_TRIGGER_BIT | NXMAC_TB_TX_BUF_TRIGGER_BIT);
    }
#endif

#if RW_MUMIMO_TX_EN
    // Secondary-user transmission completed
    if (status & NXMAC_SEC_USER_TX_TRIGGER_BIT)
        txl_agg_sec_transmit_trigger();
#endif

#if NX_AMSDU_TX
    // Fold the per-AC "buffer" trigger bits onto the normal trigger bits
    status |= status >> (NXMAC_AC_0_TX_BUF_TRIGGER_POS - NXMAC_AC_0_TX_TRIGGER_POS);
#endif

    // Keep only the per-AC transmit trigger bits
    status &= TX_IRQ_BITS;

    if (status == 0)
        return;

    PROF_TX_MAC_IRQ_SET();

    // Highest-priority AC with a pending trigger
    access_category = 31 - co_clz(status) - NXMAC_AC_0_TX_TRIGGER_POS;

    ASSERT_ERR(access_category < NX_TXQ_CNT);

    // Acknowledge only this AC's trigger bit(s); others stay pending
#if NX_AMSDU_TX
    nxmac_tx_rx_int_ack_clear(CO_BIT(access_category + NXMAC_AC_0_TX_BUF_TRIGGER_POS) |
                              CO_BIT(access_category + NXMAC_AC_0_TX_TRIGGER_POS));
#else
    nxmac_tx_rx_int_ack_clear(CO_BIT(access_category + NXMAC_AC_0_TX_TRIGGER_POS));
#endif

    PROF_TX_AC_IRQ_SET(access_category);

    // Process the completed frame exchange(s) on this AC
    txl_frame_exchange_done(access_category);

    PROF_TX_MAC_IRQ_CLR();
}
02256
/**
 ****************************************************************************************
 * @brief Retrieve the TX header descriptor currently under transmission on a queue.
 *
 * @param[in]  access_category Queue to inspect
 * @param[out] thd             Filled with the current THD: the A-MPDU THD when an
 *                             aggregate is involved, the MPDU's own THD otherwise,
 *                             or NULL if nothing is being transmitted.
 ****************************************************************************************
 */
void txl_current_desc_get(int access_category, struct tx_hd **thd)
{
#if NX_AMPDU_TX
    // Queue currently checking an aggregate under formation: return its A-MPDU THD
    if (txl_cntrl_env.txlist[access_category].chk_state != THD_CHK_STATE)
    {
        struct tx_agg_desc *agg_desc = txl_cntrl_env.txlist[access_category].agg_desc;
        *thd = &agg_desc->a_thd;
    }
    else
#endif
    {
        // Peek at the head of the transmitting list (primary user)
        struct txdesc *txdesc = (struct txdesc *)co_list_pick(&(txl_cntrl_env.txlist[access_category].transmitting[0]));

        if (txdesc != NULL)
        {
#if NX_AMPDU_TX
            // MPDU part of an aggregate: the relevant THD is the A-MPDU one
            if (is_mpdu_agg(txdesc))
            {
                struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;
                *thd = &agg_desc->a_thd;
            }
            else
#endif
            {
                struct tx_hd *txhd = &txdesc->lmac.hw_desc->thd;

                *thd = txhd;
            }
        }
        else
        {
            // Nothing under transmission on this queue
            *thd = NULL;
        }
    }
}
02299
/**
 ****************************************************************************************
 * @brief Reset the whole TX path.
 *
 * Waits for the DMA to be idle, flushes every pending descriptor of every queue
 * with DESC_DONE_SW_TX_BIT, re-initializes the TX sub-modules and rebuilds the
 * per-queue state. The MPDU sequence counter is preserved across the reset.
 ****************************************************************************************
 */
void txl_reset(void)
{
    int i,j;
    // Save the sequence counter: it must survive the environment clear below
    uint16_t seq_num = txl_cntrl_env.seqnbr;

    // Cancel any pending TX confirmation event
    ke_evt_clear(KE_EVT_TXCFM_MASK);

    // Busy-wait until all DMA channels are free
    while (dma_dma_status_oft_free_getf() != DMA_OFT_FREE_MASK);

#if (NX_TX_FRAME)
    // Block new transmissions while the reset is in progress (checked by
    // txl_cntrl_tx_check(); cleared by the memset below)
    txl_cntrl_env.reset = true;
#endif //(NX_TX_FRAME)

    for (i = 0; i < NX_TXQ_CNT; i++)
    {
        uint8_t lliidx = TX_AC2LLI(i);
        struct txl_list *txlist = &txl_cntrl_env.txlist[i];

        // Acknowledge any pending download interrupt for this queue
        dma_int_ack_clear(CO_BIT(lliidx));

        // Flush the confirmation list and every per-user transmitting list
        txl_cfm_flush(i, &txl_cfm_env.cfmlist[i], DESC_DONE_SW_TX_BIT);
        for (j=0; j<RW_USER_MAX; j++)
        {
            txl_cfm_flush(i, &txlist->transmitting[j], DESC_DONE_SW_TX_BIT);
        }
#if RW_MUMIMO_TX_EN
        // Flush the MU-MIMO per-user lists and any descriptor under formation
        for (j=0; j<RW_USER_MAX; j++)
        {
            if (txlist->mumimo.users & CO_BIT(j))
                txl_cfm_flush(i, &txlist->mumimo.tx[j], DESC_DONE_SW_TX_BIT);

            if (txlist->mumimo.txdesc[j] != NULL)
                txl_cfm_flush_desc(i, txlist->mumimo.txdesc[j], DESC_DONE_SW_TX_BIT);
        }
        // Flush every aggregate still queued
        while (1)
        {
            struct tx_agg_desc *agg_desc = (struct tx_agg_desc *)co_list_pop_front(&txlist->aggregates);

            if (agg_desc == NULL)
                break;

            txl_cfm_flush(i, &agg_desc->cfm, DESC_DONE_SW_TX_BIT);
        }
#endif
    }

    // Re-initialize the TX sub-modules
#if NX_AMPDU_TX
    txl_agg_reset();
#endif
#if !NX_FULLY_HOSTED
    txl_buffer_reinit();
#endif
    txl_cfm_init();
#if NX_MAC_HE
    txl_he_reset();
#endif

    // Clear the whole environment (this also clears the reset flag set above)
    memset(&txl_cntrl_env, 0, sizeof(txl_cntrl_env));

#if RW_MUMIMO_TX_EN
    // Re-enable the drop-to-lower-bandwidth HW feature
    nxmac_drop_to_lower_bw_setf(1);
#endif

    // Restore the saved sequence counter
    txl_cntrl_env.seqnbr = seq_num;

    // Rebuild the per-queue state (same as txl_cntrl_init())
    for (i = 0; i < NX_TXQ_CNT; i++)
    {
        for (j=0; j<RW_USER_MAX; j++)
        {
            co_list_init(&(txl_cntrl_env.txlist[i].transmitting[j]));
        }

#if RW_MUMIMO_TX_EN
        txl_cntrl_env.txlist[i].mumimo.open = MU_USER_MASK;
        macif_tx_enable_users(i, MU_USER_MASK);
#endif

        txl_cntrl_env.txlist[i].last_frame_exch = NULL;
        // Align the SW mirror of the DMA LLI counter with the current HW value
        txl_cntrl_env.txlist[i].bridgedmacnt = dma_lli_counter_get(TX_AC2LLI(i));
        txl_cntrl_env.txlist[i].chk_state = THD_CHK_STATE;

#if NX_AMPDU_TX
        txl_cntrl_env.txlist[i].ppdu_cnt = 0;
        txl_cntrl_env.txlist[i].agg_desc = NULL;
        txl_cntrl_env.txlist[i].agg_desc_prev = NULL;
        co_list_init(&(txl_cntrl_env.txlist[i].aggregates));
#endif
    }
}
02397
02398
02399