00001
00018
00019
00020
00021
00022 #include "rxl_hwdesc.h"
00023
00024 #include "ke_event.h"
00025 #include "rxl_cntrl.h"
00026 #if NX_UMAC_PRESENT
00027 #include "rxu_cntrl.h"
00028 #endif
00029 #include "co_utils.h"
00030 #include "macif.h"
00031 #if NX_AMPDU_TX
00032 #include "txl_cfm.h"
00033 #endif
00034 #if NX_MAC_HE
00035 #include "txl_he.h"
00036 #endif
00037 #include "co_endian.h"
00038 #include "reg_mac_pl.h"
00039 #include "reg_mac_core.h"
00040
00041
00042
00043
00044
00045
/// SW bookkeeping for one HW reception buffer (addresses in MAC HW address space)
struct rxl_buffer_tag
{
    /// Start address of the buffer
    uint32_t start;
    /// End address of the buffer (one past the last usable 32-bit word)
    uint32_t end;
    /// Current SW read pointer; bit 31 carries the wrap flag (see RXL_WRAP)
    uint32_t read;
};
00056
/// RX HW descriptor module environment
struct rxl_hwdesc_env_tag
{
    /// SW state of the two HW reception buffers, indexed by enum rx_buf_id
    struct rxl_buffer_tag buf[2];
};
00063
00064
00065
00066
00067
/// Wrap flag carried in bit 31 of the RX buffer read/write pointers
#define RXL_WRAP CO_BIT(31)
/// Size of a complete RX descriptor, in bytes
#define RXL_RXDESC_SIZE (sizeof_b(struct rxdesc))
/// 32-bit words reserved before each RX header descriptor (fed to nxmac_rx_buf_config_pack)
#define RXL_RHD_HEADER (sizeof_b(struct rx_upload_cntrl_tag) / 4)
/// 32-bit words reserved after each RX header descriptor (fed to nxmac_rx_buf_config_pack)
#define RXL_RHD_FOOTER ((RXL_RXDESC_SIZE - sizeof_b(struct rx_hd)) / 4 - RXL_RHD_HEADER)
/// 32-bit words reserved before each RX buffer descriptor (fed to nxmac_rx_buf_config_pack)
#define RXL_RBD_HEADER 0
/// 32-bit words reserved after each RX buffer descriptor (fed to nxmac_rx_buf_config_pack)
#define RXL_RBD_FOOTER ((sizeof_b(struct rx_payloaddesc) - sizeof_b(struct rx_pbd)) / 4)
/// Element count (uint32_t words) of HW buffer 1 — data handling path
#define RXL_BUFFER1_SIZE ((RWNX_MAX_AMSDU_RX + RXL_RXDESC_SIZE) / 2)
/// Element count (uint32_t words) of HW buffer 2 — immediate handling path
#define RXL_BUFFER2_SIZE ((512 + RXL_RXDESC_SIZE) / 4)
00084
/// Identifiers of the two HW reception buffers
enum rx_buf_id
{
    /// Data handling path buffer (mapped to HW RX buffer 1)
    DH_BUF = 0,
    /// Immediate handling path buffer (mapped to HW RX buffer 2)
    IH_BUF = 1
};
00093
00094
00095
00096
00097
/// Module environment: SW view of the two HW reception buffers
static struct rxl_hwdesc_env_tag rx_hwdesc_env;

/// Memory backing HW reception buffer 1 (data frames), placed in shared RAM
static uint32_t rxl_hw_buffer1[RXL_BUFFER1_SIZE] __SHAREDRAM;

/// Memory backing HW reception buffer 2 (immediately-handled frames), placed in shared RAM
static uint32_t rxl_hw_buffer2[RXL_BUFFER2_SIZE] __SHAREDRAM;
00108
00109
00110
00111
00112
/**
 * Initialize HW reception buffer @p id: reset the SW state and program the MAC HW
 * start/end/read/write pointers so the buffer starts empty.
 * @param id  HW buffer index, 1-based (maps to rx_hwdesc_env.buf[id - 1])
 */
#define RXL_HW_BUFFER_INIT(id) \
    do { \
        struct rxl_buffer_tag *buf = &rx_hwdesc_env.buf[id - 1]; \
        buf->start = CPU2HW(rxl_hw_buffer##id); \
        /* usable size is aligned down to a 32-bit boundary */ \
        buf->end = buf->start + CO_ALIGN4_LO(sizeof_b(rxl_hw_buffer##id)); \
        buf->read = buf->start; \
        nxmac_rx_buf_##id##_start_ptr_set(buf->start); \
        /* HW end pointer is the address of the last word, not one past it */ \
        nxmac_rx_buf_##id##_end_ptr_set(buf->end - 4); \
        nxmac_rx_buf_##id##_rd_ptr_set(buf->start); \
        nxmac_rx_buf_##id##_wr_ptr_set(buf->start); \
    } while(0)
00125
00126
00127
00128
00129
00137 __INLINE bool rxl_dh_buf_has_data(void)
00138 {
00139 return (rx_hwdesc_env.buf[DH_BUF].read != nxmac_rx_buf_1_wr_ptr_get());
00140 }
00141
00149 __INLINE bool rxl_ih_buf_has_data(void)
00150 {
00151 return (rx_hwdesc_env.buf[IH_BUF].read != nxmac_rx_buf_2_wr_ptr_get());
00152 }
00153
00168 __INLINE uint32_t rxl_add_wrap_bit(uint32_t new_read, uint32_t old_read)
00169 {
00170 uint32_t wrap_bit = old_read & RXL_WRAP;
00171
00172 old_read &= ~RXL_WRAP;
00173 if (new_read < old_read)
00174 wrap_bit ^= RXL_WRAP;
00175
00176 return (new_read | wrap_bit);
00177 }
00178
00192 __INLINE void rxl_update_hw_buf_read(uint32_t new_read, int id)
00193 {
00194 if (id == DH_BUF)
00195 {
00196 nxmac_rx_buf_1_rd_ptr_set(rxl_add_wrap_bit(new_read, nxmac_rx_buf_1_rd_ptr_get()));
00197 }
00198 else
00199 {
00200 nxmac_rx_buf_2_rd_ptr_set(rxl_add_wrap_bit(new_read, nxmac_rx_buf_2_rd_ptr_get()));
00201 }
00202 }
00203
00218 __INLINE void rxl_update_sw_buf_read(uint32_t new_read, struct rxl_buffer_tag *buf)
00219 {
00220 buf->read = rxl_add_wrap_bit(new_read, buf->read);
00221 }
00222
00235 __INLINE void rxl_adjust_sw_buf_read(struct rxl_buffer_tag *buf)
00236 {
00237 if (buf->end - (buf->read & ~RXL_WRAP) < RXL_RXDESC_SIZE)
00238 rxl_update_sw_buf_read(buf->start, buf);
00239 }
00240
00252 __INLINE void rxl_adjust_frmlen(struct rx_hd *rhd)
00253 {
00254
00255 if (!rhd->frmlen)
00256 return;
00257
00258 switch (rhd->statinfo & RX_HD_DECRSTATUS)
00259 {
00260 case RX_HD_DECR_CCMP128_SUCCESS:
00261 rhd->frmlen -= MIC_LEN + MAC_FCS_LEN;
00262 break;
00263 case RX_HD_DECR_WEP_SUCCESS:
00264 rhd->frmlen -= ICV_LEN + MAC_FCS_LEN;
00265 break;
00266 case RX_HD_DECR_TKIP_SUCCESS:
00267 rhd->frmlen -= ICV_LEN + MAC_FCS_LEN;
00268 break;
00269 default:
00270 rhd->frmlen -= MAC_FCS_LEN;
00271 break;
00272 }
00273 }
00274
00275 #if NX_AMSDU_DEAGG
00276
/**
 * Compute the (padded) length of the next A-MSDU subframe.
 *
 * The 16-bit subframe length field sits 2 * MAC_ADDR_LEN bytes into the
 * subframe header (after the DA and SA fields). It may lie in the current
 * payload buffer or, when the header straddles a buffer boundary, in the
 * next chained buffer.
 *
 * @param pd           Payload descriptor containing the start of the subframe
 * @param payl_offset  Byte offset of the subframe inside that payload buffer
 * @param mpdu_len     Number of MPDU bytes still to consume
 * @return The subframe length padded to a 4-byte boundary (or the whole
 *         remainder if it is the last subframe), 0 if the length read is
 *         inconsistent with the remaining MPDU bytes.
 */
static uint16_t rxl_amsdu_subframe_len_get(struct rx_payloaddesc *pd,
                                           uint16_t payl_offset,
                                           uint16_t mpdu_len)
{
    uint16_t subfrm_len;
    uint32_t subfrm_len_addr;
    uint16_t payl_len = pd->pbd.dataendptr - pd->pbd.datastartptr + 1;

    // Not even room for a subframe header: malformed, reject
    if (sizeof_b(struct amsdu_hdr) > mpdu_len)
        return 0;

    // Locate the length field: either in this payload buffer or in the next
    // chained one when the subframe header straddles the buffer boundary
    if (payl_offset + sizeof_b(struct amsdu_hdr) <= payl_len)
    {
        subfrm_len_addr = pd->pbd.datastartptr + payl_offset + 2 * MAC_ADDR_LEN;
    }
    else
    {
        uint16_t offset = payl_offset + 2 * MAC_ADDR_LEN - payl_len;
        struct rx_payloaddesc *pd_tmp = (struct rx_payloaddesc *)HW2CPU(pd->pbd.next);

        subfrm_len_addr = pd_tmp->pbd.datastartptr + offset;
    }

    // The on-air length field is big-endian; add the header size to get the
    // complete subframe size
    subfrm_len = co_ntohs(co_read16(HW2CPU(subfrm_len_addr))) + sizeof_b(struct amsdu_hdr);

    // Sanity check against the remaining MPDU bytes and the maximum allowed size
    if ((subfrm_len > mpdu_len) || (subfrm_len > RX_MAX_AMSDU_SUBFRAME_LEN))
        return 0;

    if ((mpdu_len - subfrm_len) < 3)
        // Fewer than 3 bytes left: no further subframe can follow, so consume
        // the whole remainder
        subfrm_len = mpdu_len;
    else
        // Intermediate subframes are padded to a 4-byte boundary
        subfrm_len = CO_ALIGN4_HI(subfrm_len);

    return subfrm_len;
}
00338 #endif
00339
/**
 * Process a frame received through the immediate-handling buffer.
 *
 * Control responses that the firmware consumes directly (BlockAck when
 * NX_AMPDU_TX, HE trigger frames when NX_MAC_HE) are dispatched to the TX
 * path, then the descriptor is released back to the HW by advancing both the
 * SW and HW read pointers past the frame.
 *
 * @param rxdesc  RX descriptor of the received frame
 */
void rxl_rxcntrl_frame(struct rxdesc* rxdesc)
{
    uint32_t new_read;
    struct rxl_buffer_tag *buf = &rx_hwdesc_env.buf[IH_BUF];
    struct rx_dmadesc *dma_hdrdesc = rxl_dmadesc_get(rxdesc);

    // A non-zero frame length means a payload buffer descriptor is attached
    if (dma_hdrdesc->hd.frmlen != 0)
    {
        uint16_t framectrl;
        #if NX_AMPDU_TX
        uint32_t statinfo;
        #endif
        struct rx_pbd *pd = HW2CPU(dma_hdrdesc->hd.first_pbd_ptr);

        // A frame with a length must have a payload descriptor
        ASSERT_REC(pd != NULL);

        // Frame Control is the first 16-bit field of the MAC header
        framectrl = co_read16(HW2CPU(pd->datastartptr));

        #if NX_AMPDU_TX
        statinfo = dma_hdrdesc->hd.statinfo;
        #endif

        // Dispatch on frame type/subtype
        switch(framectrl & MAC_FCTRL_TYPESUBTYPE_MASK)
        {
            #if NX_AMPDU_TX
            case MAC_FCTRL_BA :
                // Only correctly-received response BlockAcks are forwarded
                if ((statinfo & (RX_HD_RSP_FRM | RX_HD_SUCCESS)) ==
                                (RX_HD_RSP_FRM | RX_HD_SUCCESS))
                {
                    txl_ba_push(rxdesc);
                }
                break;
            #endif
            #if NX_MAC_HE
            case MAC_FCTRL_HE_TRIGGER :
                // Only correctly-received trigger frames are forwarded
                if (statinfo & RX_HD_SUCCESS)
                {
                    txl_he_trigger_push(rxdesc);
                }
                break;
            #endif
            default:
                break;
        }

        // The payload ends the frame: advance past its last (4-byte aligned) word
        new_read = CO_ALIGN4_LO(pd->dataendptr) + 4;
    }
    else
    {
        // No payload attached: advance past the descriptor itself
        ASSERT_REC(dma_hdrdesc->hd.first_pbd_ptr == 0);
        new_read = CPU2HW(rxdesc + 1);
    }

    // Wrap back to the start when the end of the buffer is reached
    if (new_read == buf->end)
        new_read = buf->start;

    rxdesc->new_read = new_read;

    // Advance the SW read pointer
    rxl_update_sw_buf_read(rxdesc->new_read, buf);

    // Advance the HW read pointer, releasing the descriptor to the MAC
    rxl_frame_release(rxdesc);
}
00422
/**
 * Build and push a DMA descriptor chain that uploads @p upload_len payload
 * bytes to a host buffer.
 *
 * The payload may span several chained RX payload descriptors; one DMA
 * descriptor is linked per payload buffer touched. The chain is then pushed
 * on the RX data upload DMA channel.
 *
 * @param[in,out] curr_pd      In: first payload descriptor to upload from.
 *                             Out: payload descriptor the upload stopped in.
 * @param upload_len           Number of bytes to upload (must be > 0)
 * @param hostbuf              Destination host buffer address
 * @param[in,out] payl_offset  In: byte offset inside the first payload buffer.
 *                             Out: offset of the first byte not uploaded
 *                             (non-zero only when NX_AMSDU_DEAGG splits
 *                             mid-buffer).
 * @param dma_idx              Index of the DMA descriptor to use inside the
 *                             first payload descriptor
 * @param irq_en               Interrupt-enable flag for the last DMA descriptor
 * @return The payload descriptor preceding the one returned in *curr_pd, or
 *         NULL if the upload fit in a single payload buffer.
 */
static struct rx_payloaddesc *rxl_payload_transfer(struct rx_payloaddesc **curr_pd,
                                                   uint16_t upload_len, uint32_t hostbuf,
                                                   uint16_t *payl_offset, int dma_idx,
                                                   int irq_en)
{
    uint16_t dma_len;
    struct dma_desc *dma_desc;
    struct dma_desc *first_dma_desc;
    struct rx_payloaddesc *pd = *curr_pd;
    struct rx_payloaddesc *prev_pd = NULL;
    uint16_t payl_off = *payl_offset;

    // Start from the requested DMA descriptor of the first payload descriptor
    dma_desc = &pd->dma_desc[dma_idx];
    // Remember the head of the chain for the final push
    first_dma_desc = dma_desc;

    // Walk the payload descriptor chain until upload_len bytes are covered
    while (1)
    {
        struct dma_desc *dma_desc_next;
        uint16_t payl_len = pd->pbd.dataendptr - pd->pbd.datastartptr + 1;

        // Program source/destination of this hop
        dma_desc->dest = hostbuf;
        dma_desc->src = pd->pbd.datastartptr + payl_off;

        // Does the remaining upload end inside this payload buffer?
        if ((upload_len + payl_off) < payl_len)
        {
            // Yes: transfer only what is left
            dma_len = upload_len;

            #if NX_AMSDU_DEAGG
            // Next subframe starts mid-buffer: remember where
            payl_off += upload_len;
            #endif
        }
        else
        {
            // No: consume this buffer to its end
            dma_len = payl_len - payl_off;

            // Following buffers are read from their beginning
            payl_off = 0;
        }

        dma_desc->length = dma_len;

        // No IRQ on intermediate descriptors
        dma_desc->ctrl = RX_LLICTRL(0);

        // Advance the destination pointer
        hostbuf += dma_len;

        // Account for the bytes covered by this hop
        upload_len -= dma_len;

        // Done once the full requested length is covered
        if (upload_len == 0)
        {
            break;
        }

        // Move to the next chained payload descriptor
        prev_pd = pd;
        pd = (struct rx_payloaddesc *)HW2CPU(pd->pbd.next);
        dma_desc_next = &pd->dma_desc[0];

        // The chain must not end before upload_len bytes are found
        ASSERT_ERR(pd != NULL);

        // Link the DMA descriptors together
        dma_desc->next = CPU2HW(dma_desc_next);

        dma_desc = dma_desc_next;
    }

    // Program the IRQ request (if any) on the last descriptor of the chain
    dma_desc->ctrl = RX_LLICTRL(irq_en);

    // Hand the whole chain to the DMA engine
    dma_push(first_dma_desc, dma_desc, RX_DATA_UPLOAD_CHAN);

    *curr_pd = pd;
    *payl_offset = payl_off;

    return prev_pd;
}
00539
00540 #if NX_AMSDU_DEAGG
00541
/**
 * De-aggregate an A-MSDU: upload each subframe to its own host buffer.
 *
 * Subframe lengths are parsed on the fly (rxl_amsdu_subframe_len_get) and
 * each subframe is transferred with rxl_payload_transfer. Host buffer IDs of
 * the subframes after the first are stored in dma_hdrdesc->amsdu_hostids.
 * De-aggregation stops early when host buffers run out, a subframe length is
 * inconsistent, or the per-payload-descriptor DMA descriptor pool
 * (NX_DMADESC_PER_RX_PDB_CNT) is exhausted.
 *
 * @param dma_hdrdesc       DMA header descriptor of the received A-MSDU
 * @param[in,out] curr_pd   In: first payload descriptor of the A-MSDU.
 *                          Out: payload descriptor processing stopped in.
 * @param mpdu_len          Total MPDU length in bytes
 * @param hostbuf           Host buffer address for the first subframe
 * @param payl_offset       Byte offset of the A-MSDU inside the first buffer
 * @return The payload descriptor preceding *curr_pd (as returned by the last
 *         transfer), or NULL.
 */
static struct rx_payloaddesc *rxl_amsdu_deagg(struct rx_dmadesc *dma_hdrdesc,
                                              struct rx_payloaddesc **curr_pd,
                                              uint16_t mpdu_len, uint32_t hostbuf,
                                              uint16_t payl_offset)
{
    int dma_idx = 0;
    int msdu_idx = 0;
    uint16_t upload_len;
    struct rx_payloaddesc *pd = *curr_pd;
    struct rx_payloaddesc *prev_pd = NULL;

    // No host buffer assigned to any subframe yet
    memset(dma_hdrdesc->amsdu_hostids, 0, sizeof(dma_hdrdesc->amsdu_hostids));

    // Length (padded) of the first subframe
    upload_len = rxl_amsdu_subframe_len_get(pd, payl_offset, mpdu_len);

    while (1)
    {
        prev_pd = rxl_payload_transfer(&pd, upload_len, hostbuf, &payl_offset, dma_idx, 0);
        mpdu_len -= upload_len;

        // Continue only if bytes remain and a host buffer is available
        if ((mpdu_len != 0) && macif_rx_buf_check())
        {
            // payl_offset != 0 means the next subframe starts inside the same
            // payload buffer, so another DMA descriptor of this buffer is needed
            if (payl_offset)
            {
                dma_idx++;

                // Stop if this payload descriptor has no DMA descriptor left
                if (dma_idx >= NX_DMADESC_PER_RX_PDB_CNT)
                    break;
            }
            else
            {
                // Next subframe starts at the beginning of the next buffer
                prev_pd = pd;
                pd = (struct rx_payloaddesc *)HW2CPU(pd->pbd.next);

                // The chain must cover the remaining MPDU bytes
                ASSERT_ERR(pd != NULL);

                dma_idx = 0;
            }

            upload_len = rxl_amsdu_subframe_len_get(pd, payl_offset, mpdu_len);

            // A zero length means the subframe header is inconsistent: stop
            if (upload_len == 0)
                break;

            // Allocate the host buffer for the next subframe and record its id
            hostbuf = macif_rx_buf_get(&dma_hdrdesc->amsdu_hostids[msdu_idx++]) +
                      RXL_PAYLOAD_OFFSET;
        }
        else
            break;
    }

    *curr_pd = pd;

    return prev_pd;
}
00626 #endif // NX_AMSDU_DEAGG
00627
/**
 * Advance the data-handling buffer SW read pointer past the last RX buffer
 * descriptor of a frame.
 *
 * @param rxdesc   RX descriptor of the frame; rxdesc->new_read is updated
 * @param pd       Payload descriptor to start the search from (NULL when the
 *                 frame has no payload)
 * @param prev_pd  NOTE(review): currently unused — kept for interface
 *                 compatibility with callers; TODO confirm it can be dropped
 */
static void rxl_go_to_last_rbd(struct rxdesc *rxdesc, struct rx_payloaddesc *pd,
                               struct rx_payloaddesc *prev_pd)
{
    uint32_t new_read;
    struct rxl_buffer_tag *buf = &rx_hwdesc_env.buf[DH_BUF];

    // No payload attached: the frame ends right after its RX descriptor
    if (pd == NULL)
    {
        new_read = CPU2HW(rxdesc + 1);
    }
    else
    {
        // Follow the payload descriptor chain until the last buffer of the frame
        while (1)
        {
            uint32_t statinfo = pd->pbd.bufstatinfo;

            // Stop on the buffer flagged by HW as the last one
            if (statinfo & RX_PD_LASTBUF)
                break;

            pd = (struct rx_payloaddesc *)HW2CPU(pd->pbd.next);

            // The chain must contain a last-buffer marker
            ASSERT_REC(pd != NULL);
        };

        // Advance past the last (4-byte aligned) word of the final buffer
        new_read = CO_ALIGN4_LO(pd->pbd.dataendptr) + 4;
    }

    // Wrap back to the start when the end of the buffer is reached
    if (new_read == buf->end)
        new_read = buf->start;

    rxdesc->new_read = new_read;

    // Advance the SW read pointer (HW pointer is updated at release time)
    rxl_update_sw_buf_read(rxdesc->new_read, buf);
}
00683
/**
 * Initialize the RX HW descriptor module: reset both HW reception buffers and
 * program the MAC RX buffer layout/control registers.
 */
void rxl_hwdesc_init(void)
{
    // Reset SW state and HW pointers of both reception buffers
    RXL_HW_BUFFER_INIT(1);
    RXL_HW_BUFFER_INIT(2);

    // Program the header/footer word counts the HW skips around descriptors
    nxmac_rx_buf_config_pack(RXL_RBD_FOOTER, RXL_RBD_HEADER, RXL_RHD_FOOTER, RXL_RHD_HEADER);
    // NOTE(review): the remaining literal parameters (1, 1, 1, 128, 2, 1, 1)
    // are HW-specific control values — see the nxmac register map for their
    // meaning; only the current disable_rx_buffer_2 state is preserved here
    nxmac_rx_cntrl_2_pack(nxmac_disable_rx_buffer_2_getf(), 1, 1, 1, 128, 2, 1, 1);
}
00694
/**
 * Enable or disable monitor-related RX settings and re-initialize the module.
 *
 * @param enable  true to accept all trigger frames and disable RX buffer 2,
 *                false to restore the normal configuration
 */
void rxl_hwdesc_monitor(bool enable)
{
    // Accept (or stop accepting) all trigger frames
    nxmac_accept_all_trigger_frames_setf(enable);

    // Disable (or re-enable) the immediate-handling HW buffer
    nxmac_disable_rx_buffer_2_setf(enable);

    // Reset buffers and re-program the RX configuration registers
    rxl_hwdesc_init();
}
00706
00707 void rxl_frame_release(struct rxdesc* rxdesc)
00708 {
00709
00710 rxl_update_hw_buf_read(rxdesc->new_read, rxdesc->buf_id);
00711 }
00712
00713 void rxl_mpdu_copy(struct rx_pbd *pbd, uint16_t length, uint16_t offset, uint32_t *dst)
00714 {
00715 uint16_t dst_offset = 0;
00716 uint32_t *src;
00717 while (length)
00718 {
00719 uint16_t copy_len;
00720 uint16_t payl_len = pbd->dataendptr - pbd->datastartptr + 1;
00721 if (length < (payl_len - offset))
00722 copy_len = length;
00723 else
00724 copy_len = payl_len - offset;
00725
00726 src = HW2CPU(pbd->datastartptr + offset);
00727 co_copy32(dst + dst_offset/4, src, copy_len);
00728
00729 length -= copy_len;
00730 offset = 0;
00731
00732 if (length == 0)
00733 break;
00734
00735 dst_offset += copy_len;
00736 pbd = HW2CPU(pbd->next);
00737 ASSERT_ERR(pbd != NULL);
00738 }
00739 }
00740
/**
 * Upload only part of an MPDU to the host and release the rest.
 *
 * The requested bytes are pushed on the RX upload DMA channel (with the IRQ
 * enabled on the last descriptor), the buffer read pointer is advanced past
 * the whole frame, and the upload-completion callback is registered.
 *
 * @param rxdesc       RX descriptor of the frame
 * @param upload_len   Number of bytes to upload
 * @param hostbuf      Destination host buffer address
 * @param payl_offset  Byte offset inside the first payload buffer
 * @param cb           Callback invoked when the upload completes
 * @param env          Environment passed back to the callback
 */
void rxl_mpdu_partial_transfer(struct rxdesc *rxdesc, uint16_t upload_len,
                               uint32_t hostbuf, uint16_t payl_offset,
                               cb_rx_dma_func_ptr cb, void *env)
{
    struct rx_dmadesc *dma_hdrdesc = rxl_dmadesc_get(rxdesc);
    struct rx_payloaddesc *pd = HW2CPU(dma_hdrdesc->hd.first_pbd_ptr);
    struct rx_payloaddesc *prev_pd = NULL;

    // Profiling marker: transfer start
    RX_MPDU_XFER_SET();

    // Build and push the DMA chain for the requested bytes (IRQ on last hop)
    prev_pd = rxl_payload_transfer(&pd, upload_len, hostbuf, &payl_offset, 0, 1);

    // Advance the SW read pointer past the whole frame
    rxl_go_to_last_rbd(rxdesc, pd, prev_pd);

    // Profiling marker: transfer end
    RX_MPDU_XFER_CLR();

    // Register the upload-completion callback and its environment
    rxdesc->upload_cntrl.cb = cb;
    rxdesc->upload_cntrl.env = env;

    // Queue the descriptor until the DMA transfer completes
    rxl_upload_cntrl_push_pending(&rxdesc->upload_cntrl);
}
00768
/**
 * Upload a complete MPDU (payload + header info) to the host.
 *
 * Allocates a host buffer, pushes the payload on the RX upload DMA channel
 * (de-aggregating A-MSDUs when NX_AMSDU_DEAGG and flagged as such), then
 * pushes the header-information block, and finally advances the buffer read
 * pointer past the frame.
 *
 * @param rxdesc  RX descriptor of the frame to upload
 */
void rxl_mpdu_transfer(struct rxdesc *rxdesc)
{
    #if (NX_UMAC_PRESENT)
    struct rx_cntrl_rx_status *rx_status = &rxu_cntrl_env.rx_status;
    uint16_t payl_offset = rx_status->payl_offset;
    #else
    uint16_t payl_offset = 0;
    #endif
    uint32_t hostbuf, hostbuf_start;
    struct dma_desc *dma_desc;
    struct rx_dmadesc *dma_hdrdesc = rxl_dmadesc_get(rxdesc);
    uint16_t mpdu_len;
    struct rx_payloaddesc *pd;
    struct rx_payloaddesc *prev_pd = NULL;

    // Profiling marker: transfer start
    RX_MPDU_XFER_SET();

    // First payload descriptor of the frame
    pd = (struct rx_payloaddesc *)HW2CPU(dma_hdrdesc->hd.first_pbd_ptr);

    // Allocate the host buffer that will receive the frame
    #if (NX_UMAC_PRESENT)
    hostbuf_start = macif_rx_buf_get(&rxu_cntrl_env.hostid_current);
    #else
    hostbuf_start = macif_rx_buf_get();
    #endif //(NX_UMAC_PRESENT)

    // Payload is written after the reserved header area of the host buffer
    hostbuf = hostbuf_start + RXL_PAYLOAD_OFFSET;

    mpdu_len = dma_hdrdesc->hd.frmlen;

    // Only non-empty frames within the supported size are uploaded
    ASSERT_REC(mpdu_len != 0);

    ASSERT_REC(mpdu_len <= RWNX_MAX_AMSDU_RX);

    #if NX_AMSDU_DEAGG
    // A-MSDU frames are split into one host buffer per subframe
    if (dma_hdrdesc->flags & RX_FLAGS_IS_AMSDU_BIT)
    {
        prev_pd = rxl_amsdu_deagg(dma_hdrdesc, &pd, mpdu_len, hostbuf, payl_offset);
    }
    else
    #endif
    {
        // Single MSDU: one DMA chain for the whole payload (no IRQ yet)
        prev_pd = rxl_payload_transfer(&pd, mpdu_len, hostbuf, &payl_offset, 0, 0);

        #if (NX_UMAC_PRESENT)
        // Remember where the payload ends inside the host buffer
        rx_status->host_buf_addr = hostbuf + mpdu_len;
        #endif
    }

    // Record the channel information the frame was received on
    phy_get_channel(&dma_hdrdesc->phy_info, PHY_PRIM);

    // Pattern lets the host validate the descriptor content
    dma_hdrdesc->pattern = DMA_HD_RXPATTERN;

    dma_desc = &dma_hdrdesc->dma_desc;

    // Upload the header information block at the start of the host buffer
    dma_desc->dest = hostbuf_start;
    dma_desc->src = CPU2HW(&dma_hdrdesc->hd.frmlen);
    dma_desc->length = RXL_HEADER_INFO_LEN;

    #if NX_UMAC_PRESENT && !NX_FULLY_HOSTED
    // UMAC handles completion itself: no IRQ on this transfer
    dma_desc->ctrl = RX_LLICTRL(0);
    #else
    // IRQ on completion so the frame can be released
    dma_desc->ctrl = RX_LLICTRL(1);
    #endif //NX_UMAC_PRESENT && !NX_FULLY_HOSTED

    // Push the single-descriptor header transfer
    dma_push(dma_desc, dma_desc, RX_DATA_UPLOAD_CHAN);

    // Advance the SW read pointer past the whole frame
    rxl_go_to_last_rbd(rxdesc, pd, prev_pd);

    // Profiling marker: transfer end
    RX_MPDU_XFER_CLR();

    #if NX_FULLY_HOSTED || !NX_UMAC_PRESENT
    // Queue the descriptor until the DMA transfer completes
    rxl_upload_cntrl_push_pending(&rxdesc->upload_cntrl);
    #endif
}
00865
/**
 * Free an MPDU without uploading it to the host.
 *
 * Advances the read pointer past the frame. If uploads are still pending the
 * release is ordered behind them (flagged RX_NO_UPLOAD); otherwise the frame
 * is released to the HW immediately.
 *
 * @param rxdesc  RX descriptor of the frame to free
 */
void rxl_mpdu_free(struct rxdesc *rxdesc)
{
    struct rx_payloaddesc *pd;
    struct rx_payloaddesc *prev_pd = NULL;
    struct rx_dmadesc *dma_hdrdesc = rxl_dmadesc_get(rxdesc);

    // Profiling marker: free start
    RX_MPDU_FREE_SET();

    // First payload descriptor of the frame (may be NULL for empty frames)
    pd = (struct rx_payloaddesc *)HW2CPU(dma_hdrdesc->hd.first_pbd_ptr);

    // Advance the SW read pointer past the whole frame
    rxl_go_to_last_rbd(rxdesc, pd, prev_pd);

    if (rxl_upload_pending())
    {
        // Uploads in flight: release in order behind them, with no upload
        rxdesc->upload_cntrl.cb = NULL;
        rxdesc->upload_cntrl.flags |= RX_NO_UPLOAD;
        rxl_upload_cntrl_push_pending(&rxdesc->upload_cntrl);
    }
    else
    {
        // Nothing pending: release to the HW right away
        rxl_frame_release(rxdesc);
    }

    // Profiling marker: free end
    RX_MPDU_FREE_CLR();
}
00894
00895 struct rxdesc *rxl_rxdesc_get(void)
00896 {
00897 struct rxl_buffer_tag *buf = &rx_hwdesc_env.buf[DH_BUF];
00898
00899 if (rxl_dh_buf_has_data())
00900 {
00901
00902 rxl_adjust_sw_buf_read(buf);
00903
00904 return (HW2CPU(buf->read & ~RXL_WRAP));
00905 }
00906
00907 return NULL;
00908 }
00909
/**
 * Prepare a freshly received RX descriptor for SW processing: validate the HW
 * pattern, strip the frame trailer from the reported length, and initialize
 * the upload control structure.
 *
 * @param rxdesc  RX descriptor to prepare
 */
void rxl_rxdesc_ready_for_processing(struct rxdesc *rxdesc)
{
    struct rx_hd *rhd = &rxdesc->dma_hdrdesc.hd;

    // HW must have fully written the descriptor before SW touches it
    ASSERT_ERR(rhd->upatternrx == RX_HEADER_DESC_PATTERN);

    // Remove FCS (and security trailer, if decrypted) from the frame length
    rxl_adjust_frmlen(rhd);

    // Default upload-completion callback depends on the build configuration
    #if NX_FULLY_HOSTED
    rxdesc->upload_cntrl.cb = NULL;
    #else
    rxdesc->upload_cntrl.cb = rxl_host_irq_mitigation_update;
    #endif
    rxdesc->upload_cntrl.rxdesc = rxdesc;
    rxdesc->upload_cntrl.flags = 0;
    // Default to the data-handling buffer; callers may override (see
    // rxl_immediate_frame_get)
    rxdesc->buf_id = DH_BUF;
}
00931
/**
 * MAC interrupt handler for RX buffer 1 activity: acknowledge and mask the
 * trigger interrupt, then defer the actual frame processing to the
 * KE_EVT_RXREADY kernel event.
 */
void rxl_mpdu_isr(void)
{
    // Profiling marker: IRQ entry
    PROF_RX_MAC_IRQ_SET();

    // Acknowledge the interrupt and mask further buffer-1 triggers until the
    // event handler has drained the buffer
    nxmac_tx_rx_int_ack_clear(NXMAC_RX_BUFFER_1_TRIGGER_BIT);
    nxmac_enable_rx_buffer_1_trigger_setf(0);

    // Defer processing to the kernel event context
    ke_evt_set(KE_EVT_RXREADY_BIT);

    // Profiling marker: IRQ exit
    PROF_RX_MAC_IRQ_CLR();
}
00947
/**
 * Drain the immediate-handling buffer: prepare and process every pending
 * descriptor (control frames handled directly by the firmware).
 */
void rxl_immediate_frame_get(void)
{
    struct rxl_buffer_tag *buf = &rx_hwdesc_env.buf[IH_BUF];

    while (rxl_ih_buf_has_data())
    {
        struct rxdesc *rxdesc;

        // Skip the buffer tail if a full descriptor cannot fit before the end
        rxl_adjust_sw_buf_read(buf);

        // Strip the wrap flag to get the actual descriptor address
        rxdesc = HW2CPU(buf->read & ~RXL_WRAP);

        rxl_rxdesc_ready_for_processing(rxdesc);
        // Override the default DH_BUF set just above: this descriptor belongs
        // to the immediate-handling buffer
        rxdesc->buf_id = IH_BUF;

        // Dispatch the control frame and release the descriptor
        rxl_rxcntrl_frame(rxdesc);
    }
}
00967
00968 void rxl_current_desc_get(struct rx_hd **rhd, struct rx_pbd **rbd)
00969 {
00970 struct rxdesc *rxdesc = rxl_rxdesc_get();
00971
00972
00973 if (rxdesc)
00974 {
00975 struct rx_hd *rx_hd = &rxdesc->dma_hdrdesc.hd;
00976 *rhd = rx_hd;
00977 *rbd = HW2CPU(rx_hd->first_pbd_ptr);
00978 }
00979 else
00980 {
00981 *rhd = NULL;
00982 *rbd = NULL;
00983 }
00984 }
00985