00001
00020
00021
00022
00023
00024 #include "co_int.h"
00025 #include "co_bool.h"
00026
00027 #include "txl_buffer.h"
00028 #include "txl_cntrl.h"
00029 #include "txl_agg.h"
00030 #include "macif.h"
00031 #include "dbg_assert.h"
00032
00033 #include "dbg.h"
00034 #include "co_utils.h"
00035 #if NX_UMAC_PRESENT
00036 #include "txu_cntrl.h"
00037 #endif
00038 #if (RW_BFMER_EN)
00039 #include "bfr.h"
00040 #endif //(RW_BFMER_EN)
00041
00042 #if !NX_FULLY_HOSTED
00043
00044
00045
00046
00048 struct txl_buffer_env_tag txl_buffer_env;
00049
00050
00051
00052
00053
00054 #if RW_BFMER_EN
00055
00066 static struct dma_desc *txl_buffer_bfr_dma_desc_conf(struct txdesc *txdesc,
00067 struct dma_desc *dma_desc_bfr,
00068 struct dma_desc *dma_desc)
00069 {
00070 if (bfr_is_enabled() && bfr_tx_frame_ind(txdesc, dma_desc_bfr))
00071 {
00072
00073 dma_desc->next = CPU2HW(dma_desc_bfr);
00074 dma_desc = dma_desc_bfr;
00075 }
00076
00077 return (dma_desc);
00078 }
00079 #endif
00080
00081
/**
 * Compute the TX buffer pool space required for a packet (or A-MSDU subframe).
 *
 * @param[in]  txdesc    TX descriptor of the packet
 * @param[out] size      Payload size (bytes) to be downloaded
 * @param[out] head_len  Header room (bytes) reserved in front of the payload
 * @param[in]  pkt_idx   (A-MSDU builds only) subframe index, or 0xFF
 *                       NOTE(review): 0xFF appears to mean "take the lengths
 *                       from the already-built HW descriptor (THD)" rather
 *                       than from the host subframe arrays - confirm with callers.
 *
 * @return Required pool space, in 32-bit words (payload + head/tail room +
 *         buffer header + worst-case padding, rounded up to a word boundary).
 */
static uint32_t txl_buffer_get_params(struct txdesc *txdesc, uint16_t *size, uint16_t *head_len
#if NX_AMSDU_TX
                                      , uint8_t pkt_idx
#endif
                                      )
{
    uint32_t needed;
    uint16_t add_len;

#if NX_UMAC_PRESENT
#if NX_AMSDU_TX
    // 0xFF: lengths come from the hardware THD instead of the host arrays
    if (pkt_idx == 0xFF)
    {
        struct tx_hd *txhd = &txdesc->lmac.hw_desc->thd;
        pkt_idx = 0;
        // Head and tail room are both counted as extra buffer space
        add_len = txdesc->umac.head_len + txdesc->umac.tail_len;
        // THD frame length includes head/tail room and the FCS: strip them
        *size = txhd->frmlen - add_len - MAC_FCS_LEN;
        *head_len = txdesc->umac.head_len;
    }
    else
    {
        add_len = 0;
        *head_len = 0;
        *size = txdesc->host.packet_len[pkt_idx];
        // Head room is only needed in front of the first subframe
        if (pkt_idx == 0)
        {
            add_len = txdesc->umac.head_len;
            *head_len = txdesc->umac.head_len;
        }
        // Tail room is only needed after the last subframe
        if ((pkt_idx + 1) == txdesc->host.packet_cnt)
        {
            add_len += txdesc->umac.tail_len;
        }
    }
#else // NX_AMSDU_TX
    add_len = txdesc->umac.head_len + txdesc->umac.tail_len;
    *head_len = txdesc->umac.head_len;
    *size = txdesc->host.packet_len;
#endif // NX_AMSDU_TX
#if NX_MFP
    // Robust (protected) management frames: report the complete frame span
    // (payload + head/tail room) as header length.
    // NOTE(review): presumably so the MFP path can treat the whole frame as
    // headroom when laying it out for protection - confirm against the
    // TXU_CNTRL_MGMT_ROBUST handling in txl_buffer_transfer().
    if (txdesc->host.flags & TXU_CNTRL_MGMT_ROBUST)
    {
        *head_len = *size + add_len;
    }
#endif // NX_MFP
#else // NX_UMAC_PRESENT
    // LMAC-only build: frames arrive fully formed, no extra head/tail room
    add_len = 0;
    *head_len = 0;
#if NX_AMSDU_TX
    *size = txdesc->host.packet_len[pkt_idx];
#else
    *size = txdesc->host.packet_len;
#endif // NX_AMSDU_TX
#endif // NX_UMAC_PRESENT

    // Total bytes = payload + head/tail room + buffer header + worst-case
    // padding, rounded up to a 4-byte boundary, then converted to words
    needed = CO_ALIGN4_HI(*size + add_len + sizeof_b(struct txl_buffer_tag) +
                          TX_BUFFER_PADDING_MAX) / 4;

    return (needed);
}
00161
00162
/**
 * Carve a buffer of @p needed words out of the circular TX buffer pool.
 *
 * The pool is a circular array of 32-bit words. The buffer header
 * (struct txl_buffer_tag) plus header room must be contiguous, so if the
 * space left before the pool end is too small to hold them, that tail
 * fragment is folded into the previously allocated buffer and the new
 * buffer starts back at the pool beginning. The payload itself is allowed
 * to wrap (handled later by the DMA descriptor chain).
 *
 * @param[in]  idx        Per-AC/per-user pool index structure to allocate from
 * @param[in]  needed     Required space, in 32-bit words
 * @param[in]  head_len   Header room (bytes) that must stay contiguous
 * @param[out] remaining  Words between the buffer start and the pool end
 *                        (used by the caller to program the wrap DMA)
 *
 * @return Pointer to the allocated buffer, or NULL if the pool is too full.
 */
static struct txl_buffer_tag *txl_buffer_get_space(struct txl_buffer_idx_tag *idx,
                                                   uint32_t needed, uint16_t head_len,
                                                   uint32_t *remaining)
{
    struct txl_buffer_tag *buf = NULL;
    uint32_t free = idx->free;
    uint32_t freesz = idx->free_size;
    uint32_t *pool = idx->pool;

    // Sanity: the pool must exist and the bookkeeping must be in range
    ASSERT_ERR(pool != NULL);
    ASSERT_ERR(freesz <= TX_BUFFER_POOL_SIZE);
    ASSERT_ERR(free <= TX_BUFFER_POOL_SIZE);

    do
    {
        // Remember the request so txl_buffer_free() can signal when enough
        // space has been released for a retry
        idx->next_needed = needed;

        // Not enough total free space: fail the allocation
        if (idx->free_size < needed)
            break;

        // The buffer header + header room + padding must not wrap: if the
        // tail of the pool is too small, give that fragment to the previous
        // buffer and restart the new buffer at the pool beginning
        if (((TX_BUFFER_POOL_SIZE - free) * 4) < (sizeof_b(struct txl_buffer_tag) + head_len + TX_BUFFER_PADDING_MAX + 1))
        {
            uint32_t add_len = TX_BUFFER_POOL_SIZE - free;

            // The tail fragment is consumed even though it carries no data
            freesz -= add_len;
            free = 0;

            // Attach the fragment to the last allocated buffer so it is
            // returned to the pool when that buffer is freed
            if (idx->last != TX_BUFFER_NULL)
            {
                struct txl_buffer_tag *last_buf = (struct txl_buffer_tag *)&pool[idx->last];
                last_buf->length += add_len;
                idx->used_area += add_len;
            }

            // Re-check the space now that the fragment is gone
            if (freesz < needed)
            {
                // Commit the fragment accounting before failing
                idx->free_size = freesz;
                idx->free = free;
                break;
            }
        }

        // Allocation succeeds: place the buffer at the current free index
        buf = (struct txl_buffer_tag *)&pool[free];
        buf->length = needed;
        buf->flags = 0;
        idx->last = free;
        idx->next_needed = -1;
        freesz -= needed;
        *remaining = TX_BUFFER_POOL_SIZE - free;
        if (*remaining >= needed)
            free += needed;
        else
            // Payload wraps: the new free index is past the wrapped part
            free = needed - *remaining;

        // Commit the updated pool state
        idx->free = free;
        idx->free_size = freesz;
        idx->used_area += needed;
        idx->buf_size += needed;
        if (idx->buf_size >= TX_BUFFER_MIN_SIZE)
        {
            // Enough data accumulated since the last frontier: mark this
            // buffer as a frontier and bump the frontier count
            idx->count++;

            buf->flags |= BUF_FRONTIER;

            idx->buf_size = 0;
        }
        else
        {
            // Not a frontier buffer
            buf->flags &= ~BUF_FRONTIER;
        }
    } while (0);

    return (buf);
}
00262
/**
 * Program and start the DMA download of a packet into its TX buffer.
 *
 * Builds the chain of DMA descriptors that copies the payload from host
 * memory into the circular pool buffer (handling a possible wrap at the
 * pool end through the per-index spare descriptor), appends the pattern
 * descriptor that validates the payload buffer descriptor (TBD), and pushes
 * the buffer to the TX path according to the aggregation state.
 *
 * @param[in] txdesc           TX descriptor of the packet
 * @param[in] access_category  Access category (TX queue) of the packet
 * @param[in] buf              Buffer allocated by txl_buffer_get_space()
 * @param[in] idx              Pool index the buffer was carved from
 * @param[in] size             Payload size in bytes
 * @param[in] remaining        Words between buffer start and pool end
 * @param[in] head_len         Header room in front of the payload (bytes)
 * @param[in] pkt_idx          (A-MSDU builds) subframe index, or 0xFF to
 *                             download all subframes in one go
 */
static void txl_buffer_transfer(struct txdesc *txdesc, uint8_t access_category,
                                struct txl_buffer_tag *buf, struct txl_buffer_idx_tag *idx,
                                uint16_t size, uint32_t remaining, uint16_t head_len
#if NX_AMSDU_TX
                                , uint8_t pkt_idx
#endif
                                )
{
    struct dma_desc *dma_desc, *dma_desc_pat;
    struct tx_pbd *tbd;
    uint16_t dma_len, dma_oft;
    uint32_t dma_dest;
    uint32_t *pool = idx->pool;
    uint32_t packet_addr;
    uint32_t dma_len_before_wrap;
#if NX_UMAC_PRESENT && NX_AMSDU_TX
    int packet_cnt;
#endif
    int i = 0;

#if NX_UMAC_PRESENT
    // Destination starts after the buffer header and the header room
    dma_oft = head_len + offsetof_b(struct txl_buffer_tag, payload);
#if NX_AMSDU_TX
    // pkt_idx == 0xFF: download every subframe of the A-MSDU in one chain
    if (pkt_idx == 0xFF)
    {
        pkt_idx = 0;
        packet_addr = txdesc->host.packet_addr[0];
        dma_len = txdesc->host.packet_len[0];
        packet_cnt = txdesc->host.packet_cnt;
    }
    else
    {
        // Single subframe download
        packet_addr = txdesc->host.packet_addr[pkt_idx];
        dma_len = size;
        packet_cnt = 1;
    }
#else
    packet_addr = txdesc->host.packet_addr;
    dma_len = size;
#endif // NX_AMSDU_TX
#else // NX_UMAC_PRESENT
#if NX_AMSDU_TX
    packet_addr = txdesc->host.packet_addr[pkt_idx];
#else
    packet_addr = txdesc->host.packet_addr;
#endif
#if NX_AMSDU_TX
    if (pkt_idx > 0)
    {
        // Secondary subframe: payload only, no buffer control structure
        dma_len = size;
        dma_oft = offsetof_b(struct txl_buffer_tag, payload);
    }
    else
#endif // NX_AMSDU_TX
    {
        // LMAC-only build: the host also provides the buffer control
        // structure, downloaded together with the payload
        dma_len = size + sizeof_b(struct txl_buffer_control) + TX_BUFFER_PADDING_MAX;
        dma_oft = offsetof_b(struct txl_buffer_tag, buffer_control);
    }
#endif // NX_UMAC_PRESENT

    // Bytes that can be written before the destination hits the pool end
    dma_len_before_wrap = remaining * 4 - dma_oft;
    dma_desc_pat = &buf->dma_desc_pat;
    dma_dest = CPU2HW(buf) + dma_oft;

    do
    {
        dma_desc = &buf->dma_desc[i];

        // Program the copy of this packet (or subframe)
        dma_desc->src = packet_addr;
        dma_desc->dest = dma_dest;
        dma_desc->ctrl = 0;

        // Does the payload fit before the pool end?
        if (dma_len_before_wrap >= dma_len)
        {
            // Yes: a single descriptor is enough
            dma_len_before_wrap -= dma_len;
            dma_desc->length = dma_len;

#if NX_MFP
            if (txdesc->host.flags & TXU_CNTRL_MGMT_ROBUST)
            {
                // Robust management frame: shift the destination back over
                // the header room (the frame is laid out contiguously)
                dma_dest -= head_len;
                dma_desc->dest = dma_dest;

                // If header room is required, split the copy: MAC header
                // first, then the body after a gap of head_len bytes
                // NOTE(review): presumably the gap leaves space for the
                // security header insertion - confirm with the MFP module
                if (txdesc->umac.head_len)
                {
                    dma_desc->length = MAC_SHORT_MAC_HDR_LEN;

                    // Chain a second descriptor for the frame body
                    dma_desc->next = CPU2HW(&buf->dma_desc[1]);
                    dma_desc = &buf->dma_desc[1];
                    dma_desc->src = packet_addr + MAC_SHORT_MAC_HDR_LEN;
                    dma_desc->dest = dma_dest + MAC_SHORT_MAC_HDR_LEN
                                     + txdesc->umac.head_len;
                    dma_desc->length = dma_len - MAC_SHORT_MAC_HDR_LEN;
                    dma_desc->ctrl = 0;
                }
            }
#endif // NX_MFP
        }
        else
        {
            // No: copy up to the pool end, then wrap to the pool start
            // through the spare per-index descriptor
            dma_desc->length = dma_len_before_wrap;
            dma_desc->next = CPU2HW(&idx->desc->dma_desc);

            // Second part of the copy starts at the pool beginning
            dma_dest = CPU2HW(pool);
            dma_len = dma_len - dma_len_before_wrap;

            dma_desc = &idx->desc->dma_desc;
            dma_desc->dest = dma_dest;
            dma_desc->src = packet_addr + dma_len_before_wrap;
            dma_desc->length = dma_len;
            dma_desc->ctrl = 0;

            // Only one wrap can occur per buffer: disable further wrap
            // checks for the remaining subframes
            dma_len_before_wrap = 0xFFFFFFFF;
        }

#if NX_UMAC_PRESENT && NX_AMSDU_TX
        i++;

        // All subframes programmed?
        if (i == packet_cnt)
            break;

        // Chain the next subframe descriptor
        dma_desc->next = CPU2HW(&buf->dma_desc[i]);

        // If the previous subframe ended exactly at the pool end, the next
        // one starts at the pool beginning
        if (dma_len_before_wrap == 0)
        {
            dma_len_before_wrap = 0xFFFFFFFF;
            dma_dest = CPU2HW(pool);
        }
        else
            dma_dest += dma_len;

        dma_len = txdesc->host.packet_len[i];
        dma_oft = 0;
        packet_addr = txdesc->host.packet_addr[i];
    }
    while (1);
#else // NX_UMAC_PRESENT && NX_AMSDU_TX
    }
    while (0);
#endif // NX_UMAC_PRESENT && NX_AMSDU_TX

    // Program the pattern descriptor: once the payload copy completes, it
    // writes the expected pattern into the TBD, validating it for the MAC HW
    tbd = &buf->tbd;
    tbd->upatterntx = 0;
    dma_desc_pat->src = macif_tx_pattern_addr_get();
    dma_desc_pat->dest = CPU2HW(&tbd->upatterntx);
    dma_desc_pat->length = sizeof_b(tbd->upatterntx);
    dma_desc_pat->ctrl = 0;

#if NX_AMSDU_TX
    if (pkt_idx > 0)
    {
        // Secondary A-MSDU subframe
#if NX_AMPDU_TX
        if (is_mpdu_agg(txdesc))
        {
            struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;

            agg_desc->available_len += size;

            // Push the aggregate once enough data has been downloaded
            if (!(agg_desc->status & AGG_ALLOC) &&
                (agg_desc->available_len > TX_BUFFER_MIN_AMPDU_DWNLD))
            {
                // Mark the aggregate as allocated and flag this buffer as
                // an intermediate MSDU of the A-MPDU
                agg_desc->status |= AGG_ALLOC;
                buf->flags |= BUF_ALLOC_OK | BUF_INT_MSDU;

                // Raise the DMA interrupt on pattern-write completion
                dma_desc_pat->ctrl = TX_LLICTRL(access_category, 1);

                txl_buffer_push(access_category, buf);
            }
        }
#endif //NX_AMPDU_TX
    }
    else
#endif // NX_AMSDU_TX
    {
#if (NX_UMAC_PRESENT)
        if (!(txdesc->host.flags & TXU_CNTRL_MGMT))
        {
            // Let the UMAC build the MAC header in front of the payload
            txu_cntrl_frame_build(txdesc, CPU2HW(buf->payload) + txdesc->umac.head_len);
        }
#endif //(NX_UMAC_PRESENT)

        // Push policy depends on the A-MPDU aggregation state
#if NX_AMPDU_TX
        if (is_mpdu_agg(txdesc))
        {
            struct tx_agg_desc *agg_desc = txdesc->lmac.agg_desc;

            agg_desc->available_len += size;

            if (is_mpdu_first(txdesc))
            {
                // First MPDU of the aggregate: always pushed immediately
                dma_desc_pat->ctrl = TX_LLICTRL(access_category, 1);

#if NX_UMAC_PRESENT
                // Copy the buffer control info built by the UMAC
                txl_buffer_control_copy(txdesc, buf);
#endif

#if (RW_BFMER_EN)
                // Chain the Beamformer descriptor if required
                dma_desc = txl_buffer_bfr_dma_desc_conf(txdesc, &buf->dma_desc_bfr,
                                                        dma_desc);
#endif

                txl_buffer_push(access_category, buf);

                if (agg_desc->available_len > TX_BUFFER_MIN_AMPDU_DWNLD)
                {
                    PROF_AGG_FIRST_MPDU_DWNLD_SET();

                    // Enough data already available: mark the aggregate
                    // as allocated right away
                    agg_desc->status |= AGG_ALLOC;
                    buf->flags |= BUF_ALLOC_OK;

                    PROF_AGG_FIRST_MPDU_DWNLD_CLR();
                }
            }
            else if (!(agg_desc->status & AGG_ALLOC) &&
                     ((agg_desc->available_len > TX_BUFFER_MIN_AMPDU_DWNLD)
                      || is_mpdu_last(txdesc)))
            {
                // Enough data downloaded (or last MPDU): allocate and push
                PROF_AGG_FIRST_MPDU_DWNLD_SET();

                agg_desc->status |= AGG_ALLOC;
                buf->flags |= BUF_ALLOC_OK;

                // Raise the DMA interrupt on pattern-write completion
                dma_desc_pat->ctrl = TX_LLICTRL(access_category, 1);

                txl_buffer_push(access_category, buf);

                PROF_AGG_FIRST_MPDU_DWNLD_CLR();
            }
        }
        else
        {
            // Singleton MPDU: push immediately
            PROF_AGG_FIRST_MPDU_DWNLD_SET();

            dma_desc_pat->ctrl = TX_LLICTRL(access_category, 1);

#if NX_UMAC_PRESENT
            // Copy the buffer control info built by the UMAC
            txl_buffer_control_copy(txdesc, buf);
#endif

#if (RW_BFMER_EN)
            // Chain the Beamformer descriptor if required
            dma_desc = txl_buffer_bfr_dma_desc_conf(txdesc, &buf->dma_desc_bfr,
                                                    dma_desc);
#endif

            buf->flags |= BUF_ALLOC_OK;
            txl_buffer_push(access_category, buf);

            PROF_AGG_FIRST_MPDU_DWNLD_CLR();
        }
#else // ! NX_AMPDU_TX
        // No A-MPDU support: always push immediately
        dma_desc_pat->ctrl = TX_LLICTRL(access_category, 1);

#if NX_UMAC_PRESENT
        // Copy the buffer control info built by the UMAC
        txl_buffer_control_copy(txdesc, buf);
#endif

#if (RW_BFMER_EN)
        // Chain the Beamformer descriptor if required
        dma_desc = txl_buffer_bfr_dma_desc_conf(txdesc, &buf->dma_desc_bfr,
                                                dma_desc);
#endif

        txl_buffer_push(access_category, buf);
#endif // NX_AMPDU_TX
    }

    // Terminate the chain with the pattern descriptor
    dma_desc->next = CPU2HW(dma_desc_pat);

    // Start the download on the TX data DMA channel
    dma_push(&buf->dma_desc[0], dma_desc_pat, IPC_DMA_CHANNEL_DATA_TX);
}
00599
00600
00601 void txl_buffer_reinit(void)
00602 {
00603 int i, j;
00604
00605 for (i=0; i<NX_TXQ_CNT; i++)
00606 {
00607 txl_buffer_env.list[i].first = NULL;
00608 txl_buffer_env.list[i].last = NULL;
00609 for (j = 0; j<RW_USER_MAX; j++)
00610 {
00611 struct txl_buffer_idx_tag *idx = &txl_buffer_env.buf_idx[i][j];
00612 idx->free = 0;
00613 idx->used_area = 0;
00614 idx->free_size = TX_BUFFER_POOL_SIZE;
00615 idx->last = TX_BUFFER_NULL;
00616 idx->count = 0;
00617 idx->buf_size = 0;
00618 if ((i != AC_BCN) || (j == 0))
00619 {
00620 struct txl_buffer_hw_desc_tag *hwdesc = &txl_buffer_hw_desc[i*RW_USER_MAX + j];
00621
00622 hwdesc->pbd.upatterntx = TX_PAYLOAD_DESC_PATTERN;
00623 hwdesc->pbd.bufctrlinfo = 0;
00624 hwdesc->pbd.next = 0;
00625
00626 idx->pool = &txl_buffer_pool[i*RW_USER_MAX + j][0];
00627 idx->desc = hwdesc;
00628 }
00629 else
00630 {
00631 idx->pool = NULL;
00632 idx->desc = NULL;
00633 }
00634 idx->next_needed = -1;
00635 }
00636 }
00637 }
00638
00639 void txl_buffer_reset(uint8_t access_category)
00640 {
00641 int i;
00642
00643 txl_buffer_env.list[access_category].first = NULL;
00644 txl_buffer_env.list[access_category].last = NULL;
00645 for (i = 0; i < RW_USER_MAX; i++)
00646 {
00647 struct txl_buffer_idx_tag *idx = &txl_buffer_env.buf_idx[access_category][i];
00648
00649 idx->free = 0;
00650 idx->used_area = 0;
00651 idx->free_size = TX_BUFFER_POOL_SIZE;
00652 idx->last = TX_BUFFER_NULL;
00653 idx->count = 0;
00654 idx->buf_size = 0;
00655 idx->next_needed = -1;
00656 }
00657 }
00658
00659 #if NX_AMSDU_TX
00660 struct txl_buffer_tag *txl_buffer_alloc(struct txdesc *txdesc, uint8_t access_category,
00661 uint8_t user_idx, uint8_t pkt_idx)
00662 #else
00663 struct txl_buffer_tag *txl_buffer_alloc(struct txdesc *txdesc, uint8_t access_category,
00664 uint8_t user_idx)
00665 #endif
00666 {
00667 struct txl_buffer_tag *buf;
00668 uint16_t size, head_len;
00669 uint32_t needed, remaining;
00670 struct txl_buffer_idx_tag *idx = &txl_buffer_env.buf_idx[access_category][user_idx];
00671
00672
00673 needed = txl_buffer_get_params(txdesc, &size, &head_len
00674 #if NX_AMSDU_TX
00675 , pkt_idx
00676 #endif
00677 );
00678
00679
00680 PROF_TX_BUF_ALLOC_SET();
00681
00682 do
00683 {
00684
00685 buf = txl_buffer_get_space(idx, needed, head_len, &remaining);
00686 if (buf == NULL)
00687 break;
00688
00689 buf->user_idx = user_idx;
00690
00691
00692 txl_buffer_transfer(txdesc, access_category, buf, idx, size, remaining, head_len
00693 #if NX_AMSDU_TX
00694 , pkt_idx
00695 #endif
00696 );
00697
00698 } while(0);
00699
00700
00701 PROF_TX_BUF_ALLOC_CLR();
00702
00703 return(buf);
00704 }
00705
00706
00707 bool txl_buffer_free(struct txl_buffer_tag *buf, uint8_t access_category)
00708 {
00709 struct txl_buffer_idx_tag *idx = &txl_buffer_env.buf_idx[access_category][buf->user_idx];
00710
00711
00712 PROF_TX_BUF_FREE_SET();
00713
00714
00715 idx->used_area -= buf->length;
00716 idx->free_size += buf->length;
00717 if (buf->flags & BUF_FRONTIER)
00718 idx->count--;
00719
00720
00721 ASSERT_ERR(idx->free_size <= TX_BUFFER_POOL_SIZE);
00722 ASSERT_ERR(idx->used_area < TX_BUFFER_POOL_SIZE);
00723
00724
00725 if (idx->used_area == 0)
00726 {
00727 idx->free = 0;
00728 idx->last = TX_BUFFER_NULL;
00729 idx->count = 0;
00730 idx->buf_size = 0;
00731 }
00732
00733
00734 PROF_TX_BUF_FREE_CLR();
00735
00736 return ((buf->flags & BUF_FRONTIER) || (idx->free_size >= idx->next_needed));
00737 }
00738
00739 void txl_buffer_free_all(struct txdesc *txdesc, uint8_t access_category)
00740 {
00741 #if NX_AMSDU_TX
00742 int i;
00743
00744 for (i = 0; i < txdesc->host.packet_cnt; i++)
00745 {
00746 struct txl_buffer_tag *buf = txdesc->lmac.buffer[i];
00747
00748
00749 if (buf != NULL)
00750 {
00751
00752 txl_buffer_free(buf, access_category);
00753
00754
00755 txdesc->lmac.buffer[i] = NULL;
00756 }
00757 };
00758 #else
00759 if (txdesc->lmac.buffer != NULL)
00760 {
00761
00762 txl_buffer_free(txdesc->lmac.buffer, access_category);
00763 txdesc->lmac.buffer = NULL;
00764 }
00765 #endif //NX_AMSDU_TX
00766 }
00767
00768 #endif
00769
#if NX_UMAC_PRESENT
/**
 * Initialize the fields of a TX policy table that are common to all buffer
 * control descriptors: validation pattern, PHY/MAC control words and the
 * first rate/power entries (1 Mbps at the maximum OFDM power level).
 *
 * @param[in] pol  Policy table to initialize
 */
static void txl_buffer_pol_tbl_init(struct tx_policy_tbl *pol)
{
    pol->upatterntx = POLICY_TABLE_PATTERN;
    pol->phycntrlinfo1 = phy_get_ntx() << NX_TX_PT_OFT;
    pol->phycntrlinfo2 = TX_NTX_2_ANTENNA_SET(phy_get_ntx());
    pol->maccntrlinfo1 = 0;
    pol->maccntrlinfo2 = 0xFFFF0704;
    pol->ratecntrlinfo[0] = HW_RATE_1MBPS << MCS_INDEX_TX_RCX_OFT;
    pol->powercntrlinfo[0] = TX_PWR_LEVEL_SET(nxmac_ofdm_max_pwr_level_getf());
}
#endif

/**
 * Initialize the TX buffer module.
 *
 * Resets the buffer environment (non fully-hosted builds) and, when the UMAC
 * is present, programs default policy tables for every remote station
 * descriptor (4 retry rate/power entries, normal ACK) and every broadcast/
 * multicast VIF descriptor (single rate entry, no ACK expected).
 */
void txl_buffer_init(void)
{
#if NX_UMAC_PRESENT
    int i;
#endif

#if !NX_FULLY_HOSTED
    txl_buffer_reinit();
#endif

#if NX_UMAC_PRESENT
    // Per-station descriptors: all 4 retry entries at 1 Mbps / max power
    for (i = 0; i < NX_REMOTE_STA_MAX; i++)
    {
        struct tx_policy_tbl *pol = &txl_buffer_control_desc[i].policy_tbl;

        txl_buffer_pol_tbl_init(pol);
        pol->ratecntrlinfo[1] = HW_RATE_1MBPS << MCS_INDEX_TX_RCX_OFT;
        pol->ratecntrlinfo[2] = HW_RATE_1MBPS << MCS_INDEX_TX_RCX_OFT;
        pol->ratecntrlinfo[3] = HW_RATE_1MBPS << MCS_INDEX_TX_RCX_OFT;
        pol->powercntrlinfo[1] = TX_PWR_LEVEL_SET(nxmac_ofdm_max_pwr_level_getf());
        pol->powercntrlinfo[2] = TX_PWR_LEVEL_SET(nxmac_ofdm_max_pwr_level_getf());
        pol->powercntrlinfo[3] = TX_PWR_LEVEL_SET(nxmac_ofdm_max_pwr_level_getf());

        txl_buffer_control_desc[i].mac_control_info = EXPECTED_ACK_NORMAL_ACK | LOW_RATE_RETRY;
        txl_buffer_control_desc[i].phy_control_info = 63 << GID_TX_OFT;
    }

    // Broadcast/multicast descriptors: single rate entry, no ACK expected
    for (i = 0; i < NX_VIRT_DEV_MAX; i++)
    {
        struct tx_policy_tbl *pol = &txl_buffer_control_desc_bcmc[i].policy_tbl;

        txl_buffer_pol_tbl_init(pol);
        pol->ratecntrlinfo[1] = 0;
        pol->ratecntrlinfo[2] = 0;
        pol->ratecntrlinfo[3] = 0;
        pol->powercntrlinfo[1] = 0;
        pol->powercntrlinfo[2] = 0;
        pol->powercntrlinfo[3] = 0;

        txl_buffer_control_desc_bcmc[i].mac_control_info = EXPECTED_ACK_NO_ACK;
        txl_buffer_control_desc_bcmc[i].phy_control_info = 63 << GID_TX_OFT;
    }
#endif
}
00823
00824