2 * Copyright (c) 2016 Andriy Voskoboinyk <avos@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
34 #include <sys/mutex.h>
36 #include <sys/kernel.h>
37 #include <sys/socket.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
40 #include <sys/queue.h>
41 #include <sys/taskqueue.h>
43 #include <sys/endian.h>
44 #include <sys/linker.h>
47 #include <net/ethernet.h>
48 #include <net/if_media.h>
50 #include <net80211/ieee80211_var.h>
51 #include <net80211/ieee80211_radiotap.h>
53 #include <dev/rtwn/if_rtwnreg.h>
54 #include <dev/rtwn/if_rtwnvar.h>
56 #include <dev/rtwn/if_rtwn_ridx.h>
58 #include <dev/rtwn/rtl8812a/r12a.h>
59 #include <dev/rtwn/rtl8812a/r12a_tx_desc.h>
/*
 * Select the Tx descriptor "primary channel" code for an HT40 channel,
 * i.e. which 20 MHz sub-channel of the 40 MHz pair is the primary one.
 * HT40U (extension channel above the primary) maps to one code, the
 * other HT40 orientation to the other.
 * NOTE(review): the exact meaning of the _20_80_2 / _20_80_3 encodings
 * is defined by the chip; confirm against r12a_tx_desc.h.
 */
63 r12a_get_primary_channel(struct rtwn_softc *sc, struct ieee80211_channel *c)
66 	if (IEEE80211_IS_CHAN_HT40U(c))
67 		return (R12A_TXDW5_PRIM_CHAN_20_80_2);
69 		return (R12A_TXDW5_PRIM_CHAN_20_80_3);
/*
 * Enable 40 MHz transmission in the Tx descriptor when the peer's
 * channel is a valid HT40 channel: set the data bandwidth field to
 * 40 MHz and record the primary 20 MHz sub-channel in txdw5.
 * Leaves the descriptor untouched for non-HT40 (or unset) channels.
 */
73 r12a_tx_set_ht40(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
75 	struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
	/* Only meaningful when the node has a real HT40 channel. */
78 	if (ni->ni_chan != IEEE80211_CHAN_ANYC &&
79 	    IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
82 		prim_chan = r12a_get_primary_channel(sc, ni->ni_chan);
83 		txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
84 		    R12A_TXDW5_DATA_BW40));
85 		txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
/*
 * Program frame-protection fields into the Tx descriptor.
 * For CTS-to-self the CTS2SELF bit is set; for RTS/CTS the RTSEN bit.
 * For either mode the RTS/CTS control rate is then derived from the
 * data rate index and written to txdw4, with the RTS rate fallback
 * limit set to its maximum.
 */
91 r12a_tx_protection(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
92     enum ieee80211_protmode mode, uint8_t ridx)
94 	struct ieee80211com *ic = &sc->sc_ic;
98 	case IEEE80211_PROT_CTSONLY:
99 		txd->txdw3 |= htole32(R12A_TXDW3_CTS2SELF);
101 	case IEEE80211_PROT_RTSCTS:
102 		txd->txdw3 |= htole32(R12A_TXDW3_RTSEN);
	/* Pick the control-frame rate that protects this data rate. */
108 	if (mode == IEEE80211_PROT_CTSONLY ||
109 	    mode == IEEE80211_PROT_RTSCTS) {
		/* HT MCS rates map through the MCS control-rate helper. */
110 		if (ridx >= RTWN_RIDX_HT_MCS(0))
111 			rate = rtwn_ctl_mcsrate(ic->ic_rt, ridx)
113 			rate = ieee80211_ctl_rate(ic->ic_rt, ridx2rate[ridx]);
114 		ridx = rate2ridx(IEEE80211_RV(rate));
116 		txd->txdw4 |= htole32(SM(R12A_TXDW4_RTSRATE, ridx));
117 		/* RTS rate fallback limit (max). */
118 		txd->txdw4 |= htole32(SM(R12A_TXDW4_RTSRATE_FB_LMT, 0xf));
		/*
		 * Use a short preamble for CCK RTS frames (not legal at
		 * 1 Mbps) when the BSS allows short preambles.
		 */
120 		if (RTWN_RATE_IS_CCK(ridx) && ridx != RTWN_RIDX_CCK1 &&
121 		    (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
122 			txd->txdw5 |= htole32(R12A_TXDW5_RTS_SHORT);
/*
 * Compute the Rate Adaptation ID (RAID) for this node/frame and store
 * it in txdw1.  The RAID tells the firmware's rate-control engine which
 * rate set (11B / 11BG / 11G / HT 1- or 2-stream, 20 or 40 MHz) it may
 * pick from.  Group-addressed frames and non-HT peers are first demoted
 * to the corresponding legacy (11a/11b/11g) mode.
 */
127 r12a_tx_raid(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
128     struct ieee80211_node *ni, int ismcast)
130 	struct ieee80211com *ic = &sc->sc_ic;
131 	struct ieee80211vap *vap = ni->ni_vap;
132 	struct ieee80211_channel *chan;
133 	enum ieee80211_phymode mode;
	/* Prefer the node's channel; fall back to the current channel. */
136 	chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
137 	    ni->ni_chan : ic->ic_curchan;
138 	mode = ieee80211_chan2mode(chan);
140 	/* NB: group addressed frames are done at 11bg rates for now */
141 	if (ismcast || !(ni->ni_flags & IEEE80211_NODE_HT)) {
143 		case IEEE80211_MODE_11A:
144 		case IEEE80211_MODE_11B:
145 		case IEEE80211_MODE_11G:
147 		case IEEE80211_MODE_11NA:
148 			mode = IEEE80211_MODE_11A;
150 		case IEEE80211_MODE_11NG:
151 			mode = IEEE80211_MODE_11G;
154 			device_printf(sc->sc_dev, "unknown mode(1) %d!\n",
	/* Map the (possibly demoted) phymode to a hardware RAID value. */
161 	case IEEE80211_MODE_11A:
162 		raid = R12A_RAID_11G;
164 	case IEEE80211_MODE_11B:
165 		raid = R12A_RAID_11B;
167 	case IEEE80211_MODE_11G:
		/* Pure-G BSS: exclude CCK rates from the rate set. */
168 		if (vap->iv_flags & IEEE80211_F_PUREG)
169 			raid = R12A_RAID_11G;
171 			raid = R12A_RAID_11BG;
173 	case IEEE80211_MODE_11NA:
		/* RAID distinguishes 1- vs 2-stream HT rate sets. */
174 		if (sc->ntxchains == 1)
175 			raid = R12A_RAID_11GN_1;
177 			raid = R12A_RAID_11GN_2;
179 	case IEEE80211_MODE_11NG:
180 		if (sc->ntxchains == 1) {
181 			if (IEEE80211_IS_CHAN_HT40(chan))
182 				raid = R12A_RAID_11BGN_1_40;
184 				raid = R12A_RAID_11BGN_1;
186 			if (IEEE80211_IS_CHAN_HT40(chan))
187 				raid = R12A_RAID_11BGN_2_40;
189 				raid = R12A_RAID_11BGN_2;
193 		/* TODO: 80 MHz / 11ac */
194 		device_printf(sc->sc_dev, "unknown mode(2) %d!\n", mode);
198 	txd->txdw1 |= htole32(SM(R12A_TXDW1_RAID, raid));
/*
 * Request a short guard interval for this frame when both our VAP and
 * the peer advertise short-GI support for the bandwidth in use:
 * short-GI-20 for 20 MHz, or short-GI-40 when the peer's channel is
 * HT40.  Sets R12A_TXDW5_DATA_SHORT in the descriptor.
 */
202 r12a_tx_set_sgi(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
204 	struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
205 	struct ieee80211vap *vap = ni->ni_vap;
207 	if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) &&	/* HT20 */
208 	    (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20))
209 		txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
210 	else if (ni->ni_chan != IEEE80211_CHAN_ANYC &&	/* HT40 */
211 	    IEEE80211_IS_CHAN_HT40(ni->ni_chan) &&
212 	    (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) &&
213 	    (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40))
214 		txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
/*
 * Enable LDPC coding for this frame when the VAP is configured to
 * transmit LDPC and the peer advertises LDPC receive support in its
 * HT capabilities.
 */
218 r12a_tx_set_ldpc(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
219     struct ieee80211_node *ni)
221 	struct ieee80211vap *vap = ni->ni_vap;
223 	if ((vap->iv_flags_ht & IEEE80211_FHT_LDPC_TX) &&
224 	    (ni->ni_htcap & IEEE80211_HTCAP_LDPC))
225 		txd->txdw5 |= htole32(R12A_TXDW5_DATA_LDPC);
/*
 * Fill the RTL8812A Tx descriptor for a normal (net80211-rated) frame.
 * Classifies the frame (QoS, multicast, type), selects the hardware
 * queue (qsel) and MAC ID, programs retry limits, aggregation, rate,
 * protection and sequence-number fields, then delegates RAID selection
 * to r12a_tx_raid().  `ridx' is the rate index chosen by rate control;
 * `maxretry' bounds hardware retransmissions for ACKed frames.
 */
229 r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
230     struct mbuf *m, void *buf, uint8_t ridx, int maxretry)
232 	struct ieee80211com *ic = &sc->sc_ic;
233 	struct ieee80211vap *vap = ni->ni_vap;
234 	struct rtwn_vap *uvp = RTWN_VAP(vap);
235 	struct ieee80211_frame *wh;
236 	struct r12a_tx_desc *txd;
237 	enum ieee80211_protmode prot;
238 	uint8_t type, tid, qos, qsel;
239 	int hasqos, ismcast, macid;
241 	wh = mtod(m, struct ieee80211_frame *);
242 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
243 	hasqos = IEEE80211_QOS_HAS_SEQ(wh);
244 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
246 	/* Select TX ring for this frame. */
248 		qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
249 		tid = qos & IEEE80211_QOS_TID;
255 	/* Fill Tx descriptor. */
256 	txd = (struct r12a_tx_desc *)buf;
	/* Single-fragment frame: both first- and last-segment bits. */
257 	txd->flags0 |= R12A_FLAGS0_LSG | R12A_FLAGS0_FSG;
259 		txd->flags0 |= R12A_FLAGS0_BMCAST;
262 		/* Unicast frame, check if an ACK is expected. */
263 		if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
264 		    IEEE80211_QOS_ACKPOLICY_NOACK) {
			/* ACK expected: enable the hardware retry limit. */
265 			txd->txdw4 = htole32(R12A_TXDW4_RETRY_LMT_ENA);
266 			txd->txdw4 |= htole32(SM(R12A_TXDW4_RETRY_LMT,
270 		struct rtwn_node *un = RTWN_NODE(ni);
273 		if (type == IEEE80211_FC0_TYPE_DATA) {
			/* Data frames go to the per-TID hardware queue. */
274 			qsel = tid % RTWN_MAX_TID;
276 			if (m->m_flags & M_AMPDU_MPDU) {
				/* A-MPDU: enable aggregation and set density. */
277 				txd->txdw2 |= htole32(R12A_TXDW2_AGGEN);
278 				txd->txdw2 |= htole32(SM(R12A_TXDW2_AMPDU_DEN,
279 				    vap->iv_ampdu_density));
280 				txd->txdw3 |= htole32(SM(R12A_TXDW3_MAX_AGG,
283 				txd->txdw2 |= htole32(R12A_TXDW2_AGGBK);
			/*
			 * net80211 rate control needs per-frame Tx reports;
			 * sc_tx_n_active tracks outstanding reported frames.
			 */
285 			if (sc->sc_ratectl == RTWN_RATECTL_NET80211) {
286 				txd->txdw2 |= htole32(R12A_TXDW2_SPE_RPT);
287 				sc->sc_tx_n_active++;
			/* Short preamble for CCK rates above 1 Mbps. */
290 			if (RTWN_RATE_IS_CCK(ridx) && ridx != RTWN_RIDX_CCK1 &&
291 			    (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
292 				txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
294 			prot = IEEE80211_PROT_NONE;
295 			if (ridx >= RTWN_RIDX_HT_MCS(0)) {
				/* HT rate: 40 MHz / SGI / LDPC options apply. */
296 				r12a_tx_set_ht40(sc, txd, ni);
297 				r12a_tx_set_sgi(sc, txd, ni);
298 				r12a_tx_set_ldpc(sc, txd, ni);
299 				prot = ic->ic_htprotmode;
300 			} else if (ic->ic_flags & IEEE80211_F_USEPROT)
301 				prot = ic->ic_protmode;
303 			/* XXX fix last comparison for A-MSDU (in net80211) */
			/* Frames over the RTS threshold get RTS/CTS protection. */
305 			if (m->m_pkthdr.len + IEEE80211_CRC_LEN >
306 			    vap->iv_rtsthreshold &&
307 			    vap->iv_rtsthreshold != IEEE80211_RTS_MAX)
308 				prot = IEEE80211_PROT_RTSCTS;
310 			if (prot != IEEE80211_PROT_NONE)
311 				r12a_tx_protection(sc, txd, prot, ridx);
312 		} else	/* IEEE80211_FC0_TYPE_MGT */
313 			qsel = R12A_TXDW1_QSEL_MGNT;
		/* Group-addressed: broadcast MAC ID, management queue. */
315 		macid = RTWN_MACID_BC;
316 		qsel = R12A_TXDW1_QSEL_MGNT;
319 	txd->txdw1 |= htole32(SM(R12A_TXDW1_QSEL, qsel));
320 	txd->txdw1 |= htole32(SM(R12A_TXDW1_MACID, macid));
321 	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE, ridx));
322 	/* Data rate fallback limit (max). */
323 	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE_FB_LMT, 0x1f));
324 	/* XXX recheck for non-21au */
325 	txd->txdw6 |= htole32(SM(R21A_TXDW6_MBSSID, uvp->id));
326 	r12a_tx_raid(sc, txd, ni, ismcast);
328 	/* Force this rate if needed. */
329 	if (sc->sc_ratectl != RTWN_RATECTL_FW)
330 		txd->txdw3 |= htole32(R12A_TXDW3_DRVRATE);
333 		/* Use HW sequence numbering for non-QoS frames. */
334 		txd->txdw8 |= htole32(R12A_TXDW8_HWSEQ_EN);
335 		txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
		/*
		 * A-MPDU subframes consume a sequence number from the
		 * per-TID counter; otherwise reuse the mbuf's assigned one.
		 */
339 		if (m->m_flags & M_AMPDU_MPDU) {
340 			seqno = ni->ni_txseqs[tid];
341 			ni->ni_txseqs[tid]++;
343 			seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
345 		/* Set sequence number. */
346 		txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, seqno));
/*
 * Fill the Tx descriptor for a raw frame injected via BPF, honoring the
 * caller-supplied ieee80211_bpf_params: the fixed rate (ibp_rate0),
 * no-ACK, RTS and CTS flags.  Raw frames always use the broadcast MAC
 * ID and the management queue, and always force the driver rate.
 */
351 r12a_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
352     struct mbuf *m, void *buf, const struct ieee80211_bpf_params *params)
354 	struct ieee80211vap *vap = ni->ni_vap;
355 	struct rtwn_vap *uvp = RTWN_VAP(vap);
356 	struct ieee80211_frame *wh;
357 	struct r12a_tx_desc *txd;
361 	/* XXX TODO: 11n checks, matching rtwn_fill_tx_desc() */
363 	wh = mtod(m, struct ieee80211_frame *);
364 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
365 	ridx = rate2ridx(params->ibp_rate0);
367 	/* Fill Tx descriptor. */
368 	txd = (struct r12a_tx_desc *)buf;
369 	txd->flags0 |= R12A_FLAGS0_LSG | R12A_FLAGS0_FSG;
371 		txd->flags0 |= R12A_FLAGS0_BMCAST;
	/* Unless the caller asked for no ACK, enable the retry limit. */
373 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) {
374 		txd->txdw4 = htole32(R12A_TXDW4_RETRY_LMT_ENA);
375 		txd->txdw4 |= htole32(SM(R12A_TXDW4_RETRY_LMT,
	/* Caller-requested protection. */
378 	if (params->ibp_flags & IEEE80211_BPF_RTS)
379 		r12a_tx_protection(sc, txd, IEEE80211_PROT_RTSCTS, ridx);
380 	if (params->ibp_flags & IEEE80211_BPF_CTS)
381 		r12a_tx_protection(sc, txd, IEEE80211_PROT_CTSONLY, ridx);
383 	txd->txdw1 |= htole32(SM(R12A_TXDW1_MACID, RTWN_MACID_BC));
384 	txd->txdw1 |= htole32(SM(R12A_TXDW1_QSEL, R12A_TXDW1_QSEL_MGNT));
386 	/* Set TX rate index. */
387 	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE, ridx));
388 	txd->txdw4 |= htole32(SM(R12A_TXDW4_DATARATE_FB_LMT, 0x1f));
389 	txd->txdw6 |= htole32(SM(R21A_TXDW6_MBSSID, uvp->id));
	/* Raw frames always bypass firmware rate adaptation. */
390 	txd->txdw3 |= htole32(R12A_TXDW3_DRVRATE);
391 	r12a_tx_raid(sc, txd, ni, ismcast);
393 	if (!IEEE80211_QOS_HAS_SEQ(wh)) {
394 		/* Use HW sequence numbering for non-QoS frames. */
395 		txd->txdw8 |= htole32(R12A_TXDW8_HWSEQ_EN);
396 		txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
398 		/* Set sequence number. */
399 		txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ,
400 		    M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE));
/*
 * Fill a minimal Tx descriptor for a NULL-data frame (used e.g. for
 * power-save signalling).  Uses the management queue, forces the driver
 * rate, and picks the data rate according to `is11b'.  Note that the
 * descriptor words are assigned (not OR-ed) here: the caller is assumed
 * to provide a zeroed/fresh descriptor.
 */
405 r12a_fill_tx_desc_null(struct rtwn_softc *sc, void *buf, int is11b, int qos,
408 	struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
410 	txd->flags0 = R12A_FLAGS0_FSG | R12A_FLAGS0_LSG | R12A_FLAGS0_OWN;
411 	txd->txdw1 = htole32(
412 	    SM(R12A_TXDW1_QSEL, R12A_TXDW1_QSEL_MGNT));
414 	txd->txdw3 = htole32(R12A_TXDW3_DRVRATE);
415 	txd->txdw6 = htole32(SM(R21A_TXDW6_MBSSID, id));
	/* Rate selection: the is11b branch boundaries are chip-defined. */
417 		txd->txdw4 = htole32(SM(R12A_TXDW4_DATARATE,
420 		txd->txdw4 = htole32(SM(R12A_TXDW4_DATARATE,
	/* Hardware sequence numbering, per-vap sequence counter. */
425 		txd->txdw8 = htole32(R12A_TXDW8_HWSEQ_EN);
426 		txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, id));
/*
 * Derive radiotap Tx flags from a (completed) Tx descriptor.
 * The R12A_TXDW5_DATA_SHORT bit is overloaded by the hardware: for a
 * CCK data rate it means "short preamble", for other (HT) rates it
 * means "short guard interval" — report the matching radiotap flag.
 */
432 r12a_tx_radiotap_flags(const void *buf)
434 	const struct r12a_tx_desc *txd = buf;
437 	if (!(txd->txdw5 & htole32(R12A_TXDW5_DATA_SHORT)))
440 	rate = MS(le32toh(txd->txdw4), R12A_TXDW4_DATARATE);
441 	if (RTWN_RATE_IS_CCK(rate))
442 		flags = IEEE80211_RADIOTAP_F_SHORTPRE;
444 		flags = IEEE80211_RADIOTAP_F_SHORTGI;