Skip to content

Commit d2cdb41

Browse files
authored
Merge pull request #1 from ctarver/conjugate_branch
Conjugate branch
2 parents 8034f87 + bd1cc5a commit d2cdb41

4 files changed

Lines changed: 69 additions & 10 deletions

File tree

ILA_DPD.m

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@
3636
nIterations % Number of iterations used in the ILA learning
3737
block_size % Block size used for each iteration in the learning
3838
coeffs % DPD coefficients
39+
use_conj % Use a conjugate branch as well
40+
use_dc_term % use a dc term
3941
end
4042

4143
methods
@@ -47,6 +49,8 @@
4749
params.memory_depth = 3;
4850
params.nIterations = 3;
4951
params.block_size = 50000;
52+
params.use_conj = 0;
53+
params.use_dc_term = 0;
5054
end
5155

5256
if mod(params.order, 2) == 0
@@ -58,6 +62,9 @@
5862
obj.nIterations = params.nIterations;
5963
obj.block_size = params.block_size;
6064

65+
obj.use_conj = params.use_conj;
66+
obj.use_dc_term = params.use_dc_term;
67+
6168
% Start DPD coeffs being completely linear (no effect)
6269
obj.coeffs = zeros(obj.convert_order_to_number_of_coeffs, obj.memory_depth);
6370
obj.coeffs(1) = 1;
@@ -137,6 +144,7 @@ function perform_learning(obj, x, pa)
137144
number_of_basis_vectors = obj.memory_depth * obj.convert_order_to_number_of_coeffs;
138145
X = zeros(length(x), number_of_basis_vectors);
139146

147+
% Main branch
140148
count = 1;
141149
for i = 1:2:obj.order
142150
branch = x .* abs(x).^(i-1);
@@ -147,6 +155,24 @@ function perform_learning(obj, x, pa)
147155
count = count + 1;
148156
end
149157
end
158+
159+
if obj.use_conj
160+
% Conjugate branch
161+
for i = 1:2:obj.order
162+
branch = conj(x) .* abs(x).^(i-1);
163+
for j = 1:obj.memory_depth
164+
delayed_version = zeros(size(branch));
165+
delayed_version(j:end) = branch(1:end - j + 1);
166+
X(:, count) = delayed_version;
167+
count = count + 1;
168+
end
169+
end
170+
end
171+
172+
% DC
173+
if obj.use_dc_term
174+
X(:, count) = 1;
175+
end
150176
end
151177

152178

@@ -158,7 +184,16 @@ function perform_learning(obj, x, pa)
158184
if nargin == 1
159185
order = obj.order;
160186
end
187+
161188
number_of_coeffs = (order + 1) / 2;
189+
190+
if obj.use_conj
191+
number_of_coeffs = 2 * number_of_coeffs;
192+
end
193+
194+
if obj.use_dc_term
195+
number_of_coeffs = number_of_coeffs + 1;
196+
end
162197
end
163198

164199

README.md

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,3 +44,12 @@ The DPD is a nonlinear function that approximates an inverse of the PA's nonline
4444
One strong advantage of this model is that the FIR filter and hence any coefficients of the model are after their corresponding nonlinearities. This means the output of the model can be expressed as a linear system, *y = X b*. Here, X is a matrix where each column is a different nonlinear branch of the form *x|x|^{i-1}* where *i* is the order which can only be odd, and *x* is the input signal. If we have input/output samples, this can be a linear least-squares regression problem since the model coefficients are linear with regard to the input. Here we want to *minimize_b || y - X b ||* for some experimental *x* and *y.* This minimizes the sum of squared residuals and gives us the best fit.
4545

4646
The DPD can't easily be modeled in this method directly because we don't know the desired predistorter output. We just know the desired PA output, actual PA output, and the original input signal. The indirect learning architecture allows us to circumvent this. When fully converged, the PA output should be linear, and so the input to the pre and post distorters would be equivalent, their outputs would be the same, and the error signal would be zero. When training, we use the postdistorter. We want the output of the postdistorter to be equal to the output of the predistorter so that there is no error. We can run this to train and get some PA output signal. Then for the postdistorter, we have input samples (the PA output) and a desired postdistorter output (the predistorter output). We can start with some DPD coefficients (such as a pure linear DPD) then perform a LS fit to find the best coefficients to fit the postdistorter. We copy this to the predistorter and repeat for a few iterations.
47+
48+
## References:
49+
For the conjugate branch:
50+
```
51+
L. Anttila, P. Handel and M. Valkama, "Joint Mitigation of Power Amplifier and I/Q Modulator Impairments in Broadband Direct-Conversion Transmitters," in IEEE Transactions on Microwave Theory and Techniques, vol. 58, no. 4, pp. 730-739, April 2010.
52+
doi: 10.1109/TMTT.2010.2041579
53+
keywords: {modulators;power amplifiers;radio transmitters;telecommunication channels;broadband direct-conversion transmitters;frequency-dependent power amplifier;I/Q modulator impairments;direct-conversion radio transmitters;extended parallel Hammerstein structure;parameter estimation stage;indirect learning architecture;adjacent channel power ratio;Broadband amplifiers;Power amplifiers;Radio transmitters;Digital modulation;Predistortion;Local oscillators;Wideband;Nonlinear distortion;Radiofrequency amplifiers;Frequency estimation;Digital predistortion (PD);direct-conversion radio;in-phase and quadrature (I/Q) imbalance;I/Q modulator;local oscillator (LO) leakage;mirror-frequency interference (MFI);power amplifier (PA);spectral regrowth},
54+
URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=5431085&isnumber=5446455
55+
```

example.m

Lines changed: 20 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,10 @@
66
addpath(genpath('WARPLab-Matlab-Wrapper'))
77
addpath(genpath('Power-Amplifier-Model'))
88

9-
rms_input = 0.20;
9+
rms_input = 0.50;
1010

1111
% Setup the PA simulator or TX board
12-
PA_board = 'WARP'; % either 'WARP', 'webRF', or 'none'
12+
PA_board = 'webRF'; % either 'WARP', 'webRF', or 'none'
1313
switch PA_board
1414
case 'WARP'
1515
warp_params.nBoards = 1; % Number of boards
@@ -20,12 +20,13 @@
2020
board = PowerAmplifier(7, 4);
2121
Fs = 40e6; % WARP board sampling rate.
2222
case 'webRF'
23-
board = webRF();
23+
dbm_power = -26;
24+
board = webRF(dbm_power);
2425
Fs = 200e6; % webRF sampling rate.
2526
end
2627

2728
% Setup OFDM
28-
ofdm_params.nSubcarriers = 300;
29+
ofdm_params.nSubcarriers = 600;
2930
ofdm_params.subcarrier_spacing = 15e3; % 15kHz subcarrier spacing
3031
ofdm_params.constellation = 'QPSK';
3132
ofdm_params.cp_length = 144; % Number of samples in cyclic prefix.
@@ -38,21 +39,32 @@
3839
tx_data = normalize_for_pa(upsampled_tx_data, rms_input);
3940

4041
% Setup DPD
41-
dpd_params.order = 7;
42-
dpd_params.memory_depth = 3;
43-
dpd_params.nIterations = 3;
42+
dpd_params.order = 11;
43+
dpd_params.memory_depth = 4;
44+
dpd_params.nIterations = 2;
4445
dpd_params.block_size = 50000;
46+
47+
dpd_params.use_conj = 1;
48+
dpd_params.use_dc_term = 1;
49+
conj_dpd = ILA_DPD(dpd_params);
50+
51+
dpd_params.use_conj = 0;
52+
dpd_params.use_dc_term = 0;
4553
dpd = ILA_DPD(dpd_params);
4654

4755
%% Run Experiment
4856
w_out_dpd = board.transmit(tx_data);
57+
conj_dpd.perform_learning(tx_data, board);
58+
w_conj_dpd = board.transmit(conj_dpd.predistort(tx_data));
59+
4960
dpd.perform_learning(tx_data, board);
5061
w_dpd = board.transmit(dpd.predistort(tx_data));
5162

5263
%% Plot
5364
plot_results('psd', 'Original TX signal', tx_data, 40e6)
5465
plot_results('psd', 'No DPD', w_out_dpd, 40e6)
55-
plot_results('psd', 'With DPD', w_dpd, 40e6)
66+
plot_results('psd', 'With Normal DPD', w_dpd, 40e6)
67+
plot_results('psd', 'With Conjug DPD', w_conj_dpd, 40e6)
5668

5769
%% Some helper functions
5870
function out = up_sample(in, Fs, sampling_rate)

webRF.m

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,12 @@
1111
end
1212

1313
methods
14-
function obj = webRF()
14+
function obj = webRF(dbm_power)
1515
%webRF Construct an instance of this class
16-
obj.RMSin = -21;
16+
if nargin == 0
17+
dbm_power = -24;
18+
end
19+
obj.RMSin = dbm_power;
1720
obj.synchronization.sub_sample = 1;
1821
end
1922

0 commit comments

Comments
 (0)