interactive interface allows the simulation to be explored freely; users can
examine the signals both visually through numerous graphs, or by listening to
the test signals directly.
3939
40- ## Implementation
4140
42- Since our demonstration takes place purely in the digital domain, we
43- unfortunately cannot use real continuous time analog inputs and outputs.
44- Instead, we simulate the ADC-DAC processes in the discrete time domain. The
45- analog input and output are represented as discrete time signals with a high
46- sampling rate; at the time of writing, the maximum sampling rate supported
47- by WebAudio is 96 kHz.
4841
49- The ADC process consists of several steps, including antialiasing, sampling,
50- and quantization. All of these are simulated in our model: antialiasing is
51- achieved with a windowed sinc FIR lowpass filter of order specified by the
52- user; sampling is approximated by downsampling the input signal by an
53- integer factor; and quantization is achieved by multiplying the sampled
54- signal (which ranges from -1.0 to 1.0) by the maximum integer value possible
55- given the requested bit depth (e.g. 255 for a bit depth of 8 bits), and then
56- rounding every sample to the nearest integer. The DAC process is simulated
57- in turn by zero stuffing and lowpass filtering the sampled and quantized
58- output of the ADC simulation.
59-
60- In summary, the continuous time input is simulated by a 96 kHz discrete time
61- signal, the sampled output of the ADC process is simulated by a downsampled
62- and quantized signal, and the continuous time reconstruction output by the
63- DAC is simulated by upsampling the "sampled" signal back to 96 kHz. In our
64- tests we have found this model to be reasonable; many key concepts, such as
65- critical sampling, aliasing, and quantization noise are well represented in
66- our simulation.
67-
68- For more details, the reader is encouraged to peruse the rest of the source
69- code in this document. Many comments have been included to aid readers who
70- are unfamiliar with JavaScript. Any questions you may have about the
71- implementation of the simulation can only be definitively answered by
72- understanding the source code, but please feel free to contact the project
73- maintainers if you have any questions.
74-
75- ``` javascript
76- */
77-
// `renderWavesImpl` returns an anonymous function that is bound in the widget
// constructor. This separates the implementation of the simulation from the
// other implementation details, so that this documentation can be accessed
// more easily.

// Length of the rendered audio in seconds, and the duration of the linear
// fade applied at each end of playback to prevent pops.
const soundTimeSeconds = 1.5;
const fadeTimeSeconds = 0.125;
// Returns the render callback used by the widget. `settings` holds all user
// parameters and the shared signal buffers, `fft` is an fft.js instance, and
// `p` is the p5 sketch (used for `round`, `pow`, `floor`, `constrain`).
// The returned function renders either the simulation buffers (default) or
// the playback buffers (`playback = true`).
function renderWavesImpl(settings, fft, p) { return (playback = false) => {

  // if we are not rendering for playback, we are rendering for simulation
  const simulation = !playback;

  // select the buffers to render into: playback buffers, or simulation buffers
  const original = playback ? settings.original_pb : settings.original;
  const reconstructed = playback ? settings.reconstructed_pb : settings.reconstructed;
  const stuffed = settings.stuffed;

  // calculate harmonics ------------------------------------------------------

  // The signal is generated using simple additive synthesis, so its exact
  // frequency content can be determined a priori from the settings. The
  // frequencies and amplitudes computed here are used both by the synthesis
  // loop below and by several of the graphs that illustrate the frequency
  // domain content of the signal.

  // Harmonics are only calculated for the simulation; when rendering for
  // playback it is assumed they were already calculated earlier.
  if (simulation) {
    let harmonic_number = 1;
    let harmonic_amplitude = 1;
    let invert = 1;
    // "Odd"/"Even" keep every other harmonic, so step the number by 2.
    const harmInc = (settings.harmType == "Odd" || settings.harmType == "Even") ? 2 : 1;

    for (let i = 0; i < settings.numHarm; i++) {

      // the amplitude of each harmonic depends on the harmonic slope setting
      if (settings.harmSlope == "lin") harmonic_amplitude = 1 - i / settings.numHarm;
      else if (settings.harmSlope == "1/x") harmonic_amplitude = 1 / harmonic_number;
      else if (settings.harmSlope == "1/x2") harmonic_amplitude = 1 / harmonic_number / harmonic_number;
      else if (settings.harmSlope == "flat") harmonic_amplitude = 1;

      // In case the harmonic slope is 1/x^2 and the harmonic type is "Odd",
      // inverting every other harmonic generates a nice triangle wave.
      if (settings.harmSlope == "1/x2" && settings.harmType == "Odd") {
        harmonic_amplitude = harmonic_amplitude * invert;
        invert *= -1;
      }

      // the frequency of each partial is a multiple of the fundamental frequency
      settings.harmonicFreqs[i] = harmonic_number * settings.fundFreq;

      // amplitude calculated above per the slope setting (and triangle case)
      settings.harmonicAmps[i] = harmonic_amplitude;

      // With harmonic type "Even" we want the fundamental plus the even
      // harmonics: increment the harmonic number by 1 after the fundamental
      // and by 2 after every other partial.
      if (i == 0 && settings.harmType == "Even") harmonic_number += 1;
      else harmonic_number += harmInc;
    }
  }

  // render original wave -----------------------------------------------------

  // initialize the signal buffer with all zeros (silence)
  original.fill(0);

  // For the sample at time `n` in `original`, generate the sum of all the
  // partials based on the previously calculated frequency/amplitude values.
  original.forEach((_, n, arr) => {
    for (let harmonic = 0; harmonic < settings.numHarm; harmonic++) {

      const fundamental_frequency = settings.harmonicFreqs[0];
      const frequency = settings.harmonicFreqs[harmonic];
      const amplitude = settings.harmonicAmps[harmonic];

      // convert phase offset specified in degrees to radians
      const phase_offset = Math.PI / 180 * settings.phase;

      // scale the phase offset so each harmonic is shifted appropriately
      const phase_offset_adjusted = phase_offset * frequency / fundamental_frequency;

      const radian_frequency = 2 * Math.PI * frequency;
      const phase_increment = radian_frequency / WEBAUDIO_MAX_SAMPLERATE;
      const phase = phase_increment * n + phase_offset_adjusted;

      // accumulate the amplitude contribution from the current harmonic
      arr[n] += amplitude * Math.sin(phase);
    }
  });

  // Linearly search for the peak magnitude (easy but not efficient).
  // BUGFIX: compare absolute values — with some phase settings the negative
  // peak exceeds the positive one, and the old `x > max` comparison then
  // under-normalized the signal and caused avoidable clipping.
  let max = 0;
  original.forEach((x) => { if (Math.abs(x) > max) max = Math.abs(x); });

  // Normalize and apply amplitude scaling. Guard against a silent (all-zero)
  // signal, which previously produced NaNs from a division by zero.
  if (max > 0) original.forEach((x, n, y) => y[n] = settings.amplitude * x / max);

  // apply antialiasing filter if applicable ----------------------------------

  // The antialiasing and reconstruction filters are generated using Fili.js
  // (https://github.com/markert/fili.js/). Fili uses the windowed sinc method
  // to generate FIR lowpass filters. Like real antialiasing and
  // reconstruction filters, these are approximations, not ideal brick walls.
  const firCalculator = new Fili.FirCoeffs();

  // apply antialiasing only if the filter order is set
  if (settings.antialiasing > 1) {

    // The cutoff is the Nyquist frequency of the simulated sampling process:
    // the sampling rate of the "sampled" signal is WEBAUDIO_MAX_SAMPLERATE
    // divided by the downsampling factor, and Nyquist is half of that.
    const filterCoeffs = firCalculator.lowpass({
      order: settings.antialiasing,
      Fs: WEBAUDIO_MAX_SAMPLERATE,
      Fc: (WEBAUDIO_MAX_SAMPLERATE / settings.downsamplingFactor) / 2,
    });

    // generate and apply the filter
    const filter = new Fili.FirFilter(filterCoeffs);
    original.forEach((x, n, y) => y[n] = filter.singleStep(x));

    // Time shift by half the filter order to compensate for the FIR group
    // delay. (Writes to negative typed-array indices are silently ignored.)
    original.forEach((x, i, arr) => arr[i - settings.antialiasing / 2] = x);
  }

  // downsample original wave -------------------------------------------------

  // zero initialize the reconstruction and zero-stuffed buffers
  reconstructed.fill(0);
  stuffed.fill(0);

  // Allocate fresh buffers for the downsampled signal and the quantization
  // noise, sized according to the currently set downsampling factor.
  const downsampledLength = p.round(original.length / settings.downsamplingFactor);
  if (playback) {
    settings.downsampled_pb = new Float32Array(downsampledLength);
    settings.quantNoise_pb = new Float32Array(downsampledLength);
  } else {
    settings.downsampled = new Float32Array(downsampledLength);
    settings.quantNoise = new Float32Array(downsampledLength);
  }
  const downsampled = playback ? settings.downsampled_pb : settings.downsampled;
  const quantNoise = playback ? settings.quantNoise_pb : settings.quantNoise;
  const quantNoiseStuffed = settings.quantNoiseStuffed;
  quantNoiseStuffed.fill(0);

  // maximum integer value representable with the given bit depth
  // (e.g. 255 for a bit depth of 8 bits)
  const maxInt = p.pow(2, settings.bitDepth) - 1;

  // quantization step over [-1, 1]; mid-tread has one fewer level available
  const stepSize = (settings.quantType == "midTread") ? 2 / (maxInt - 1) : 2 / maxInt;

  // Generate the output of the simulated ADC process by "sampling" (actually
  // just downsampling) and quantizing with dither. The reconstruction buffer
  // is sparsely filled with the sampled values at the same time, which lets
  // us skip an explicit zero-stuffing step later.
  downsampled.forEach((_, n, arr) => {

    // keep only every kth sample where k is the integer downsampling factor
    let y = original[n * settings.downsamplingFactor];
    y = y > 1.0 ? 1.0 : y < -1.0 ? -1.0 : y; // apply clipping

    // if the bit depth is set to the maximum, skip quantization and dither
    if (settings.bitDepth == BIT_DEPTH_MAX) {

      // record the sampled output of the ADC process
      arr[n] = y;

      // sparsely fill the reconstruction and zero-stuffed buffers
      reconstructed[n * settings.downsamplingFactor] = y;
      stuffed[n * settings.downsamplingFactor] = y * settings.downsamplingFactor;
      return;
    }

    // generate dither noise in [-dither, +dither]
    const dither = (2 * Math.random() - 1) * settings.dither;

    // Add the dither signal and quantize; constrain so we don't clip after dither.
    let quantized;
    switch (settings.quantType) {
      case "midTread":
        quantized = stepSize * p.floor(p.constrain((y + dither), -1, 0.99) / stepSize + 0.5);
        break;
      case "midRise":
        quantized = stepSize * (p.floor(p.constrain((y + dither), -1, 0.99) / stepSize) + 0.5);
        break;
    }

    // record the sampled and quantized output of the ADC process
    arr[n] = quantized;

    // sparsely fill the reconstruction buffer to avoid having to zero-stuff
    reconstructed[n * settings.downsamplingFactor] = quantized;
    stuffed[n * settings.downsamplingFactor] = quantized * settings.downsamplingFactor;

    // record the quantization error
    quantNoise[n] = quantized - y;
    quantNoiseStuffed[n * settings.downsamplingFactor] = quantNoise[n];
  });

  // render reconstructed wave by lowpass filtering the zero-stuffed array ----

  // specify filter parameters; as before, the cutoff is set to the Nyquist
  const filterCoeffs = firCalculator.lowpass({
    order: 200,
    Fs: WEBAUDIO_MAX_SAMPLERATE,
    Fc: (WEBAUDIO_MAX_SAMPLERATE / settings.downsamplingFactor) / 2,
  });

  const filter = new Fili.FirFilter(filterCoeffs);

  // Apply the filter. To retain the correct amplitude after zero stuffing,
  // the output of the filter is multiplied by the downsampling factor.
  reconstructed.forEach((x, n, arr) => {
    arr[n] = filter.singleStep(x) * settings.downsamplingFactor;
  });

  // time shift by half the filter order (100 samples) to compensate for the
  // delay introduced by the FIR filter
  reconstructed.forEach((x, n, arr) => arr[n - 100] = x);

  // render FFTs --------------------------------------------------------------
  // TODO: apply windows?

  // FFTs are generated with fft.js (https://github.com/indutny/fft.js).
  // `realTransform()` performs the FFT; `completeSpectrum()` fills in the
  // upper half of the spectrum, which is otherwise left uncalculated because
  // it is a redundant reflection of the lower half.
  if (simulation) {
    fft.realTransform(settings.originalFreq, original);
    fft.completeSpectrum(settings.originalFreq);

    fft.realTransform(settings.stuffedFreq, stuffed);
    // BUGFIX: complete the stuffed spectrum. This previously passed
    // settings.reconstructedFreq (a copy-paste error), leaving the upper half
    // of stuffedFreq unfilled.
    fft.completeSpectrum(settings.stuffedFreq);

    fft.realTransform(settings.reconstructedFreq, reconstructed);
    fft.completeSpectrum(settings.reconstructedFreq);

    fft.realTransform(settings.quantNoiseFreq, quantNoiseStuffed);
    fft.completeSpectrum(settings.quantNoiseFreq);
  }

  // fade in and out and suppress clipping distortions ------------------------

  // Audio output is windowed to prevent pops: a simple linear ramp up at the
  // beginning and a linear ramp down at the end.
  if (playback) {
    // This normalization makes sure the original signal isn't clipped. The
    // simulated output is clipped during the simulation, so this may reduce
    // its peak amplitude a bit, but since the clipping adds distortion the
    // perceived loudness stays roughly the same as the original signal.
    const normalize = settings.amplitude > 1.0 ? settings.amplitude : 1.0;

    // Define the fade function. The Math.min ensures there is a fade even if
    // the fade time is longer than half the signal.
    const fade = (_, n, arr) => {
      const fadeTimeSamps = Math.min(fadeTimeSeconds * WEBAUDIO_MAX_SAMPLERATE, arr.length / 2);
      if (n < fadeTimeSamps)
        arr[n] = (n / fadeTimeSamps) * arr[n] / normalize;
      else if (n > arr.length - fadeTimeSamps)
        arr[n] = ((arr.length - n) / fadeTimeSamps) * arr[n] / normalize;
      else arr[n] = arr[n] / normalize;
    };

    // Apply the fade function to every playback buffer.
    original.forEach(fade);
    reconstructed.forEach(fade);
    quantNoise.forEach(fade);
  }
};
}
371- /*
372- ```
373- */
0 commit comments