# BUILD 15/9
FFTAudioProcessor.java
```java
/**
 * Created by Nhien Nguyen on 5/5/2022
 * <p>
 * An audio processor which forwards the input to the output,
 * but also takes the input and executes a Fast-Fourier Transformation (FFT) on it.
 * The results of this transformation are an array of frequencies with their amplitudes,
 * which will be forwarded to the listener<br>
 * The sequence diagram applies to ExoPlayer 2.16.1
 * <p>
 * <p>
 * <center><object style="width: 480px; height: 190px;" type="image/jpeg"
 * <img
 * src="https://i.imgur.com/Bjcr9dy.jpg" style="width: 480px; height: 190px"
 * alt="FFTAudioProcessor Sequence diagram"></object></center>
 * <p>
 */
@SuppressWarnings("FieldCanBeLocal")
public class FFTAudioProcessor implements AudioProcessor {

    public static final String TAG = "FFTAudioProcessor";
    /** WAV header length in bytes. Currently unused. */
    private static final int HEADER_LENGTH = 44;
    /**
     * FFT window size in mono samples. A higher value gives finer frequency
     * resolution but delivers results to the listener less often.
     * NOTE(review): the work arrays below are sized from this value at
     * construction time — mutating it afterwards will desync them, so treat it
     * as effectively construction-time only.
     */
    public static int SAMPLE_SIZE = 4096; //Default 4096 -> The higher SAMPLE_SIZE value, the slower data return
    // Buffer sizing constants mirrored from ExoPlayer's DefaultAudioSink.
    public final long EXO_MIN_BUFFER_DURATION_US = 250000L;
    public final long EXO_MAX_BUFFER_DURATION_US = 750000L;
    public final long EXO_BUFFER_MULTIPLICATION_FACTOR = 4;
    private final boolean isDebugging = false;
    // Scratch space, currently unused.
    private final byte[] scratchBuffer = new byte[1024];
    private final ByteBuffer scratchByteBuffer = ByteBuffer.wrap(scratchBuffer).order(ByteOrder.LITTLE_ENDIAN);
    /** Extra headroom for the accumulation buffer (lossless ~8x, mp3 ~2x). */
    public int BUFFER_EXTRA_SIZE = SAMPLE_SIZE * 8;
    /** Receives the FFT result for each completed window. May be null (FFT is then skipped). */
    public FFTAudioProcessor.FFTListener listener;
    // One FFT window worth of PCM16 mono bytes pulled out of mSrcBuffer.
    private byte[] mTempByteArray = new byte[SAMPLE_SIZE * 2];
    private float[] mSrc = new float[SAMPLE_SIZE];     // FFT real input (normalized samples)
    private float[] mDst = new float[SAMPLE_SIZE + 2]; // FFT imaginary input / scratch
    private float[] fft;
    private byte[] tempBufferArray; // retained for compatibility; no longer used
    private Noise mNoise;
    private ByteBuffer mProcessBuffer; // pass-through copy of the input, handed out via getOutput()
    private ByteBuffer mFftBuffer;     // mono down-mix of the current input chunk, fed to processFFT()
    private ByteBuffer mOutputBuffer;
    private ByteBuffer mSrcBuffer;     // accumulates mono bytes until a full FFT window is available
    private int mSrcBufferPosition = 0;
    private int mAudioTrackBufferSize = 0;
    private int mChannelCount;
    private int mSampleRateHz;
    private int mEncoding = 0;
    private int mBytesToProcess = SAMPLE_SIZE * 2; // bytes per FFT window (PCM16 mono)
    private int position;
    private int limit;
    private int frameCount;
    private int singleChannelOutputSize; //UNUSED YET
    private int outputSize;
    private boolean mIsActive;
    private boolean wasActive;
    private boolean mInputEnded;
    private long mFFTProcessTimeMs;
    // Fields for legacy queueInput
    private int bytesWritten;
    private @Nullable
    RandomAccessFile randomAccessFile;
    private int counter;
    // Fallback sink used by safeQueueInput() to keep audio flowing if the FFT path throws.
    private final TeeAudioProcessor.AudioBufferSink audioBufferSink = new TeeAudioProcessor.WavFileAudioBufferSink("safeBufferSink");

    public FFTAudioProcessor() {
        mProcessBuffer = AudioProcessor.EMPTY_BUFFER;
        mFftBuffer = AudioProcessor.EMPTY_BUFFER;
        mOutputBuffer = AudioProcessor.EMPTY_BUFFER;
        mChannelCount = Format.NO_VALUE;
        mSampleRateHz = Format.NO_VALUE;
    }

    /**
     * Computes the AudioTrack buffer size the same way ExoPlayer's DefaultAudioSink
     * does, constrained between the min/max buffer durations above.
     */
    private int getDefaultBufferSizeInBytes() {
        if (isDebugging) Log.i(TAG, "getDefaultBufferSizeInBytes");
        int outputPcmFrameSize = Util.getPcmFrameSize(mEncoding, mChannelCount);
        int minBufferSize = AudioTrack.getMinBufferSize(mSampleRateHz, Util.getAudioTrackChannelConfig(mChannelCount), mEncoding);
        Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
        int multipliedBufferSize = (int) (minBufferSize * EXO_BUFFER_MULTIPLICATION_FACTOR);
        int minAppBufferSize = (int) durationUsToFrames(EXO_MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
        long maxAppBufferSize = Math.max(minBufferSize, (durationUsToFrames(EXO_MAX_BUFFER_DURATION_US) * outputPcmFrameSize));
        // Round down to a whole number of PCM frames.
        int bufferSizeInFrames = (int) (Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize) / outputPcmFrameSize);
        return bufferSizeInFrames * outputPcmFrameSize;
    }

    /** Converts a duration in microseconds to a frame count at the configured sample rate. */
    private long durationUsToFrames(long durationUs) {
        return durationUs * mSampleRateHz / C.MICROS_PER_SECOND;
    }

    /**
     * Configures the processor. Only 16-bit PCM is supported.
     *
     * @return true if this call activated the processor (it was not active before).
     * @throws UnhandledFormatException if {@code encoding} is not {@link C#ENCODING_PCM_16BIT}.
     */
    @Override
    public boolean configure(int sampleRateHz, int channelCount, int encoding) throws UnhandledFormatException {
        if (isDebugging) Log.i(TAG, "Configure");
        if (encoding != C.ENCODING_PCM_16BIT) {
            throw new UnhandledFormatException(sampleRateHz, channelCount, encoding);
        }
        mSampleRateHz = sampleRateHz;
        mChannelCount = channelCount;
        mEncoding = encoding;
        wasActive = mIsActive;
        mIsActive = true;
        mNoise = Noise.real(SAMPLE_SIZE);
        mAudioTrackBufferSize = getDefaultBufferSizeInBytes();
        mSrcBuffer = ByteBuffer.allocate(mAudioTrackBufferSize + BUFFER_EXTRA_SIZE);
        // Drop any samples accumulated under a previous format.
        mSrcBufferPosition = 0;
        Log.i("FFTProcessorDetail", toString());
        return !wasActive;
    }

    @Override
    public boolean isActive() {
        return mIsActive;
    }

    @Override
    public int getOutputChannelCount() {
        return mChannelCount;
    }

    @Override
    public int getOutputEncoding() {
        return mEncoding;
    }

    @Override
    public int getOutputSampleRateHz() {
        return mSampleRateHz;
    }

    /**
     * Copies the input through to the output untouched while down-mixing it to
     * mono (per-frame channel average) into {@link #mFftBuffer} for the FFT.
     * Falls back to {@link #safeQueueInput(ByteBuffer)} on any failure so audio
     * keeps playing even if the FFT path breaks.
     */
    @Override
    public void queueInput(ByteBuffer inputBuffer) {
        if (isDebugging) Log.i(TAG, "queueInput");
        try {
            int remaining = inputBuffer.remaining();
            if (remaining == 0) return;
            position = inputBuffer.position();
            limit = inputBuffer.limit();
            frameCount = (limit - position) / (2 * mChannelCount);
            singleChannelOutputSize = frameCount * 2;
            outputSize = frameCount * mChannelCount * 2;
            if (mProcessBuffer.capacity() < outputSize) {
                mProcessBuffer = ByteBuffer.allocateDirect(outputSize).order(ByteOrder.nativeOrder());
            } else {
                mProcessBuffer.clear();
            }
            if (mFftBuffer.capacity() < remaining) {
                mFftBuffer = ByteBuffer.allocateDirect(remaining).order(ByteOrder.nativeOrder());
            } else {
                mFftBuffer.clear();
            }
            while (position < limit) {
                int summedUp = 0;
                for (int channelIndex = 0; channelIndex < mChannelCount; channelIndex++) {
                    // getShort honours inputBuffer's byte order (little-endian for ExoPlayer PCM).
                    short current = inputBuffer.getShort(position + 2 * channelIndex);
                    mProcessBuffer.putShort(current);
                    summedUp += current;
                }
                // Average the channels into a single mono sample.
                mFftBuffer.putShort((short) (summedUp / mChannelCount));
                position += mChannelCount * 2;
            }
            inputBuffer.position(limit);
            processFFT(mFftBuffer);
            mProcessBuffer.flip();
            mOutputBuffer = this.mProcessBuffer;
        } catch (Exception e) {
            e.printStackTrace();
            safeQueueInput(inputBuffer);
        }
    }

    /**
     * Minimal pass-through used when the FFT path throws: copies whatever is
     * left of {@code buffer} straight to the output (no FFT) and mirrors it to
     * the debug WAV sink.
     */
    private void safeQueueInput(ByteBuffer buffer) {
        int remaining = buffer.remaining();
        if (remaining == 0) {
            return;
        }
        audioBufferSink.handleBuffer(buffer.asReadOnlyBuffer());
        if (this.mProcessBuffer.capacity() < remaining) {
            this.mProcessBuffer = ByteBuffer.allocateDirect(remaining).order(ByteOrder.nativeOrder());
        } else {
            this.mProcessBuffer.clear();
        }
        this.mProcessBuffer.put(buffer);
        this.mProcessBuffer.flip();
        mOutputBuffer = this.mProcessBuffer;
    }

    /**
     * Accumulates the mono bytes in {@code buffer} and, for every complete
     * window of {@link #SAMPLE_SIZE} samples beyond the AudioTrack buffer size,
     * runs the FFT and notifies the listener.
     * <p>
     * Fixes vs. the previous revision: the buffer is flipped and copied with
     * {@link ByteBuffer#put(ByteBuffer)} instead of {@code buffer.array()} —
     * {@code mFftBuffer} is a direct buffer, so {@code array()} always threw
     * {@link UnsupportedOperationException} and the listener never fired. The
     * PCM16 pairs are also now decoded as little-endian (low byte first) and
     * normalized by 32768 instead of the previous high-byte-first / 127*127
     * arithmetic.
     */
    private void processFFT(ByteBuffer buffer) {
        if (isDebugging) Log.i(TAG, "processFFT");
        if (listener == null || mSrcBuffer == null || mNoise == null) {
            return;
        }
        try {
            // Read only the bytes actually written into this chunk.
            buffer.flip();
            mSrcBufferPosition += buffer.remaining();
            mSrcBuffer.put(buffer);
            mBytesToProcess = SAMPLE_SIZE * 2;
            while (mSrcBufferPosition > mAudioTrackBufferSize) {
                // Pull one window of PCM16 mono bytes from the front of the accumulator.
                mSrcBuffer.position(0);
                mSrcBuffer.get(mTempByteArray, 0, mBytesToProcess);
                for (int idx = 0; idx + 1 < mBytesToProcess; idx += 2) {
                    // Little-endian PCM16: low byte first, then the signed high byte.
                    short sample = (short) (((mTempByteArray[idx + 1]) << 8) | (mTempByteArray[idx] & 0xFF));
                    mSrc[idx / 2] = sample / 32768f;
                    mDst[idx / 2] = 0f;
                }
                // Shift the unprocessed remainder to the front of the accumulator.
                mSrcBuffer.limit(mSrcBuffer.capacity());
                mSrcBuffer.position(mBytesToProcess);
                mSrcBuffer.compact();
                mSrcBufferPosition -= mBytesToProcess;
                mSrcBuffer.position(mSrcBufferPosition);
                fft = mNoise.fft(mSrc, mDst);
                listener.onFFTReady(mSampleRateHz, mChannelCount, fft);
                if (isDebugging)
                    Log.i("FFTProcessTime", "Process in " + (System.currentTimeMillis() - mFFTProcessTimeMs) + " ms");
                mFFTProcessTimeMs = System.currentTimeMillis();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    @Override
    public void queueEndOfStream() {
        mInputEnded = true;
        mProcessBuffer = AudioProcessor.EMPTY_BUFFER;
    }

    /** Returns the pending output buffer and replaces it with the shared empty buffer. */
    @Override
    public ByteBuffer getOutput() {
        ByteBuffer outputBuffer = this.mOutputBuffer;
        this.mOutputBuffer = AudioProcessor.EMPTY_BUFFER;
        return outputBuffer;
    }

    @Override
    public boolean isEnded() {
        return mInputEnded && mProcessBuffer == AudioProcessor.EMPTY_BUFFER;
    }

    @Override
    public void flush() {
        mOutputBuffer = AudioProcessor.EMPTY_BUFFER;
        mInputEnded = false;
        // Don't forward Format.NO_VALUE to the WAV sink before configure() has run.
        if (mSampleRateHz != Format.NO_VALUE && mChannelCount != Format.NO_VALUE) {
            audioBufferSink.flush(mSampleRateHz, mChannelCount, mEncoding);
        }
    }

    @Override
    public void reset() {
        if (isDebugging) Log.i(TAG, "reset");
        flush();
        mProcessBuffer = AudioProcessor.EMPTY_BUFFER;
        mFftBuffer = AudioProcessor.EMPTY_BUFFER;
        mOutputBuffer = AudioProcessor.EMPTY_BUFFER;
        // Discard accumulated samples so a re-configured stream starts clean.
        mSrcBufferPosition = 0;
        mEncoding = Format.NO_VALUE;
        mChannelCount = Format.NO_VALUE;
        mSampleRateHz = Format.NO_VALUE;
    }

    /** Releases the native FFT resources. Safe to call before {@link #configure}. */
    public void closeInstance() {
        if (mNoise != null) {
            mNoise.close();
        }
    }

    public void showToast(Context context, String msg) {
        Toast.makeText(context, msg, Toast.LENGTH_SHORT).show();
    }

    @NonNull
    @Override
    public String toString() {
        return "FFTAudioProcessor{" +
                "SAMPLE_SIZE=" + SAMPLE_SIZE +
                ", BUFFER_EXTRA_SIZE=" + BUFFER_EXTRA_SIZE +
                ", mAudioTrackBufferSize=" + mAudioTrackBufferSize +
                ", mChannelCount=" + mChannelCount +
                ", mSampleRateHz=" + mSampleRateHz +
                ", mEncoding=" + mEncoding +
                '}';
    }

    /** Callback for receiving the FFT of each completed sample window. */
    public interface FFTListener {
        void onFFTReady(int sampleRateHz, int channelCount, float[] FFTArray);
    }
}
```