Split the audio converter.

monitor
Elliott Liggett 2022-09-03 15:48:05 -07:00
parent 7dfac11315
commit 8eed3a360b
3 changed files with 319 additions and 78 deletions
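The commit splits the former all-in-one convert() path into two stages: convertToEigenFloat() decodes an incoming packet (Opus, PCMU or integer PCM) into a normalised Eigen float vector and emits convertedEigenFloat(), while convertFromEigenFloat() turns such a vector back into the requested output codec and emits converted(). A minimal sketch of how the two halves could be chained is shown below; apart from the audioConverter slots and signals themselves, every name in it is an illustrative assumption, not part of this commit.

// Sketch only: one possible way to chain the split stages.
// sharedInfo, inFormat, outFormat and packet are assumed placeholders.
sharedAudioType sharedInfo;
QAudioFormat inFormat, outFormat;   // filled in by the caller
audioPacket packet;                 // filled in by the caller

audioConverter conv(&sharedInfo, true, nullptr);
conv.init(inFormat, outFormat, 4 /*opusComplexity*/, 4 /*resampleQuality*/);

// Stage 1 (decode/normalise) feeds stage 2 (re-encode) through its signal.
QObject::connect(&conv, &audioConverter::convertedEigenFloat,
                 &conv, &audioConverter::convertFromEigenFloat);
QObject::connect(&conv, &audioConverter::converted,
                 [](audioPacket out) { Q_UNUSED(out); /* hand the finished packet to the audio device */ });

conv.convertToEigenFloat(packet);   // runs the whole chain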

View file

@ -4,94 +4,227 @@
audioConverter::audioConverter(sharedAudioType *m, bool isInputAudio, QObject* parent) : QObject(parent)
{
-this->monitor = m;
+this->sharedAudioInfo = m;
this->isInputAudio = isInputAudio;
-qDebug(logAudioConverter()) << "AudioConverter Monitor test: isInput: " << isInputAudio << ", monitor number: " << m->testNumber;
+this->eigenFormat.setCodec("audio/PCM");
+this->eigenFormat.setSampleRate(48000);
if(isInputAudio)
-m->testNumber = 10;
+this->eigenFormat.setChannelCount(1);
else
-m->testNumber = 9;
+this->eigenFormat.setChannelCount(2);
+this->eigenFormat.setSampleSize(32);
+this->eigenFormat.setSampleType(QAudioFormat::Float);
}
bool audioConverter::init(QAudioFormat inFormat, QAudioFormat outFormat, quint8 opusComplexity, quint8 resampleQuality)
{
this->inFormat = inFormat;
this->outFormat = outFormat;
this->opusComplexity = opusComplexity;
this->resampleQuality = resampleQuality;
qInfo(logAudioConverter) << "Starting audioConverter() Input:" << inFormat.channelCount() << "Channels of" << inFormat.codec() << inFormat.sampleRate() << inFormat.sampleType() << inFormat.sampleSize() <<
"Output:" << outFormat.channelCount() << "Channels of" << outFormat.codec() << outFormat.sampleRate() << outFormat.sampleType() << outFormat.sampleSize();
if (inFormat.byteOrder() != outFormat.byteOrder()) {
qInfo(logAudioConverter) << "Byteorder mismatch in:" << inFormat.byteOrder() << "out:" << outFormat.byteOrder();
}
if (inFormat.codec() == "audio/opus")
{
// Create instance of opus decoder
int opus_err = 0;
opusDecoder = opus_decoder_create(inFormat.sampleRate(), inFormat.channelCount(), &opus_err);
qInfo(logAudioConverter()) << "Creating opus decoder: " << opus_strerror(opus_err);
}
if (outFormat.codec() == "audio/opus")
{
// Create instance of opus encoder
int opus_err = 0;
opusEncoder = opus_encoder_create(outFormat.sampleRate(), outFormat.channelCount(), OPUS_APPLICATION_AUDIO, &opus_err);
//opus_encoder_ctl(opusEncoder, OPUS_SET_LSB_DEPTH(16));
//opus_encoder_ctl(opusEncoder, OPUS_SET_INBAND_FEC(1));
//opus_encoder_ctl(opusEncoder, OPUS_SET_DTX(1));
//opus_encoder_ctl(opusEncoder, OPUS_SET_PACKET_LOSS_PERC(5));
opus_encoder_ctl(opusEncoder, OPUS_SET_COMPLEXITY(opusComplexity)); // Reduce complexity to maybe lower CPU?
qInfo(logAudioConverter()) << "Creating opus encoder: " << opus_strerror(opus_err);
}
if (inFormat.sampleRate() != outFormat.sampleRate())
{
int resampleError = 0;
unsigned int ratioNum;
unsigned int ratioDen;
// Sample rate conversion required.
resampler = wf_resampler_init(outFormat.channelCount(), inFormat.sampleRate(), outFormat.sampleRate(), resampleQuality, &resampleError);
wf_resampler_get_ratio(resampler, &ratioNum, &ratioDen);
resampleRatio = static_cast<double>(ratioDen) / ratioNum;
qInfo(logAudioConverter()) << "wf_resampler_init() returned: " << resampleError << " resampleRatio: " << resampleRatio;
}
-if((!this->isInputAudio) && (monitor != Q_NULLPTR) && (!monitor->allocated))
-{
-// TODO Remove this
-monitor->allocated = true;
-}
return true;
}
audioConverter::~audioConverter()
{
qInfo(logAudioConverter) << "Closing audioConverter() Input:" << inFormat.channelCount() << "Channels of" << inFormat.codec() << inFormat.sampleRate() << inFormat.sampleType() << inFormat.sampleSize() <<
"Output:" << outFormat.channelCount() << "Channels of" << outFormat.codec() << outFormat.sampleRate() << outFormat.sampleType() << outFormat.sampleSize();
if (opusEncoder != Q_NULLPTR) {
qInfo(logAudioConverter()) << "Destroying opus encoder";
opus_encoder_destroy(opusEncoder);
}
if (opusDecoder != Q_NULLPTR) {
qInfo(logAudioConverter()) << "Destroying opus decoder";
opus_decoder_destroy(opusDecoder);
}
if (resampler != Q_NULLPTR) {
speex_resampler_destroy(resampler);
qDebug(logAudioConverter()) << "Resampler closed";
}
-qDebug(logAudioConverter()) << "test number: " << monitor->testNumber;
+qDebug(logAudioConverter()) << "test number: " << sharedAudioInfo->testNumber;
}
bool audioConverter::convertToEigenFloat(audioPacket audio)
{
// If inFormat and outFormat are identical, just emit the data back (removed as it doesn't then process amplitude)
if (audio.data.size() > 0)
{
if (inFormat.codec() == "audio/opus")
{
unsigned char* in = (unsigned char*)audio.data.data();
//Decode the frame.
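// opus_packet_get_nb_samples() reports how many PCM samples this packet will
// decode to, which is used to size the float output buffer below.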
int nSamples = opus_packet_get_nb_samples(in, audio.data.size(), inFormat.sampleRate());
if (nSamples == -1) {
// No opus data yet?
return false;
}
QByteArray outPacket(nSamples * sizeof(float) * inFormat.channelCount(), (char)0xff); // Preset the output buffer size.
float* out = (float*)outPacket.data();
//if (audio.seq > lastAudioSequence + 1) {
// nSamples = opus_decode_float(opusDecoder, Q_NULLPTR, 0, out, nSamples, 1);
//}
//else {
nSamples = opus_decode_float(opusDecoder, in, audio.data.size(), out, nSamples, 0);
//}
//lastAudioSequence = audio.seq;
audio.data.clear();
audio.data = outPacket; // Replace incoming data with converted.
}
else if (inFormat.codec() == "audio/PCMU")
{
// Current packet is "technically" 8bit so need to create a new buffer that is 16bit
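// ulaw_decode is a 256-entry lookup table mapping each 8-bit mu-law code to
// a 16-bit linear PCM sample, hence the doubled buffer size.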
QByteArray outPacket((int)audio.data.length() * 2, (char)0xff);
qint16* out = (qint16*)outPacket.data();
for (int f = 0; f < audio.data.length(); f++)
{
*out++ = ulaw_decode[(quint8)audio.data[f]];
}
audio.data.clear();
audio.data = outPacket; // Replace incoming data with converted.
// Make sure that sample size/type is set correctly
}
Eigen::VectorXf samplesF;
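// Map the raw byte buffer as the integer type it actually contains, then
// normalise to floats in the range [-1.0, 1.0] by dividing by that type's
// maximum value.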
if (inFormat.sampleType() == QAudioFormat::SignedInt && inFormat.sampleSize() == 32)
{
Eigen::Ref<VectorXint32> samplesI = Eigen::Map<VectorXint32>(reinterpret_cast<qint32*>(audio.data.data()), audio.data.size() / int(sizeof(qint32)));
samplesF = samplesI.cast<float>() / float(std::numeric_limits<qint32>::max());
}
else if (inFormat.sampleType() == QAudioFormat::SignedInt && inFormat.sampleSize() == 16)
{
Eigen::Ref<VectorXint16> samplesI = Eigen::Map<VectorXint16>(reinterpret_cast<qint16*>(audio.data.data()), audio.data.size() / int(sizeof(qint16)));
samplesF = samplesI.cast<float>() / float(std::numeric_limits<qint16>::max());
}
else if (inFormat.sampleType() == QAudioFormat::SignedInt && inFormat.sampleSize() == 8)
{
Eigen::Ref<VectorXint8> samplesI = Eigen::Map<VectorXint8>(reinterpret_cast<qint8*>(audio.data.data()), audio.data.size() / int(sizeof(qint8)));
samplesF = samplesI.cast<float>() / float(std::numeric_limits<qint8>::max());
}
else if (inFormat.sampleType() == QAudioFormat::UnSignedInt && inFormat.sampleSize() == 8)
{
Eigen::Ref<VectorXuint8> samplesI = Eigen::Map<VectorXuint8>(reinterpret_cast<quint8*>(audio.data.data()), audio.data.size() / int(sizeof(quint8)));
samplesF = samplesI.cast<float>() / float(std::numeric_limits<quint8>::max());
}
else if (inFormat.sampleType() == QAudioFormat::Float) {
samplesF = Eigen::Map<Eigen::VectorXf>(reinterpret_cast<float*>(audio.data.data()), audio.data.size() / int(sizeof(float)));
}
else
{
qInfo(logAudio()) << "Unsupported Input Sample Type:" << inFormat.sampleType() << "Size:" << inFormat.sampleSize();
}
if (samplesF.size() > 0)
{
audio.amplitudePeak = samplesF.array().abs().maxCoeff();
//audio.amplitudeRMS = samplesF.array().abs().mean(); // zero for tx audio
//audio.amplitudeRMS = samplesF.norm() / sqrt(samplesF.size()); // too high values. Zero for tx audio.
// Set the volume
samplesF *= audio.volume;
/*
samplesF is now an Eigen Vector of the current samples in float format
The next step is to convert to the correct number of channels in outFormat.channelCount()
*/
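// An Eigen::Map with InnerStride<2> views every second float of an
// interleaved stereo buffer: reading through it keeps one channel
// (stereo -> mono), while writing the mono samples through two stride-2 maps
// at offsets 0 and 1 duplicates them into both channels (mono -> stereo).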
if (inFormat.channelCount() == 2 && eigenFormat.channelCount() == 1) {
// If we need to drop one of the audio channels, do it now
Eigen::VectorXf samplesTemp(samplesF.size() / 2);
samplesTemp = Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<2> >(samplesF.data(), samplesF.size() / 2);
samplesF = samplesTemp;
}
else if (inFormat.channelCount() == 1 && eigenFormat.channelCount() == 2) {
// Convert mono to stereo if required
Eigen::VectorXf samplesTemp(samplesF.size() * 2);
Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<2> >(samplesTemp.data(), samplesF.size()) = samplesF;
Eigen::Map<Eigen::VectorXf, 0, Eigen::InnerStride<2> >(samplesTemp.data() + 1, samplesF.size()) = samplesF;
samplesF = samplesTemp;
}
/*
Next step is to resample (if needed)
*/
if (resampler != Q_NULLPTR && resampleRatio != 1.0)
{
quint32 outFrames = ((samplesF.size() / eigenFormat.channelCount()) * resampleRatio);
quint32 inFrames = (samplesF.size() / eigenFormat.channelCount());
QByteArray outPacket(outFrames * eigenFormat.channelCount() * sizeof(float), (char)0xff); // Preset the output buffer size.
const float* in = (float*)samplesF.data();
float* out = (float*)outPacket.data();
int err = 0;
if (eigenFormat.channelCount() == 1) {
err = wf_resampler_process_float(resampler, 0, in, &inFrames, out, &outFrames);
}
else {
err = wf_resampler_process_interleaved_float(resampler, in, &inFrames, out, &outFrames);
}
if (err) {
qInfo(logAudioConverter()) << "Resampler error " << err << " inFrames:" << inFrames << " outFrames:" << outFrames;
}
samplesF = Eigen::Map<Eigen::VectorXf>(reinterpret_cast<float*>(outPacket.data()), outPacket.size() / int(sizeof(float)));
}
emit convertedEigenFloat(samplesF);
return true;
}
}
qDebug(logAudioConverter) << "Detected empty packet";
return false;
}
bool audioConverter::convert(audioPacket audio)
@ -99,7 +232,7 @@ bool audioConverter::convert(audioPacket audio)
// If inFormat and outFormat are identical, just emit the data back (removed as it doesn't then process amplitude)
if (audio.data.size() > 0)
{
if (inFormat.codec() == "audio/opus")
{
@ -174,9 +307,6 @@ bool audioConverter::convert(audioPacket audio)
audio.amplitudePeak = samplesF.array().abs().maxCoeff();
//audio.amplitudeRMS = samplesF.array().abs().mean(); // zero for tx audio
//audio.amplitudeRMS = samplesF.norm() / sqrt(samplesF.size()); // too high values. Zero for tx audio.
-//audio.amplitudeRMS = samplesF.squaredNorm(); // tx not zero. Values higher than peak sometimes
-//audio.amplitudeRMS = samplesF.norm(); // too small values. also too small on TX
-//audio.amplitudeRMS = samplesF.blueNorm(); // scale same as norm, too small.
// Set the volume
samplesF *= audio.volume;
@ -227,6 +357,12 @@ bool audioConverter::convert(audioPacket audio)
samplesF = Eigen::Map<Eigen::VectorXf>(reinterpret_cast<float*>(outPacket.data()), outPacket.size() / int(sizeof(float)));
}
/*
If the output codec is Opus, encode it now; no further conversion is done on the Opus output.
*/
@ -262,7 +398,7 @@ bool audioConverter::convert(audioPacket audio)
if (outFormat.sampleType() == QAudioFormat::UnSignedInt && outFormat.sampleSize() == 8)
{
Eigen::VectorXf samplesITemp = samplesF * float(std::numeric_limits<qint8>::max());
samplesITemp.array() += 127;
VectorXuint8 samplesI = samplesITemp.cast<quint8>();
audio.data = QByteArray(reinterpret_cast<char*>(samplesI.data()), int(samplesI.size()) * int(sizeof(quint8)));
@ -322,13 +458,112 @@ bool audioConverter::convert(audioPacket audio)
}
}
}
else
{
qDebug(logAudioConverter) << "Detected empty packet";
}
}
emit converted(audio);
return true;
}
bool audioConverter::convertFromEigenFloat(Eigen::VectorXf samplesF)
{
/*
If the output codec is Opus, encode it now; no further conversion is done on the Opus output.
*/
audioPacket audio;
if (outFormat.codec() == "audio/opus")
{
float* in = (float*)samplesF.data();
QByteArray outPacket(1275, (char)0xff); // Preset the output buffer size to MAXIMUM possible Opus frame size
unsigned char* out = (unsigned char*)outPacket.data();
int nbBytes = opus_encode_float(opusEncoder, in, (samplesF.size() / outFormat.channelCount()), out, outPacket.length());
if (nbBytes < 0)
{
qInfo(logAudioConverter()) << "Opus encode failed:" << opus_strerror(nbBytes) << "Num Samples:" << samplesF.size();
return false;
}
else {
outPacket.resize(nbBytes);
audio.data.clear();
audio.data = outPacket; // Copy output packet back to input buffer.
//samplesF = Eigen::Map<Eigen::VectorXf>(reinterpret_cast<float*>(outPacket.data()), outPacket.size() / int(sizeof(float)));
}
}
else {
/*
Now convert back into the output format required
*/
audio.data.clear();
if (outFormat.sampleType() == QAudioFormat::UnSignedInt && outFormat.sampleSize() == 8)
{
Eigen::VectorXf samplesITemp = samplesF * float(std::numeric_limits<qint8>::max());
samplesITemp.array() += 127;
VectorXuint8 samplesI = samplesITemp.cast<quint8>();
audio.data = QByteArray(reinterpret_cast<char*>(samplesI.data()), int(samplesI.size()) * int(sizeof(quint8)));
}
else if (outFormat.sampleType() == QAudioFormat::SignedInt && outFormat.sampleSize() == 8)
{
Eigen::VectorXf samplesITemp = samplesF * float(std::numeric_limits<qint8>::max());
VectorXint8 samplesI = samplesITemp.cast<qint8>();
audio.data = QByteArray(reinterpret_cast<char*>(samplesI.data()), int(samplesI.size()) * int(sizeof(qint8)));
}
else if (outFormat.sampleType() == QAudioFormat::SignedInt && outFormat.sampleSize() == 16)
{
Eigen::VectorXf samplesITemp = samplesF * float(std::numeric_limits<qint16>::max());
VectorXint16 samplesI = samplesITemp.cast<qint16>();
audio.data = QByteArray(reinterpret_cast<char*>(samplesI.data()), int(samplesI.size()) * int(sizeof(qint16)));
}
else if (outFormat.sampleType() == QAudioFormat::SignedInt && outFormat.sampleSize() == 32)
{
Eigen::VectorXf samplesITemp = samplesF * float(std::numeric_limits<qint32>::max());
VectorXint32 samplesI = samplesITemp.cast<qint32>();
audio.data = QByteArray(reinterpret_cast<char*>(samplesI.data()), int(samplesI.size()) * int(sizeof(qint32)));
}
else if (outFormat.sampleType() == QAudioFormat::Float)
{
audio.data = QByteArray(reinterpret_cast<char*>(samplesF.data()), int(samplesF.size()) * int(sizeof(float)));
}
else {
qInfo(logAudio()) << "Unsupported Output Sample Type:" << outFormat.sampleType() << "Size:" << outFormat.sampleSize();
}
/*
As we currently don't have a float based uLaw encoder, this must be done
after all other conversion has taken place.
*/
if (outFormat.codec() == "audio/PCMU")
{
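// G.711 mu-law companding: take the sample's sign, clip and bias the
// magnitude, look up the segment (exponent) in MuLawCompressTable, keep four
// mantissa bits, then invert the bits to form the 8-bit code.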
QByteArray outPacket((int)audio.data.length() / 2, (char)0xff);
qint16* in = (qint16*)audio.data.data();
for (int f = 0; f < outPacket.length(); f++)
{
qint16 sample = *in++;
int sign = (sample >> 8) & 0x80;
if (sign)
sample = (short)-sample;
if (sample > cClip)
sample = cClip;
sample = (short)(sample + cBias);
int exponent = (int)MuLawCompressTable[(sample >> 7) & 0xFF];
int mantissa = (sample >> (exponent + 3)) & 0x0F;
int compressedByte = ~(sign | (exponent << 4) | mantissa);
outPacket[f] = (quint8)compressedByte;
}
audio.data.clear();
audio.data = outPacket; // Copy output packet back to input buffer.
}
}
emit converted(audio);
return true;
}

View file

@ -64,13 +64,18 @@ public:
public slots:
bool init(QAudioFormat inFormat, QAudioFormat outFormat, quint8 opusComplexity, quint8 resampleQuality);
bool convert(audioPacket audio);
+bool convertToEigenFloat(audioPacket audio);
+bool convertFromEigenFloat(Eigen::VectorXf eigenAudio);
signals:
void converted(audioPacket audio);
+void convertedEigenFloat(Eigen::VectorXf eigenAudio);
+void convertedFromEigenFloat(audioPacket audio);
protected:
QAudioFormat inFormat;
QAudioFormat outFormat;
+QAudioFormat eigenFormat;
OpusEncoder* opusEncoder = Q_NULLPTR;
OpusDecoder* opusDecoder = Q_NULLPTR;
SpeexResamplerState* resampler = Q_NULLPTR;
@ -78,7 +83,7 @@ protected:
quint8 resampleQuality = 4;
double resampleRatio=1.0; // Default resample ratio is 1:1
quint32 lastAudioSequence;
-sharedAudioType *monitor;
+sharedAudioType *sharedAudioInfo;
bool isInputAudio = false;
};

View file

@ -334,6 +334,8 @@ int paHandler::writeData(const void* inputBuffer, void* outputBuffer,
void paHandler::mixAudio(QByteArray *dest, unsigned char *monitorSource)
{
// TODO: write versions of mix audio for the various formats that port
// audio supports.
int size = dest->size();
float* mon = reinterpret_cast<float*>(monitorSource);
float* dst = reinterpret_cast<float*>(dest->data());
@ -344,7 +346,6 @@ void paHandler::mixAudio(QByteArray *dest, unsigned char *monitorSource)
{
dst[s] = dst[s] + sharedAudioInfo->monitorVolume*mon[s];
}
}
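The TODO above asks for mixers covering the other sample formats the port audio backend can deliver; a minimal sketch of what a signed 16-bit variant might look like follows. The function name, its membership in paHandler, and the clamping strategy are assumptions for illustration, not part of this commit.

// Hypothetical 16-bit counterpart to mixAudio(); sketch only.
void paHandler::mixAudioInt16(QByteArray *dest, unsigned char *monitorSource)
{
    int samples = dest->size() / int(sizeof(qint16));
    qint16* mon = reinterpret_cast<qint16*>(monitorSource);
    qint16* dst = reinterpret_cast<qint16*>(dest->data());
    for (int s = 0; s < samples; s++)
    {
        // Mix as float to avoid intermediate overflow, then clamp back to 16 bit.
        float mixed = float(dst[s]) + sharedAudioInfo->monitorVolume * float(mon[s]);
        dst[s] = qint16(qBound(-32768.0f, mixed, 32767.0f));
    }
}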
void paHandler::convertedOutput(audioPacket packet) {