8 years, 10 months ago.

analogin

I'm having trouble using SparkFun electret microphones. Is there a way for the microphone to only register an input when I say something, rather than picking up too much pointless noise? When it receives an input it will correlate the two channels and find the angle. Is there any improvement I should make to my code? Pointing it out in my code would be a great help. Thank you. This is my source of help: https://www2.informatik.uni-hamburg.de/wtm/ps/murray_ulm04.pdf

#include "mbed.h"
#include "math.h"

#define SAMPLE_PERIOD 60
#define SAMPLE 1024
#define PI  3.1416
#define freq 44100
#define sos 343 // speed of sound in air, m/s
#define WINDOWSIZE 128

Serial pc(USBTX, USBRX);
DigitalOut myled(LED1);
AnalogIn  rightmic(p16);
AnalogIn  leftmic(p19);
BusOut    unused(p15,p17,p18,p20); 
Timer t;

double max1,min1; 
double max,min;
float left_results[SAMPLE];
float right_results[SAMPLE];
float timesampling;
int peakoffset;
int count;

int correlate(float left_results[] , float right_results[])
{
    float peakCorrelation = 0;
    int peakoffset = 0;
    float correlation;
    int firstLeftSample = SAMPLE/2 - WINDOWSIZE/2; // first sample of the left window so that it is centred in the buffer
    int timeoffset = -firstLeftSample; // minimum time offset to test
    while ( (timeoffset + firstLeftSample + WINDOWSIZE) < SAMPLE )
    {
        correlation = 0;
        for (int i = 0; i < WINDOWSIZE; i++)
        {
            correlation += left_results[firstLeftSample + i] * right_results[firstLeftSample + timeoffset + i];
        }
        if (correlation > peakCorrelation) // look for the peak
        {
            peakCorrelation = correlation;
            peakoffset = timeoffset;
        }
        timeoffset++; // advance to the next offset on every pass, otherwise the loop never terminates
    }
    pc.printf("lag=%d\n\r", peakoffset);
    return peakoffset;
}
int angle(int peakoffset)
{
    float c = 0.15f; // distance between the two mics in metres
    float distance = (peakoffset * timesampling) * sos; // path difference to the speaker in metres
    float ratio = distance / c;
    if (ratio > 1.0f)  ratio = 1.0f;  // clamp so asin() never sees a value outside [-1, 1]
    if (ratio < -1.0f) ratio = -1.0f;
    int theta = asin(ratio) * 180 / PI; // bearing of the speaker in degrees
    return theta;
}
int main() 
{   
    max = 0.0;
    min = 1.0; // AnalogIn::read() returns a normalised 0.0 - 1.0 value
    timesampling = SAMPLE_PERIOD / 1000.0f; // sample period in seconds, used by angle()
    t.start();
    for (int i = 0; i < SAMPLE; i++)
    {
        while (t.read_ms() < i * SAMPLE_PERIOD) {
            // wait until the next sample time....
        }
        // read each channel once per sample period
        left_results[i]  = leftmic.read();
        right_results[i] = rightmic.read();

        if (right_results[i] > max) 
            max = right_results[i];
        if (right_results[i] < min) 
            min = right_results[i];
        if (left_results[i] > max) 
            max = left_results[i];
        if (left_results[i] < min) 
            min = left_results[i];
    }
    t.stop();

    // readings are normalised, so scale by the 3.3 V supply to report volts
    pc.printf(" max=%0.3f, min=%0.3f\n\r", max * 3.3, min * 3.3);

    peakoffset = correlate(left_results, right_results);
    pc.printf("angle=%d\n\r", angle(peakoffset));
}

1 Answer

8 years, 10 months ago.

I think a "simple" solution for this would be an FFT (fast Fourier transform). This lets you analyse the sound spectrum and subtract noise from your sample before computing.

Or maybe just look at the total power input.

Measure the sum of the squares of the most recent 50 or so samples; once that is over a certain threshold on either input, start logging your data for the direction finding.
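
A minimal sketch of that windowed-energy check, written against the arrays in the code above (the function name, the window size and the threshold argument are illustrative, not tuned values):

#define GATE_WINDOW 50 // number of recent samples in the energy window

// True when the energy of the last GATE_WINDOW samples of either channel is
// above the threshold, i.e. something louder than the background was heard.
// Assumes the idle (DC) level has already been subtracted so silence reads
// near 0, and that newest >= GATE_WINDOW - 1.
bool soundDetected(const float left[], const float right[], int newest, float threshold)
{
    float leftEnergy = 0, rightEnergy = 0;
    for (int i = 0; i < GATE_WINDOW; i++) {
        leftEnergy  += left[newest - i]  * left[newest - i];
        rightEnergy += right[newest - i] * right[newest - i];
    }
    return (leftEnergy > threshold) || (rightEnergy > threshold);
}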

An FFT would allow a smarter system, e.g. by looking for the frequencies associated with speech, but assuming your background noise isn't almost as loud as the signal you want to locate then looking for anything over a certain volume should work fine.
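
If a full FFT is more than you want to implement, a single-bin Goertzel filter is a lighter sketch of the same idea: it measures the energy at one chosen frequency, so you can test a frequency in the speech band. The targetHz and sampleRateHz parameters are assumptions you would have to match to how the buffer was actually captured:

// Energy of one frequency bin of a block of samples (Goertzel algorithm).
// Relies on the PI define from the code above.
float goertzelPower(const float samples[], int n, float targetHz, float sampleRateHz)
{
    int k = (int)(0.5f + (n * targetHz) / sampleRateHz); // nearest DFT bin
    float omega = (2.0f * PI * k) / n;
    float coeff = 2.0f * cos(omega);
    float s1 = 0, s2 = 0;
    for (int i = 0; i < n; i++) {
        float s0 = samples[i] + coeff * s1 - s2;
        s2 = s1;
        s1 = s0;
    }
    return s1 * s1 + s2 * s2 - coeff * s1 * s2; // squared magnitude of that bin
}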

posted by Andy A 11 Jun 2015

For the sum of squares, do I have to do it individually for each mic, or for both together? And what is the threshold value at which I have to start logging?

posted by faris abdat 12 Jun 2015

Individually for each mic. I'd look for the sum going over a value on either of them and then start the logging. How high that value is will depend on the microphones, the volumes you are trying to locate and the amount of noise. Something like 4 times the average for background noise would be a good starting point.

Also what is the idle voltage on your microphones? Unless you are throwing half your signal away they should be set so that silence gives you a value of about 0.5. You should then subtract that idle level when reading the signals.

left_results[i]=leftmic.read() - idleLevel;
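
A rough calibration sketch of those two suggestions, run once per microphone while the room is quiet (calibrate is a hypothetical helper, not mbed API, and the sample count and 50-sample window are illustrative):

// Returns a gate threshold of roughly 4x the background energy of a
// 50-sample window, and writes the mic's idle (DC) level to *idleLevel.
float calibrate(AnalogIn &mic, float *idleLevel)
{
    const int N = 1000;

    // Pass 1: the average reading in silence is the idle level
    float sum = 0;
    for (int i = 0; i < N; i++) sum += mic.read();
    *idleLevel = sum / N;

    // Pass 2: average background energy, scaled to a 50-sample window
    float energy = 0;
    for (int i = 0; i < N; i++) {
        float s = mic.read() - *idleLevel; // zero-centred sample
        energy += s * s;
    }
    return 4.0f * (energy * 50.0f / N); // "about 4 times the background" starting point
}

// Usage, one call per mic:
//   float leftIdle, rightIdle;
//   float leftThreshold  = calibrate(leftmic,  &leftIdle);
//   float rightThreshold = calibrate(rightmic, &rightIdle);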
posted by Andy A 12 Jun 2015

This is what I have done, but there are some errors. It prints "layer1", then "layer2", then starts printing left.read() and right.read(), and I'm not sure why.

int main() 
{   
    float sumleft, sumright; // window energies (these were never declared before)
    t.start();
    for (int i = 0; i < SAMPLE; i++)
    {
        while (t.read_ms() < i * SAMPLE_PERIOD) {
            // wait until the next sample time....
        }
        // read and print each channel once per sample, outside the wait loop,
        // otherwise the readings and printfs repeat for the whole sample period
        left_results[i]  = leftmic.read();
        right_results[i] = rightmic.read();
        pc.printf("%.3f, %.3f\n\r", left_results[i], right_results[i]);

        if (i % 50 == 49) // every 50 samples, once a full window exists
        {
            pc.printf("layer1\n\r");
            sumleft = 0;
            sumright = 0;
            for (int a = 0; a < 50; a++) // energy of the 50 most recent samples
            {
                int idx = i - a;
                sumleft  += left_results[idx]  * left_results[idx];
                sumright += right_results[idx] * right_results[idx];
            }
            pc.printf("layer2: %.3f, %.3f\n\r", sumleft, sumright);
            if (sumleft > 1 || sumright > 1) // threshold still needs tuning for your mics
            {
                peakoffset = correlate(left_results, right_results);
                pc.printf("angle=%d\n\r", angle(peakoffset));
            }
        }
    }
}
posted by faris abdat 18 Jun 2015