%% Experiment 1: time-domain analysis of speech signals with MATLAB
%% 1) Short-time energy
% (1) Rectangular window
% NOTE(review): the wav filename was lost in extraction -- TODO restore it.
a = wavread('speech.wav');
a = a(:)';
subplot(6,1,1), plot(a); title('原始语音信号');
N = 32;
for i = 2:6
    h = linspace(1,1,N);                 % rectangular window of length N (all ones)
    En = conv(h, a.*a);                  % convolution yields the short-time energy En
    subplot(6,1,i), plot(En);
    if(i==2),     legend('N=32');
    elseif(i==3), legend('N=64');
    elseif(i==4), legend('N=128');
    elseif(i==5), legend('N=256');
    elseif(i==6), legend('N=512');
    end
    N = 2*N;                             % double the window length each pass
end

% (2) Hamming-type (hanning) window
a = wavread('speech.wav');
a = a(:)';
subplot(6,1,1), plot(a); title('原始语音信号');
N = 32;
for i = 2:6
    h = hanning(2.^(i-2)*N);             % window of length 2^(i-2)*N: 32,64,...,512
    En = conv(h, a.*a);                  % short-time energy with the tapered window
    subplot(6,1,i), plot(En);
    if(i==2),     legend('N=32');
    elseif(i==3), legend('N=64');
    elseif(i==4), legend('N=128');
    elseif(i==5), legend('N=256');
    elseif(i==6), legend('N=512');
    end
end

%% 2) Short-time average zero-crossing rate
a = wavread('speech.wav');
n = length(a);
N = 320;                                 % analysis window length
subplot(3,1,1), plot(a);
h = linspace(1,1,N);
En = conv(h, a.*a);                      % short-time energy, for comparison
subplot(3,1,2), plot(En);
for i = 1:n-1
    % NOTE(review): source read "if a(i)=0"; a sign test (>= 0) is the
    % standard zero-crossing construction -- confirm against the handout.
    if a(i) >= 0
        b(i) = 1;
    else
        b(i) = -1;
    end
    if a(i+1) >= 0
        b(i+1) = 1;
    else
        b(i+1) = -1;
    end
    w(i) = abs(b(i+1) - b(i));           % |sign difference| of adjacent samples
end
k = 1; j = 0;
while (k+N-1) < n
    Zm(k) = 0;
    for i = 0:N-1
        Zm(k) = Zm(k) + w(k+i);
    end
    j = j + 1;
    k = k + N/2;                         % hop by half a window each step
end
for w = 1:j
    Q(w) = Zm(160*(w-1)+1)/(2*N);        % short-time average zero-crossing rate
end
subplot(3,1,3), plot(Q), grid;
%% 3) Autocorrelation function
N = 240;                                  % frame length (completes the "N=2"/"40" split)
% NOTE(review): the wav filename was lost in extraction -- TODO restore it.
y = wavread('speech.wav');
y = y(:);
x = y(13271:13510);                       % one 240-sample frame from the voiced region
x = x .* rectwin(240);                    % rectangular window
R = zeros(1,240);
for k = 1:240
    for n = 1:240-k
        R(k) = R(k) + x(n)*x(n+k);        % autocorrelation at lag k
    end
end
j = 1:240;
plot(j,R); grid;

%% Experiment 2: frequency-domain analysis of speech signals with MATLAB
%% 1) Short-time spectrum
clear
a = wavread('speech.wav');
subplot(2,1,1), plot(a); title('original signal'); grid
N = 256;
h = hamming(N);
for m = 1:N
    b(m) = a(m)*h(m);                     % Hamming-window the first N samples
end
y = 20*log(abs(fft(b)));                  % log-magnitude spectrum (natural log, as in source)
subplot(2,1,2), plot(y); title('短时谱');
%% 2) Spectrogram
% NOTE(review): the wav filename was lost in extraction -- TODO restore it.
[x,fs,nbits] = wavread('speech.wav');
x = x(:);
specgram(x,512,fs,100);                   % 512-point FFT; remaining args as in source
xlabel('时间(s)'); ylabel('频率(Hz)'); title('语谱图');

%% 3) Real cepstrum and complex cepstrum
% (1) Rectangular window
clear
a = wavread('speech.wav',[4000,4350]);    % samples 4000..4350 of the recording
N = 300;
% NOTE(review): the window construction was lost in extraction;
% rectangular is assumed from the section title -- confirm.
h = rectwin(N);
for m = 1:N
    b(m) = a(m)*h(m);
end
c = cceps(b); c = fftshift(c);            % complex cepstrum, zero-quefrency centered
d = rceps(b); d = fftshift(d);            % real cepstrum, zero-quefrency centered
subplot(2,1,1), plot(d); title('加矩形窗时的倒谱');
subplot(2,1,2), plot(c); title('加矩形窗时的复倒谱');

% (2) Hamming window
clear
a = wavread('speech.wav',[4000,4350]);
a = a(:);
N = 300;
h = hamming(N);
for m = 1:N
    b(m) = a(m)*h(m);
end
c = cceps(b); c = fftshift(c);
d = rceps(b); d = fftshift(d);
subplot(2,1,1), plot(d); title('加汉明窗时的倒谱');
subplot(2,1,2), plot(c); title('加汉明窗时的复倒谱');
%% Experiment 3: LPC analysis with MATLAB
% NOTE(review): the wav filename was lost in extraction -- TODO restore it.
MusicSource = wavread('speech.wav');
MusicSource = MusicSource(:)';
Music_source = MusicSource;
N = 256;                                  % window length, N = 100 - 1000
Hamm = hamming(N)';                       % Hamming window (row, to match the frame)
frame = input('请键入想要处理的帧位置 = ');  % origin is the selected frame
origin = Music_source((frame - 1)*(N/2) + 1 : (frame - 1)*(N/2) + N);
Frame = origin .* Hamm;

% Short Time Fourier Transform
[s1,f1,t1] = specgram(MusicSource,N,N/2,N);
[Xs1,Ys1] = size(s1);
for i = 1:Xs1
    FTframe1(i) = s1(i,frame);            % STFT column of the selected frame
end

N1 = input('请键入预测器阶数 = ');          % N1 is the predictor order
[coef,gain] = lpc(Frame,N1);              % LPC analysis using Levinson-Durbin recursion
est_Frame = filter([0 -coef(2:end)],1,Frame);  % one-step linear prediction of the frame
FFT_est = fft(est_Frame);
err = Frame - est_Frame;                  % prediction error
% FFT_err = fft(err);
subplot(2,1,1), plot(1:N,Frame,1:N,est_Frame,'-r'); title('原始语音帧vs.预测后语音帧');
subplot(2,1,2), plot(err); title('误差');
pause
%subplot(2,1,2),plot(f,20*log(abs(FTframe2)));

% Gain solution using G^2 = Rn(0) - sum(ai*Rn(i)), i = 1,2,...,P
fLength(1 : 2*N) = [origin, zeros(1,N)];
Xm = fft(fLength, 2*N);
X = Xm .* conj(Xm);
Y = fft(X, 2*N);
Rk = Y(1 : N);
PART = sum(coef(2 : N1 + 1) .* Rk(1 : N1));
G = sqrt(sum(Frame.^2) - PART);
A = (FTframe1 - FFT_est(1 : length(f1))) ./ FTframe1;   % inverse filter A(z)
subplot(2,1,1), plot(f1,20*log(abs(FTframe1)),f1,20*log(abs(1 ./ A)));
subplot(2,1,2), plot(f1,20*log(abs(G ./ A))); title('LPC谱');
%plot(abs(ifft(FTframe1 ./ (G ./ A))));   % excitation
%plot(f1,20*log(abs(FFT_est(1 : length(f1))) .* A / G));
%pause

% Pitch detection from the prediction-error spectrum
temp = FTframe1 - FFT_est(1 : length(f1));
% without removing the higher quefrencies
pitch1 = log(abs(temp));
pLength = length(pitch1);
result1 = ifft(pitch1, N);
% with the highest 33 quefrency bins zeroed out
pitch1((pLength - 32) : pLength) = 0;
result2 = ifft(pitch1, N);
% real cepstrum computed directly from the error, via the library routine
pitch = fftshift(rceps(err));
origin_pitch = fftshift(rceps(Frame));
subplot(211), plot(origin_pitch); title('原始语音帧倒谱(直接调用函数)');
subplot(212), plot(pitch); title('预测误差倒谱(直接调用函数)');
subplot(211), plot(1:length(result1), fftshift(real(result1))); title('预测误差倒谱(根据定义编写,没有去除高频分量)');
subplot(212), plot(1:length(result2), fftshift(real(result2))); title('预测误差倒谱(根据定义编写,去除高频分量)');

%% Experiment 4: VQ-based speaker-dependent isolated-word recognition
% 1. mfcc.m
% function ccc = mfcc(...)  -- the definition is truncated in this excerpt
%                              and is not reconstructed here.
copyright@ 2008-2022 冰豆网网站版权所有
经营许可证编号:鄂ICP备2022015515号-1