1、 int width, height; int allocated; VideoPicture; typedef struct VideoState AVFormatContext *pFormatCtx; int videoStream, audioStream; AVStream *audio_st; PacketQueue audioq; uint8_t audio_buf(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2; unsigned int audio_buf_size; unsigned int audio_buf_index; AVPacket a
2、udio_pkt; uint8_t *audio_pkt_data; int audio_pkt_size; AVStream *video_st; PacketQueue videoq; VideoPicture pictqVIDEO_PICTURE_QUEUE_SIZE; int pictq_size, pictq_rindex, pictq_windex; SDL_mutex *pictq_mutex; SDL_cond *pictq_cond; SDL_Thread *parse_tid; SDL_Thread *video_tid; char filename1024; int qu
3、it; VideoState; SDL_Surface *screen; VideoState *global_video_state; void packet_queue_init(PacketQueue *q) memset(q, 0, sizeof(PacketQueue); q-mutex = SDL_CreateMutex();cond = SDL_CreateCond(); int packet_queue_put(PacketQueue *q, AVPacket *pkt) AVPacketList *pkt1; if(av_dup_packet(pkt) pkt = *pkt;
4、next = NULL; SDL_LockMutex(q-mutex);q-last_pkt) first_pkt = pkt1; else last_pkt-next = pkt1;last_pkt = pkt1;nb_packets+;size += pkt1-pkt.size; SDL_CondSignal(q-cond); SDL_UnlockMutex(q- return 0; static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) int ret; for(;) if(global_video_st
5、ate-quit) ret = -1; break; pkt1 = q-first_pkt; if (pkt1) first_pkt = pkt1-next;first_pkt) last_pkt = NULL;nb_packets-;size -= pkt1- *pkt = pkt1-pkt; av_free(pkt1); ret = 1; else if (!block) ret = 0; SDL_CondWait(q-cond, q- return ret; int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int bu
6、f_size) int len1, data_size; AVPacket *pkt = &is-audio_pkt; while(is-audio_pkt_size data_size = buf_size; len1 = avcodec_decode_audio2(is-audio_st-codec, (int16_t *)audio_buf, &data_size, is-audio_pkt_data, is-audio_pkt_size); if(len1 audio_pkt_size = 0;audio_pkt_data += len1;audio_pkt_size -= len1;
7、 if(data_size data) av_free_packet(pkt); if(is- if(packet_queue_get(&audioq, pkt, 1) data;audio_pkt_size = pkt-size; void audio_callback(void *userdata, Uint8 *stream, int len) VideoState *is = (VideoState *)userdata; int len1, audio_size; while(len audio_buf_index = is-audio_buf_size) audio_size =
8、audio_decode_frame(is, is-audio_buf, sizeof(is-audio_buf); if(audio_size audio_buf, 0, is-audio_buf_size);audio_buf_size = audio_size;audio_buf_index = 0; len1 = is-audio_buf_size - is-audio_buf_index; if(len1 len) len1 = len; memcpy(stream, (uint8_t *)is-audio_buf + is-audio_buf_index, len1); len -
9、= len1; stream += len1;audio_buf_index += len1; static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) /printf(sdl_refresh_timer_cb called:interval-%dn,interval); SDL_Event event; event.type = FF_REFRESH_EVENT; event.user.data1 = opaque; SDL_PushEvent(&event); /派发FF_REFRESH_EVENT事件 static
10、 void schedule_refresh(VideoState *is, int delay) schedule_refresh called:delay-%dn,delay); SDL_AddTimer(delay, sdl_refresh_timer_cb, is); /sdl_refresh_timer_cb函数在延时delay毫秒后,只会被执行一次,is是sdl_refresh_timer_cb的参数 void video_display(VideoState *is) video_display calledn); SDL_Rect rect; VideoPicture *vp;
11、 AVPicture pict; float aspect_ratio; int w, h, x, y; int i; vp = &pictqis-pictq_rindex; if(vp-bmp) video_st-codec-sample_aspect_ratio.num = 0) aspect_ratio = 0; aspect_ratio = av_q2d(is-sample_aspect_ratio) * width / is-height; if(aspect_ratio width / (float)is- h = screen-h; w = (int)(h * aspect_ra
12、tio) & -3; if(w screen-w) w = screen-w; h = (int)(w / aspect_ratio) & x = (screen-w - w) / 2; y = (screen-h - h) / 2; rect.x = x; rect.y = y; rect.w = w; rect.h = h; SDL_DisplayYUVOverlay(vp-bmp, &rect); void video_refresh_timer(void *userdata) video_st) pictq_size = 0) schedule_refresh(is, 1); sche
13、dule_refresh(is, 80); video_display(is); if(+is-pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE) pictq_rindex = 0; SDL_LockMutex(is-pictq_mutex);pictq_size-; SDL_CondSignal(is-pictq_cond); SDL_UnlockMutex(is- schedule_refresh(is, 100); void alloc_picture(void *userdata) pictq_windex; / we already have one m
14、ake another, bigger/smaller SDL_FreeYUVOverlay(vp-bmp); / Allocate a place to put our YUV image on that screen vp-bmp = SDL_CreateYUVOverlay(is-width, height, SDL_YV12_OVERLAY, screen);width = is-width;height = is-allocated = 1; int queue_picture(VideoState *is, AVFrame *pFrame) queue_picture called
15、n int dst_pix_fmt; static struct SwsContext *img_convert_ctx; if (img_convert_ctx = NULL) img_convert_ctx = sws_getContext(is-width, is-pix_fmt, PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL); fprintf(stderr, Cannot initialize the conversion contextn exit(1);pictq_size = VIDEO_PICTURE_QUEUE_SIZE & !
16、SDL_CondWait(is-pictq_cond, is- / windex is set to 0 initially if(!vp-bmp | width !width | height !height) allocated = 0; event.type = FF_ALLOC_EVENT; event.user.data1 = is; while(!allocated & /没有得到消息时解锁,得到消息后加锁,和SDL_CondSignal配对使用 SDL_LockYUVOverlay(vp- dst_pix_fmt = PIX_FMT_YUV420P; pict.data0 = v
17、p-bmp-pixels0; pict.data1 = vp-pixels2; pict.data2 = vp-pixels1; pict.linesize0 = vp-pitches0; pict.linesize1 = vp-pitches2; pict.linesize2 = vp-pitches1; / Convert the image into YUV format that SDL uses sws_scale(img_convert_ctx, pFrame-data, pFrame-linesize, 0, is-height, pict.data, pict.linesize
18、); SDL_UnlockYUVOverlay(vp-pictq_windex = VIDEO_PICTURE_QUEUE_SIZE) pictq_windex = 0;pictq_size+; int video_thread(void *arg) video_thread called VideoState *is = (VideoState *)arg; AVPacket pkt1, *packet = &pkt1; int len1, frameFinished; AVFrame *pFrame; pFrame = avcodec_alloc_frame();videoq, packet, 1) codec, pFrame, &frameFinished, packet-data, packet-size); / Did we get a video f
/*
 * Website footer left over from the page this code was scraped from
 * (not part of the program):
 * copyright@ 2008-2022 冰豆网网站版权所有
 * 经营许可证编号:鄂ICP备2022015515号-1
 */