Commit 517e104b authored by Mikhail Karpenko

WIP: write audio samples to file

This commit includes lots of debug output and is intended to save the
current state for further tests. Minor changes include new time
calculation functions; the start of the audio stream is now synchronized
with the next nearest video frame (it previously fell between frames).
parent c6891c7f
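The gist of the synchronization change in sendImageFrame below: find the first video frame boundary at or after the audio stream's start time stamp, then drop just enough audio samples that recording begins exactly on that boundary. A minimal standalone sketch of the arithmetic (helper and variable names here are illustrative, not the commit's API):

#include <stdio.h>
#include <sys/time.h>

/* Illustrative helper: t1 - t2, assuming t1 >= t2. */
static struct timeval tv_sub(struct timeval t1, struct timeval t2)
{
    struct timeval r = { t1.tv_sec - t2.tv_sec, t1.tv_usec - t2.tv_usec };
    if (r.tv_usec < 0) { r.tv_sec -= 1; r.tv_usec += 1000000; }
    return r;
}

/* How many audio samples to drop so that recording starts exactly on the
 * first video frame boundary at or after the audio start time stamp. */
static long samples_to_skip(struct timeval ts_audio, struct timeval ts_video,
                            long frame_period_us, long sample_rate)
{
    struct timeval next = ts_video;
    /* step forward one frame period at a time until we reach or pass the audio start */
    while (next.tv_sec < ts_audio.tv_sec ||
           (next.tv_sec == ts_audio.tv_sec && next.tv_usec < ts_audio.tv_usec)) {
        next.tv_usec += frame_period_us;
        if (next.tv_usec >= 1000000) { next.tv_sec += 1; next.tv_usec -= 1000000; }
    }
    struct timeval d = tv_sub(next, ts_audio);
    unsigned long long us = (unsigned long long)d.tv_sec * 1000000ULL
                          + (unsigned long long)d.tv_usec;
    return (long)((double)sample_rate / 1000000.0 * (double)us);
}

int main(void)
{
    struct timeval audio_start = { 10, 130000 }; /* hypothetical time stamps */
    struct timeval video_frame = { 10, 100000 };
    /* 25 fps -> 40000 us period; next boundary is 10.140000, i.e. 10000 us
     * after audio start; at 44100 Hz that is 441 samples to drop */
    printf("skip %ld samples\n",
           samples_to_skip(audio_start, video_frame, 40000, 44100));
    return 0;
}

At 44100 Hz a 10 ms offset corresponds to 441 samples, which matches the audio_skip_samples value computed in the hunk below.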
@@ -540,6 +540,12 @@ int sendImageFrame(camogm_state *state)
int fp;
int port = state->port_num;
// === debug code ===
struct timeval tv;
gettimeofday(&tv, NULL);
fprintf(debug_file, "start time %ld:%06ld\n", tv.tv_sec, tv.tv_usec);
// === end of debug ===
// This is probably needed only for Quicktime (not to exceed already allocated frame index)
if (!state->rawdev_op && (state->frameno >= (state->max_frames))) {
D3(fprintf(debug_file, "sendImageFrame:1: state->frameno(0x%x) >= state->max_frames(0x%x)\n", state->frameno, state->max_frames));
@@ -624,6 +630,10 @@ int sendImageFrame(camogm_state *state)
return -CAMOGM_FRAME_NOT_READY; // the required frame is not ready
}
// === debug code ===
gettimeofday(&tv, NULL);
fprintf(debug_file, " time %ld:%06ld ", tv.tv_sec, tv.tv_usec);
// === end of debug ===
D3(fprintf(debug_file, "_4_"));
if (state->exif) {
@@ -690,42 +700,31 @@ int sendImageFrame(camogm_state *state)
if (state->audio.ctx_a.begin_of_stream_with_audio) {
D6(fprintf(debug_file, "\n"));
if (state->audio.ctx_a.audio_trigger) {
state->audio.ts_video_start = state->audio.ts_audio;
state->audio.ts_video_start.tv_usec += state->frame_period[port] / 2;
time_normalize(&state->audio.ts_video_start);
state->audio.ctx_a.audio_trigger = 0;
// calculate how many audio samples we need to skip here
long audio_to_skip = 0;
struct timeval tv = state->audio.ts_video;
// calculate how many audio frames we need to skip to synch with the next video frame
state->audio.ts_video_start = state->audio.ts_audio;
struct timeval tv = state->audio.ts_video; // next frame right after audio started
while (time_comp(&tv, &state->audio.ts_video_start) < 0) {
tv.tv_usec += state->frame_period[port];
time_normalize(&tv);
}
tv.tv_sec -= 1;
tv.tv_usec += 1000000;
tv.tv_usec -= state->frame_period[port] / 2;
time_normalize(&tv);
if (tv.tv_sec != state->audio.ts_audio.tv_sec) {
audio_to_skip = tv.tv_sec - state->audio.ts_audio.tv_sec;
audio_to_skip *= 1000000;
}
audio_to_skip += tv.tv_usec;
audio_to_skip -= state->audio.ts_audio.tv_usec;
struct timeval skip_audio_time = time_sub(&tv, &state->audio.ts_audio); // audio time we need to skip to the next frame
unsigned long long skip_audio_us = time_to_us(&skip_audio_time);
double s = state->audio.audio_rate;
s /= 1000.0;
s *= audio_to_skip;
s *= skip_audio_us;
s /= 1000.0;
state->audio.ctx_a.audio_skip_samples = (long) s;
state->audio.ctx_a.time_start = tv;
D6(fprintf(debug_file , "audio started at: %ld:%06ld; we need to record it from: %ld:%06ld; audio_to_skip_us == %ld; "
D6(fprintf(debug_file , "audio started at: %ld:%06ld; we need to record it from: %ld:%06ld; audio_to_skip_us == %lld; "
"audio samples to skip == %lld\n",
state->audio.ts_audio.tv_sec, state->audio.ts_audio.tv_usec, tv.tv_sec, tv.tv_usec, audio_to_skip,
state->audio.ts_audio.tv_sec, state->audio.ts_audio.tv_usec, tv.tv_sec, tv.tv_usec, skip_audio_us,
state->audio.ctx_a.audio_skip_samples));
}
D6(fprintf(debug_file, "audio (start): %ld:%06ld; video (current): %ld:%06ld; frame period is: %d us\n",
state->audio.ts_audio.tv_sec, state->audio.ts_audio.tv_usec, state->audio.ts_video.tv_sec, state->audio.ts_video.tv_usec,
state->frame_period[port]));
if (time_comp(&state->audio.ts_video_start, &state->audio.ts_video) > 0) {
if (time_comp(&state->audio.ctx_a.time_start, &state->audio.ts_video) > 0) {
D6(fprintf(debug_file, "skip this video frame\n"));
sync_ok = 0;
} else {
@@ -734,6 +733,11 @@ int sendImageFrame(camogm_state *state)
}
}
// === debug code ===
gettimeofday(&tv, NULL);
fprintf(debug_file, " time %ld:%06ld ", tv.tv_sec, tv.tv_usec);
// === end of debug ===
if (sync_ok) {
switch (state->format) {
case CAMOGM_FORMAT_NONE: rslt = 0; break;
@@ -746,6 +750,12 @@ int sendImageFrame(camogm_state *state)
// skip only first video frames that are ahead of audio stream
rslt = 0;
}
// === debug code ===
gettimeofday(&tv, NULL);
fprintf(debug_file, " time %ld:%06ld ", tv.tv_sec, tv.tv_usec);
// === end of debug ===
if (rslt) {
D3(fprintf(debug_file, "sendImageFrame:12: camogm_frame_***() returned %d\n", rslt));
return rslt;
@@ -1434,7 +1444,14 @@ char * getLineFromPipe(FILE* npipe)
if (!cmdbufp) cmdbuf[cmdbufp] = 0; // null-terminate first access (probably not needed for the static buffer)
nlp = strpbrk(cmdbuf, ";\n");
if (!nlp) { //no complete string, try to read more
// === debug code (around fread) ===
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
fl = fread(&cmdbuf[cmdbufp], 1, sizeof(cmdbuf) - cmdbufp - 1, npipe);
gettimeofday(&tv2, NULL);
fprintf(debug_file, "pipe read time: start %ld:%06ld, end %ld:%06ld\n", tv1.tv_sec, tv1.tv_usec, tv2.tv_sec, tv2.tv_usec);
// === end of debug ===
cmdbuf[cmdbufp + fl] = 0;
// is there any complete string in a buffer after reading?
nlp = strpbrk(&cmdbuf[cmdbufp], ";\n"); // there were no new lines before cmdbufp
@@ -1743,10 +1760,40 @@ int listener_loop(camogm_state *state)
if (cmd < 0) D0(fprintf(debug_file, "Unrecognized command\n"));
} else if (state->prog_state == STATE_RUNNING) { // no commands in queue, started
switch ((rslt = -sendImageFrame(state))) {
case 0:
if (state->format == CAMOGM_FORMAT_MOV) {
case 0: {
// === debug ===
double fps = 1000000.0 / state->frame_period[state->port_num]; // floating-point division to avoid truncation
double avg_rate = 0;
double ratio = 0;
struct timeval tv;
int samples;
if (state->frameno != 0) {
avg_rate = ((double)(state->audio.audio_samples + state->audio.avail_samples) / (double)state->frameno) * fps;
ratio = (double)state->audio.audio_rate / avg_rate;
}
samples = ((double)state->frameno / fps) * state->audio.audio_rate;
fprintf(debug_file, "frames recorded: %d, average sampling rate: %f, ratio: %f, expected sample count: %d\n",
state->frameno, avg_rate, ratio, samples);
gettimeofday(&tv, NULL);
fprintf(debug_file, "system time %ld:%06ld\n", tv.tv_sec, tv.tv_usec);
// === end of debug ===
// skip audio processing until the sync video frame is found
if (state->format == CAMOGM_FORMAT_MOV && !state->audio.ctx_a.begin_of_stream_with_audio) {
audio_process(&state->audio);
// === debug code ===
float fps = 1000000.0f / state->frame_period[state->port_num]; // floating-point division to avoid truncation
int samples;
samples = ((float)state->frameno / fps) * state->audio.audio_rate;
float r = (float)state->audio.audio_samples / (float)samples;
fprintf(debug_file, "(recorded samples / expected samples) = %f\n", r);
long calc_diff = samples - state->audio.calc_frames;
fprintf(debug_file, "calc_frames_diff = %ld\n", calc_diff);
// === end of debug ===
state->audio.frame_period = state->frame_period[state->port_num];
}
}
break; // frame sent OK, nothing to do (TODO: check file length/duration)
case CAMOGM_FRAME_NOT_READY: // just wait for the frame to appear at the current pointer
// we'll wait for a frame, not to waste resources. But if the compressor is stopped this program will not respond to any commands
@@ -1756,7 +1803,15 @@ int listener_loop(camogm_state *state)
D0(fprintf(debug_file, "%s:line %d got broken frame (%d) before waiting for ready\n", __FILE__, __LINE__, fp0));
rslt = CAMOGM_FRAME_BROKEN;
} else {
// === debug code (around lseek) ===
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
fp1 = lseek(state->fd_circ[curr_port], LSEEK_CIRC_WAIT, SEEK_END);
gettimeofday(&tv2, NULL);
fprintf(debug_file,"time in sleep: start %ld:%06ld, end %ld:%06ld\n", tv1.tv_sec, tv1.tv_usec, tv2.tv_sec, tv2.tv_usec);
// === end of debug ===
if (fp1 < 0) {
D0(fprintf(debug_file, "%s:line %d got broken frame (%d) while waiting for ready. Before that fp0=0x%x\n", __FILE__, __LINE__, fp1, fp0));
rslt = CAMOGM_FRAME_BROKEN;
@@ -2038,6 +2093,13 @@ unsigned int select_port(camogm_state *state)
if (state->prog_state == STATE_STARTING || state->prog_state == STATE_RUNNING)
D6(fprintf(debug_file, "Selecting sensor port, buffer free size: "));
for (int i = 0; i < SENSOR_PORTS; i++) {
// === debug code ===
struct timeval tv;
gettimeofday(&tv, NULL);
fprintf(debug_file, " time: %ld:%06ld ", tv.tv_sec, tv.tv_usec);
// === end of debug ===
if (is_chn_active(state, i)) {
file_pos = lseek(state->fd_circ[i], 0, SEEK_CUR);
if (file_pos != -1) {
......
@@ -26,7 +26,11 @@
#include "camogm_audio.h"
#include "thelper.h"
// for debug only
#include <math.h>
static void audio_deinit(struct audio *audio);
static bool skip_audio(struct audio *audio, snd_pcm_uframes_t frames);
/**
* Initialize audio interface.
@@ -60,12 +64,14 @@ void audio_init(struct audio *audio, bool restart)
snd_pcm_status_t *status; // allocated on stack, do not free
snd_timestamp_t audio_ts;
audio->audio_format = SND_PCM_FORMAT_S16_LE;
audio->ctx_a.sbuffer_len = audio->audio_rate * audio->ctx_a.sample_time;
audio->ctx_a.sbuffer_len /= 1000;
audio->ctx_a.sbuffer_len -= audio->ctx_a.sbuffer_len % 2;
// 'while' loop here just to break initialization sequence after an error
while (true) {
audio->ctx_a.sbuffer = (void *)malloc(audio->ctx_a.sbuffer_len * audio->audio_channels * AUDIO_BPS);
size_t buff_size = audio->ctx_a.sbuffer_len * audio->audio_channels * (snd_pcm_format_physical_width(audio->audio_format) / 8);
audio->ctx_a.sbuffer = malloc(buff_size);
if (audio->ctx_a.sbuffer == NULL) {
D0(fprintf(debug_file, "error: can not allocate buffer for audio samples: %s\n", strerror(errno)));
break;
@@ -79,11 +85,15 @@ void audio_init(struct audio *audio, bool restart)
break;
if ((err = snd_pcm_hw_params_set_access(audio->ctx_a.capture_hnd, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0)
break;
if ((err = snd_pcm_hw_params_set_format(audio->ctx_a.capture_hnd, hw_params, SND_PCM_FORMAT_S16_LE)) < 0)
if ((err = snd_pcm_hw_params_set_format(audio->ctx_a.capture_hnd, hw_params, audio->audio_format)) < 0)
break;
if ((err = snd_pcm_hw_params_set_rate_near(audio->ctx_a.capture_hnd, hw_params, &t, 0)) < 0)
break;
if (audio->audio_rate != t)
D1(fprintf(debug_file, "Requested audio sampling rate is not supported, set %u Hz\n", t));
audio->audio_rate = t;
if ((err = snd_pcm_hw_params_set_channels(audio->ctx_a.capture_hnd, hw_params, audio->audio_channels)) < 0)
break;
if ((err = snd_pcm_hw_params_set_period_time_near(audio->ctx_a.capture_hnd, hw_params, &period_time, 0)) < 0)
@@ -110,7 +120,8 @@ void audio_init(struct audio *audio, bool restart)
snd_pcm_prepare(audio->ctx_a.capture_hnd);
snd_pcm_reset(audio->ctx_a.capture_hnd);
audio_set_volume(audio->audio_volume);
snd_pcm_readi(audio->ctx_a.capture_hnd, (void *)audio->ctx_a.sbuffer, 8);
// read some samples to force the driver to start time stamping
snd_pcm_readi(audio->ctx_a.capture_hnd, audio->ctx_a.sbuffer, 8);
snd_pcm_status_alloca(&status);
snd_pcm_status(audio->ctx_a.capture_hnd, status);
snd_pcm_status_get_tstamp(status, &audio_ts);
@@ -126,23 +137,23 @@ void audio_init(struct audio *audio, bool restart)
gettimeofday(&sys_tv, NULL);
struct timeval d; // system and FPGA time difference
d.tv_sec = sys_tv.tv_sec - 1;
d.tv_usec = sys_tv.tv_usec + 1000000;
d.tv_sec -= fpga_tv.tv_sec;
d.tv_usec -= fpga_tv.tv_usec;
time_normalize(&d);
d = time_sub(&sys_tv, &fpga_tv);
audio->sys_fpga_timediff = d;
struct timeval tv;
tv.tv_sec = audio_ts.tv_sec;
tv.tv_usec = audio_ts.tv_usec;
tv.tv_sec -= 1;
tv.tv_usec += 1000000;
tv.tv_sec -= d.tv_sec;
tv.tv_usec -= d.tv_usec;
time_normalize(&tv);
audio->ts_audio = tv;
D4(fprintf(debug_file, "audio_init OK, system time = %ld:%06ld, FPGA time = %ld:%06ld\n",
sys_tv.tv_sec, sys_tv.tv_usec, fpga_tv.tv_sec, fpga_tv.tv_usec));
audio->ts_audio = time_sub(&tv, &d);
audio->sf_timediff = tv;
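// i.e. ts_audio is the ALSA trigger time stamp translated from system time into the FPGA time base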
// === debug code ===
snd_pcm_uframes_t val;
snd_pcm_hw_params_get_buffer_size_max(hw_params, &val);
fprintf(debug_file, "ALSA buffer size: %lu\n", val);
// === end of debug ===
D4(fprintf(debug_file, "audio_init OK, system time = %ld:%06ld, FPGA time = %ld:%06ld, audio start time = %ld:%06ld, audio_ts = %ld:%06ld\n",
sys_tv.tv_sec, sys_tv.tv_usec, fpga_tv.tv_sec, fpga_tv.tv_usec, audio->ts_audio.tv_sec, audio->ts_audio.tv_usec,
audio_ts.tv_sec, audio_ts.tv_usec));
} else {
audio->set_audio_enable = 0;
audio->audio_enable = 0;
@@ -165,6 +176,7 @@ void audio_start(struct audio *audio)
* Process audio stream.
* Asserts:
* audio->write_samples callback must be set
* the number of audio frames remaining for recording must be non-negative
* @param audio pointer to a structure containing audio parameters and buffers
* @return None
*/
@@ -178,11 +190,11 @@ void audio_process(struct audio *audio)
snd_timestamp_t ts;
snd_pcm_status_t *status; // allocated on stack, do not free
assert(audio->write_samples);
if (audio->audio_enable == 0)
return;
assert(audio->write_samples);
snd_pcm_status_alloca(&status);
for (;;) {
long avail = 0;
@@ -195,11 +207,44 @@ void audio_process(struct audio *audio)
snd_pcm_status(audio->ctx_a.capture_hnd, status);
snd_pcm_status_get_tstamp(status, &ts);
avail = snd_pcm_status_get_avail(status);
snd_pcm_uframes_t to_read = audio->ctx_a.sbuffer_len; // length in samples
if (audio->ctx_a.rem_samples < 0)
audio->ctx_a.rem_samples = 0;
if (avail >= audio->ctx_a.sbuffer_len && audio->ctx_a.rem_samples == 0)
// === debug code ===
int sbf; // samples before recent frame
int samples, total;
struct timeval av_tdiff, ts_corrected;
total = audio->audio_samples + audio->skip_samples;
fprintf(debug_file, "recent frame tstamp: %ld:%06ld\n", audio->ts_video.tv_sec, audio->ts_video.tv_usec);
fprintf(debug_file, "available samples: %ld, recorded samples (+skipped): %ld (%d)\n",
avail, audio->audio_samples, total);
ts_corrected = time_sub(&ts, &audio->sys_fpga_timediff);
fprintf(debug_file, "tstamp: %ld:%06ld, corrected tstamp: %ld:%06ld\n", ts.tv_sec, ts.tv_usec, ts_corrected.tv_sec, ts_corrected.tv_usec);
av_tdiff = time_sub(&ts_corrected, &audio->ts_video);
samples = (int)floor(((double)av_tdiff.tv_sec + (double)av_tdiff.tv_usec / 1000000) * audio->audio_rate);
fprintf(debug_file, "time diff since last frame: %ld:%06ld, # of samples since last frame: %d\n", av_tdiff.tv_sec, av_tdiff.tv_usec, samples);
if (samples > avail) {
// some samples have already been recorded
samples -= avail;
sbf = audio->audio_samples - samples;
} else {
sbf = audio->audio_samples + (avail - samples);
}
fprintf(debug_file, "samples before recent frame: %d\n", sbf);
if (avail == 0) {
snd_pcm_state_t s = snd_pcm_status_get_state(status);
fprintf(debug_file, "stream state: %d\n", s);
}
audio->avail_samples = avail;
// === end of debug ===
assert(audio->ctx_a.rem_samples >= 0);
snd_pcm_uframes_t to_read = audio->ctx_a.sbuffer_len; // length in audio frames
if (avail >= audio->ctx_a.sbuffer_len && audio->ctx_a.rem_samples == 0) {
if (skip_audio(audio, avail))
continue;
to_push_flag = 1;
}
if (audio->ctx_a.rem_samples > 0) {
if (audio->ctx_a.rem_samples > audio->ctx_a.sbuffer_len) {
if (avail >= audio->ctx_a.sbuffer_len) {
@@ -216,30 +261,26 @@ void audio_process(struct audio *audio)
}
}
if (to_push_flag) {
slen = snd_pcm_readi(audio->ctx_a.capture_hnd, (void *)audio->ctx_a.sbuffer, to_read);
slen = snd_pcm_readi(audio->ctx_a.capture_hnd, audio->ctx_a.sbuffer, to_read);
if (slen > 0) {
int flag = 1;
long offset = 0;
// check the length of the movie and sound track
if (to_push_flag == 1) {
struct timeval sl = audio->ctx_a.time_last;
sl.tv_usec += audio->ctx_a.sample_time;
time_normalize(&sl);
struct timeval m_end;
m_end = audio->ts_video;
m_end.tv_usec += audio->frame_period / 2;
time_normalize(&m_end);
struct timeval m_len;
m_len.tv_sec = m_end.tv_sec - 1;
m_len.tv_usec = m_end.tv_usec + 1000000;
m_len.tv_sec -= audio->ctx_a.time_start.tv_sec;
m_len.tv_usec -= audio->ctx_a.time_start.tv_usec;
time_normalize(&m_len);
if (time_comp(&sl, &m_len) > 0) {
// sound too early - skip this sequence
break;
}
}
// // check the length of the movie and sound track, proceed only if audio and video already in sync
// if (to_push_flag == 1 && audio->ctx_a.begin_of_stream_with_audio) {
// struct timeval sl = audio->ctx_a.time_last;
// sl.tv_usec += audio->ctx_a.sample_time;
// time_normalize(&sl);
// struct timeval m_end;
// m_end = audio->ts_video;
// m_end.tv_usec += audio->frame_period / 2;
// time_normalize(&m_end);
// struct timeval m_len;
// m_len = time_sub(&m_end, &audio->ctx_a.time_start);
// if (time_comp(&sl, &m_len) > 0) {
// D4(fprintf(debug_file, "Sound chunk is too early, skip it\n"));
// break;
// }
// }
// we need to skip some samples in a new session, but if we just switch the frames then
// we need to split new samples in the buffer into two parts - for the previous file,
// and the next one...
@@ -258,8 +299,8 @@ void audio_process(struct audio *audio)
long samples = slen - offset;
audio->ctx_a.audio_count += samples;
_buf = (void *)audio->ctx_a.sbuffer;
_buf = (void *)((char *) _buf + offset * AUDIO_BPS * audio->audio_channels);
_buf_len = samples * AUDIO_BPS * audio->audio_channels;
_buf = (void *)((char *) _buf + offset * audio->audio_channels * (snd_pcm_format_physical_width(audio->audio_format) / 8));
_buf_len = samples * audio->audio_channels * (snd_pcm_format_physical_width(audio->audio_format) / 8);
audio->write_samples(audio, _buf, _buf_len, samples);
float tr = 1.0 / audio->audio_rate;
@@ -273,9 +314,28 @@ void audio_process(struct audio *audio)
D6(fprintf(debug_file, "%d: sound time %lu:%06lu, at %ld:%06ld; samples: %ld\n",
counter, s, us, tv_sys.tv_sec, tv_sys.tv_usec, samples));
}
} else {
if (slen == -EPIPE || slen == -ESTRPIPE) {
int err;
fprintf(debug_file, "snd_pcm_readi returned error: %ld\n", (long)slen);
err = snd_pcm_recover(audio->ctx_a.capture_hnd, slen, 0);
snd_pcm_reset(audio->ctx_a.capture_hnd);
// snd_pcm_drain(audio->ctx_a.capture_hnd);
// err = snd_pcm_prepare(audio->ctx_a.capture_hnd);
if (err != 0) {
D0(fprintf(debug_file, "error: ALSA could not recover audio buffer, error code: %s\n", snd_strerror(err)));
// TODO: restart audio interface
break;
} else {
fprintf(debug_file, "audio error recover complete, trying to restart the stream\n");
// snd_pcm_drain(audio->ctx_a.capture_hnd);
// err = snd_pcm_prepare(audio->ctx_a.capture_hnd);
// fprintf(debug_file, "snd_pcm_prepare returned %d\n", err);
}
}
}
} else {
D3(fprintf(debug_file, "error reading from ALSA buffer, error code %ld\n", slen));
// no audio frames for processing, return
break;
}
}
@@ -283,28 +343,27 @@ void audio_process(struct audio *audio)
/**
* Finalize audio stream and stop hardware.
* Asserts:
* audio->get_fpga_time callback must be set
* @param audio pointer to a structure containing audio parameters and buffers
* @param reset flag indicating that HW should be reset as well
* @return None
*/
void audio_finish(struct audio *audio, bool reset)
{
struct timeval m_end;
struct timeval m_end, m_len, av_diff, frame_period;
D6(fprintf(debug_file, "movie start at: %ld:%06ld\n", audio->ctx_a.time_start.tv_sec, audio->ctx_a.time_start.tv_usec));
m_end = audio->ts_video;
m_end.tv_usec += audio->frame_period / 2;
time_normalize(&m_end);
D6(fprintf(debug_file, "movie end at: %ld:%06ld\n", m_end.tv_sec, m_end.tv_usec));
struct timeval m_len;
m_len.tv_sec = m_end.tv_sec - 1;
m_len.tv_usec = m_end.tv_usec + 1000000;
m_len.tv_sec -= audio->ctx_a.time_start.tv_sec;
m_len.tv_usec -= audio->ctx_a.time_start.tv_usec;
time_normalize(&m_len);
m_len = time_sub(&m_end, &audio->ctx_a.time_start);
D6(fprintf(debug_file, "movie length: %ld:%06ld\n", m_len.tv_sec, m_len.tv_usec));
audio->m_len = m_len;
audio->ctx_a.time_start = m_end;
assert(audio->get_fpga_time);
// calculate how many samples we need to save now for the end
struct timeval fpga_tv, sys_tv, audio_last;
audio->get_fpga_time((const struct audio *)audio, &fpga_tv);
@@ -315,29 +374,39 @@ void audio_finish(struct audio *audio, bool reset)
D6(fprintf(debug_file, " FPGA time == %ld:%06ld\n", fpga_tv.tv_sec, fpga_tv.tv_usec));
D6(fprintf(debug_file, "AUDIO sys time == %ld:%06ld\n", audio->ctx_a.time_last.tv_sec, audio->ctx_a.time_last.tv_usec););
audio_last = audio->ctx_a.time_last;
if (m_len.tv_sec > audio_last.tv_sec) {
m_len.tv_sec--;
m_len.tv_usec += 1000000;
// audio_last = audio->ctx_a.time_last;
// if (m_len.tv_sec > audio_last.tv_sec) {
// m_len.tv_sec--;
// m_len.tv_usec += 1000000;
// }
// m_len.tv_sec -= audio_last.tv_sec;
// m_len.tv_usec -= audio_last.tv_usec;
// time_normalize(&m_len);
// long to_finish_us = time_to_us(&m_len);
av_diff = time_sub(&m_len, &audio->ctx_a.time_last);
frame_period = us_to_time(audio->frame_period);
av_diff = time_add(&av_diff, &frame_period); // plus duration of the last video frame
long to_finish_us = time_to_us(&av_diff);
float period_us = (1.0 / audio->audio_rate) * 1000000;
// D6(fprintf(debug_file, "... and now we need to save audio for this time: %ld:%06ld - i.e. %06ld usecs\n", m_len.tv_sec, m_len.tv_usec, to_finish_us));
D6(fprintf(debug_file, "... and now we need to save audio for this time: %ld:%06ld - i.e. %06ld usecs\n", av_diff.tv_sec, av_diff.tv_usec, to_finish_us));
if (to_finish_us > period_us) {
double s = audio->audio_rate;
s /= 1000.0;
s *= to_finish_us;
s /= 1000.0;
audio->ctx_a.rem_samples = (long) s;
// from the state->tv_video_start to ctx_a.time_last (with FPGA time recalculation)
do {
fprintf(debug_file, "process remaining %ld samples\n", audio->ctx_a.rem_samples);
audio_process(audio);
fprintf(debug_file, "rem_samples = %ld\n", audio->ctx_a.rem_samples);
if (audio->ctx_a.rem_samples > 0)
// sched_yield();
// TODO: calculate sleep time base on the number of samples required
usleep(100000);
} while (audio->ctx_a.rem_samples > 0);
}
m_len.tv_sec -= audio_last.tv_sec;
m_len.tv_usec -= audio_last.tv_usec;
time_normalize(&m_len);
long to_finish_us = m_len.tv_usec + 1000000 * m_len.tv_sec;
D6(fprintf(debug_file, "... and now we need to save audio for this time: %ld:%06ld - i.e. %06ld usecs\n", m_len.tv_sec, m_len.tv_usec, to_finish_us));
double s = audio->audio_rate;
s /= 1000.0;
s *= to_finish_us;
s /= 1000.0;
audio->ctx_a.rem_samples = (long) s;
// from the state->tv_video_start to ctx_a.time_last (with FPGA time recalculation)
do {
fprintf(debug_file, "process remaining samples\n");
audio_process(audio);
fprintf(debug_file, "rem_samples = %ld\n", audio->ctx_a.rem_samples);
if (audio->ctx_a.rem_samples > 0)
sched_yield();
} while (audio->ctx_a.rem_samples > 0);
if (reset)
audio_deinit(audio);
@@ -401,3 +470,30 @@ static void audio_deinit(struct audio *audio)
gettimeofday(&tv, NULL);
D4(fprintf(debug_file, "audio deinitialized at %ld:%06ld\n", tv.tv_sec, tv.tv_usec));
}
/**
* Skip some audio frames at the beginning of recording to synchronize audio and video streams.
* @param audio pointer to a structure containing audio parameters and buffers
* @param frames number of frames available
* @return true if frames were skipped, false otherwise
*/
static bool skip_audio(struct audio *audio, snd_pcm_uframes_t frames)
{
bool ret_val = false;
snd_pcm_uframes_t skip;
if (audio->ctx_a.audio_skip_samples != 0) {
D5(fprintf(debug_file, "skip_samples = %lld, available samples = %ld\n", audio->ctx_a.audio_skip_samples, frames));
if (audio->ctx_a.audio_skip_samples >= frames) {
audio->ctx_a.audio_skip_samples -= frames;
skip = frames;
} else {
skip = audio->ctx_a.audio_skip_samples;
audio->ctx_a.audio_skip_samples = 0;
}
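// advance the application pointer, dropping 'skip' captured frames without copying them out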
snd_pcm_forward(audio->ctx_a.capture_hnd, skip);
ret_val = true;
}
return ret_val;
}
@@ -35,16 +35,15 @@
#define AUDIO_RATE_MIN 11025
#define AUDIO_RATE_MAX 44100
#define DEFAULT_AUDIO_VOLUME 0xffff
#define AUDIO_BPS 2 ///< bytes per sample for a single channel (can be 1 or 2)
struct context_audio {
char *sbuffer; ///< buffer for audio samples
long sbuffer_len; ///< length of the samples buffer, in audio frames
long sample_time; ///< duration of one audio sample in ms
long long audio_count; ///< total number of audio samples
long sample_time; ///< duration of one chunk of audio data, in ms
long long audio_count; ///< total number of audio frames
struct timeval time_start; ///< start time, set only when stream starts and updated with each new file
struct timeval time_last; ///< time of last audio sample
struct timeval time_last; ///< calculated time of last audio sample (this value is not taken from ALSA)
long rem_samples; ///< remaining samples
int begin_of_stream_with_audio; ///<
@@ -55,8 +54,8 @@ struct context_audio {
};
struct audio {
int audio_enable; ///< flag indicating if audio is enabled
int audio_rate; ///< sample rate
int audio_enable; ///< flag indicating that audio is enabled
int audio_rate; ///< sample rate, in Hz
int audio_channels; ///< number of channels
int audio_volume; ///< volume set in range [0..0xFFFF]
@@ -76,11 +75,21 @@ struct audio {
struct timeval ts_audio; ///< time stamp when audio stream started
struct timeval ts_video; ///< time stamp of each new frame
struct timeval ts_video_start; ///< time stamp of starting video frame
int frame_period; ///< video frame period, used to calculate time stamps for audio samples
int frame_period; ///< video frame period, in microseconds
snd_pcm_format_t audio_format; ///< format of audio samples as defined in 'enum snd_pcm_format_t'
void (*get_fpga_time)(const struct audio *audio, struct timeval *tv); ///< callback function which can get FPGA time
int (*write_samples)(struct audio *audio, void *buff, long len, long slen); ///< callback function which actually writes data to file; this must be set
///< in the camogm_init_* function when appropriate format is selected
// === debug ===
struct timeval sf_timediff; // system to fpga time difference at the beginning of the stream
struct timeval m_len;
struct timeval sys_fpga_timediff;
int avail_samples;
long calc_frames; // calculated number of frames by current video frame
struct timeval prev_ts_video;
long long skip_samples;
// === end of debug ===
};
void audio_init(struct audio *audio, bool restart);
......
@@ -26,7 +26,11 @@
#include <sys/types.h>
#include <assert.h>
// for debug only
#include <math.h>
#include "camogm_mov.h"
#include "thelper.h"
/** @brief QuickTime header length (w/o index tables) enough to accommodate static data */
#define QUICKTIME_MIN_HEADER 0x300
@@ -236,11 +240,11 @@ static int camogm_audio_mov(struct audio *audio, void *buff, long len, long slen
ssize_t wr_len;
camogm_state *state = container_of(audio, camogm_state, audio);
D6(fprintf(debug_file, "write audio sample, len = %d, slen = %d\n", len, slen));
D6(fprintf(debug_file, "write audio sample, len = %ld, slen = %ld\n", len, slen));
wr_len = write(state->ivf, buff, len);
if (wr_len < len) {
D0(fprintf(debug_file, "audio samples write error: %s; returned %d, expected %d\n", strerror(errno), wr_len, len));
D0(fprintf(debug_file, "audio samples write error: %s; returned %d, expected %ld\n", strerror(errno), wr_len, len));
close(state->ivf);
state->ivf = -1;
return CAMOGM_FRAME_FILE_ERR;
@@ -295,6 +299,9 @@ int camogm_end_mov(camogm_state *state)
NULL, // array of frame lengths to build an index
state->frame_data_start
);
// === debug code ===
fprintf(debug_file, "total # of video frames: %d, total # of audio samples: %ld\n", state->frameno, state->audio.audio_samples);
// === end of debug ===
close(state->ivf);
state->ivf = -1;
// free memory used for index
@@ -633,6 +640,11 @@ int quicktime_template_parser( camogm_state *state,
iFileLen = strlen(iFile);
lseek(ofd, 0, SEEK_SET);
// === debug ===
struct timeval m_len = state->audio.m_len; // duration of movie
fprintf(debug_file, "frameno: %d, duration: %ld:%06ld, audio_samples: %ld\n", state->frameno, m_len.tv_sec, m_len.tv_usec, state->audio.audio_samples);
// === end of debug ===
audio_timescale = state->audio.audio_rate;
audio_rate = audio_timescale; // QuickTime defines sample rate as unsigned 16.16 fixed-point number
audio_rate <<= 16;
......
@@ -18,7 +18,7 @@
00000000 # Selection Time
00000000 # Selection Duration
00000000 # Current Time
00000002 # Next Track ID
00000003 # Next Track ID
} # 'mvhd
{'trak
{'tkhd
@@ -283,6 +283,11 @@
} # 'stbl
} # 'minf
} #'mdia
{'tref # Track reference atom, identifies this track as related to video track
{'sync
00000001 # ID of the related video track
} # 'sync
} # 'tref
} #'trak
# {'udta
......
@@ -70,3 +70,17 @@ struct timeval time_sub(const struct timeval *tv1, const struct timeval *tv2)
return ret_val;
}
/**
* Add one time value to another and return the sum
*/
struct timeval time_add(const struct timeval *tv1, const struct timeval *tv2)
{
struct timeval ret_val = *tv1;
ret_val.tv_sec += tv2->tv_sec;
ret_val.tv_usec += tv2->tv_usec;
time_normalize(&ret_val);
return ret_val;
}
@@ -27,5 +27,36 @@
void time_normalize(struct timeval *tv);
int time_comp(struct timeval *t1, struct timeval *t2);
struct timeval time_sub(const struct timeval *tv1, const struct timeval *tv2);
struct timeval time_add(const struct timeval *tv1, const struct timeval *tv2);
/**
* Convert time represented by timeval structure to time in microseconds
* @param tv time value to convert
* @return time in microseconds
*/
static inline unsigned long long time_to_us(const struct timeval *tv)
{
unsigned long long t;
t = (unsigned long long)tv->tv_sec * 1000000ULL; // cast first: tv_sec * 1000000 overflows a 32-bit long
t += tv->tv_usec;
return t;
}
/**
* Convert time in microseconds to time represented by timeval structure
* @param us time in microseconds
* @return time in timeval structure
*/
static inline struct timeval us_to_time(unsigned long us)
{
struct timeval tv;
tv.tv_sec = us / 1000000;
tv.tv_usec = us % 1000000;
return tv;
}
#endif /* _THELPER_H */
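For reference, a small usage sketch (not part of the commit) showing how the new helpers compose the way audio_finish does when it converts the remaining audio duration into a sample count; the numeric values are made up:

#include <stdio.h>
#include "thelper.h"

int main(void)
{
    struct timeval m_len  = { 2, 500000 };               /* movie length: 2.5 s  */
    struct timeval a_last = { 2, 380000 };               /* audio written so far */
    struct timeval diff   = time_sub(&m_len, &a_last);   /* 0.120000             */
    struct timeval period = us_to_time(40000);           /* one 25 fps frame     */
    diff = time_add(&diff, &period);                     /* 0.160000             */
    unsigned long long us = time_to_us(&diff);           /* 160000 us            */
    long rem_samples = (long)(44100.0 * us / 1000000.0); /* 7056 samples         */
    printf("record %ld more samples (%llu us)\n", rem_samples, us);
    return 0;
}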