* Make error detection in timestamp parsing much stronger.

* Avoid extracting persistent data (such as video width*height) from 
  received data if we suspect that an error has occurred (MPEG-TS TEI or
  continuity counter errors).

* Add src/plumbing/tsfix.[ch]: Streaming code that fixes up missing
  timestamps and normalizes timecodes to start at 0.

* Move PTS estimation code and timestamp normalization out from central
  parse code. Subscribers that really need it should use tsfix plumbing
  instead.

* Let DVR code use tsfix plumbing.

* Don't send "pts" and "dts" over HTSP if timestamps are missing.
This commit is contained in:
Andreas Öman 2010-06-15 14:53:39 +00:00
parent 5b56688ebe
commit 3c0e1141ff
13 changed files with 592 additions and 282 deletions

View file

@ -70,6 +70,8 @@ SRCS = src/main.c \
src/rawtsinput.c \
src/iptv_input.c \
SRCS += src/plumbing/tsfix.c \
SRCS += src/dvr/dvr_db.c \
src/dvr/dvr_rec.c \
src/dvr/dvr_autorec.c

View file

@ -153,7 +153,7 @@ typedef struct dvr_entry {
th_subscription_t *de_s;
streaming_queue_t de_sq;
streaming_target_t *de_tsfix;
/**
* Initialized upon SUBSCRIPTION_TRANSPORT_RUN

View file

@ -33,6 +33,8 @@
#include "spawn.h"
#include "transports.h"
#include "plumbing/tsfix.h"
static const AVRational mpeg_tc = {1, 90000};
typedef struct dvr_rec_stream {
@ -85,8 +87,10 @@ dvr_rec_subscribe(dvr_entry_t *de)
else
weight = 300;
de->de_tsfix = tsfix_create(&de->de_sq.sq_st);
de->de_s = subscription_create_from_channel(de->de_channel, weight,
buf, &de->de_sq.sq_st, 0);
buf, de->de_tsfix, 0);
}
/**
@ -104,6 +108,8 @@ dvr_rec_unsubscribe(dvr_entry_t *de, int stopcode)
pthread_join(de->de_thread, NULL);
de->de_s = NULL;
tsfix_destroy(de->de_tsfix);
de->de_last_error = stopcode;
}

View file

@ -1405,12 +1405,18 @@ htsp_stream_deliver(htsp_subscription_t *hs, th_pkt_t *pkt)
htsmsg_add_u32(m, "stream", pkt->pkt_componentindex);
htsmsg_add_u32(m, "com", pkt->pkt_commercial);
int64_t pts = av_rescale_q(pkt->pkt_pts, mpeg_tc, AV_TIME_BASE_Q);
int64_t dts = av_rescale_q(pkt->pkt_dts, mpeg_tc, AV_TIME_BASE_Q);
uint32_t dur = av_rescale_q(pkt->pkt_duration, mpeg_tc, AV_TIME_BASE_Q);
htsmsg_add_s64(m, "dts", dts);
htsmsg_add_s64(m, "pts", pts);
if(pkt->pkt_pts != AV_NOPTS_VALUE) {
int64_t pts = av_rescale_q(pkt->pkt_pts, mpeg_tc, AV_TIME_BASE_Q);
htsmsg_add_s64(m, "pts", pts);
}
if(pkt->pkt_dts != AV_NOPTS_VALUE) {
int64_t dts = av_rescale_q(pkt->pkt_dts, mpeg_tc, AV_TIME_BASE_Q);
htsmsg_add_s64(m, "dts", dts);
}
uint32_t dur = av_rescale_q(pkt->pkt_duration, mpeg_tc, AV_TIME_BASE_Q);
htsmsg_add_u32(m, "duration", dur);
pkt = pkt_merge_global(pkt);

View file

@ -132,3 +132,31 @@ pkt_merge_global(th_pkt_t *pkt)
return n;
}
/**
*
*/
th_pkt_t *
pkt_copy(th_pkt_t *pkt)
{
  th_pkt_t *n = malloc(sizeof(th_pkt_t));

  /* Shallow-copy all scalar fields, then deep-copy the owned buffers
     below so the copy is independent of the original */
  *n = *pkt;
  n->pkt_refcount = 1;

  if(pkt->pkt_globaldata_len) {
    n->pkt_globaldata = malloc(pkt->pkt_globaldata_len +
			       FF_INPUT_BUFFER_PADDING_SIZE);
    memcpy(n->pkt_globaldata, pkt->pkt_globaldata, pkt->pkt_globaldata_len);
    /* FFmpeg requires the FF_INPUT_BUFFER_PADDING_SIZE trailer to be
       zeroed; otherwise optimized bitstream readers may read past the
       payload into uninitialized memory */
    memset(n->pkt_globaldata + pkt->pkt_globaldata_len, 0,
	   FF_INPUT_BUFFER_PADDING_SIZE);
  }
  /* NOTE(review): if pkt_globaldata_len == 0 but pkt_globaldata is
     non-NULL, both packets alias the same buffer after the struct copy
     above — verify against packet free semantics */

  if(pkt->pkt_payloadlen) {
    n->pkt_payload = malloc(pkt->pkt_payloadlen +
			    FF_INPUT_BUFFER_PADDING_SIZE);
    memcpy(n->pkt_payload, pkt->pkt_payload, pkt->pkt_payloadlen);
    /* Zero the padding area for the same reason as above */
    memset(n->pkt_payload + pkt->pkt_payloadlen, 0,
	   FF_INPUT_BUFFER_PADDING_SIZE);
  }

  return n;
}

View file

@ -37,6 +37,7 @@ typedef struct th_pkt {
uint8_t pkt_commercial;
uint8_t pkt_componentindex;
uint8_t pkt_frametype;
uint8_t pkt_field; // Set if packet is only a half frame (a field)
uint8_t *pkt_payload;
int pkt_payloadlen;
@ -71,4 +72,6 @@ th_pkt_t *pkt_alloc(void *data, size_t datalen, int64_t pts, int64_t dts);
th_pkt_t *pkt_merge_global(th_pkt_t *pkt);
th_pkt_t *pkt_copy(th_pkt_t *pkt);
#endif /* PACKET_H_ */

View file

@ -294,7 +294,7 @@ h264_decode_pic_parameter_set(th_stream_t *st, bitstream_t *bs)
int
h264_decode_slice_header(th_stream_t *st, bitstream_t *bs, int *pkttype,
int *duration)
int *duration, int *isfield)
{
h264_private_t *p;
int slice_type, pps_id, sps_id, fnum;
@ -329,21 +329,18 @@ h264_decode_slice_header(th_stream_t *st, bitstream_t *bs, int *pkttype,
fnum = read_bits(bs, p->sps[sps_id].max_frame_num_bits);
int structure;
int field = 0;
int timebase = 180000;
if(p->sps[sps_id].mbs_only_flag) {
structure = 0;
} else {
if(!p->sps[sps_id].mbs_only_flag) {
if(read_bits1(bs)) {
read_bits1(bs); // bottom field
structure = 1;
} else {
structure = 2;
field = 1;
}
}
*isfield = field;
if(p->sps[sps_id].time_scale != 0) {
int d = timebase * p->sps[sps_id].units_in_tick / p->sps[sps_id].time_scale;
*duration = d;
@ -356,7 +353,7 @@ h264_decode_slice_header(th_stream_t *st, bitstream_t *bs, int *pkttype,
st->st_vbv_delay = -1;
if(p->sps[sps_id].width && p->sps[sps_id].height)
if(p->sps[sps_id].width && p->sps[sps_id].height && !st->st_buffer_errors)
parser_set_stream_vsize(st, p->sps[sps_id].width, p->sps[sps_id].height);
return 0;
}

View file

@ -28,6 +28,6 @@ int h264_decode_seq_parameter_set(th_stream_t *st, bitstream_t *bs);
int h264_decode_pic_parameter_set(th_stream_t *st, bitstream_t *bs);
int h264_decode_slice_header(th_stream_t *st, bitstream_t *bs, int *pkttype,
int *duration);
int *duration, int *isfield);
#endif /* PARSER_H264_H_ */

View file

@ -60,13 +60,27 @@ static const AVRational mpeg_tc = {1, 90000};
x; \
})
#define getpts(b, l) ({ \
int64_t _pts; \
_pts = (int64_t)((getu8(b, l) >> 1) & 0x07) << 30; \
_pts |= (int64_t)(getu16(b, l) >> 1) << 15; \
_pts |= (int64_t)(getu16(b, l) >> 1); \
_pts; \
})
/**
 * Decode a 33 bit PTS/DTS from a 5 byte MPEG PES timestamp field.
 *
 * The field carries the timestamp as 3 + 15 + 15 bits, each group
 * followed by a marker bit that must be '1'.  If any marker bit is
 * missing the field is considered corrupt and AV_NOPTS_VALUE is
 * returned, so callers can detect bad timestamps instead of using
 * garbage values.
 */
static int64_t
getpts(uint8_t *p)
{
  int a = p[0];
  int b = (p[1] << 8) | p[2];
  int c = (p[3] << 8) | p[4];

  if((a & 1) && (b & 1) && (c & 1)) {
    /* All marker bits present: strip them and assemble the 33 bit value */
    return
      ((int64_t)((a >> 1) & 0x07) << 30) |
      ((int64_t)((b >> 1)       ) << 15) |
      ((int64_t)((c >> 1)       ))
      ;
  } else {
    // Marker bits not present
    return AV_NOPTS_VALUE;
  }
}
static int parse_mpeg2video(th_transport_t *t, th_stream_t *st, size_t len,
@ -99,18 +113,12 @@ static void parse_mpegaudio(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt);
static void parse_ac3(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt);
static void parse_compute_pts(th_transport_t *t, th_stream_t *st,
th_pkt_t *pkt);
static void parser_deliver(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt,
int checkts);
static void parser_deliver(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt);
static int parse_pes_header(th_transport_t *t, th_stream_t *st, uint8_t *buf,
size_t len);
static void parser_compute_duration(th_transport_t *t, th_stream_t *st,
th_pktref_t *pr);
/**
* Parse raw mpeg data
*/
@ -118,6 +126,12 @@ void
parse_mpeg_ts(th_transport_t *t, th_stream_t *st, const uint8_t *data,
int len, int start, int err)
{
if(start)
st->st_buffer_errors = 0;
if(err)
st->st_buffer_errors++;
switch(st->st_type) {
case SCT_MPEG2VIDEO:
parse_video(t, st, data, len, parse_mpeg2video);
@ -247,7 +261,7 @@ parse_aac(th_transport_t *t, th_stream_t *st, const uint8_t *data,
pkt = parse_latm_audio_mux_element(t, st, st->st_buffer + p + 3, muxlen);
if(pkt != NULL)
parser_deliver(t, st, pkt, 1);
parser_deliver(t, st, pkt);
p += muxlen + 3;
} else {
@ -316,6 +330,7 @@ parse_video(th_transport_t *t, th_stream_t *st, const uint8_t *data, int len,
/* Reset packet parser upon length error or if parser
tells us so */
st->st_buffer_ptr = 0;
st->st_buffer_errors = 0;
st->st_buffer[st->st_buffer_ptr++] = sc >> 24;
st->st_buffer[st->st_buffer_ptr++] = sc >> 16;
st->st_buffer[st->st_buffer_ptr++] = sc >> 8;
@ -378,10 +393,6 @@ parse_audio(th_transport_t *t, th_stream_t *st, const uint8_t *data,
st->st_parser_state = 2;
//There is no video pid, start the stream right away
if(t->tht_dts_start == AV_NOPTS_VALUE && t->tht_servicetype == ST_RADIO)
t->tht_dts_start = st->st_curdts;
assert(len >= 0);
if(len == 0)
return;
@ -452,7 +463,7 @@ parse_mpegaudio(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt)
pkt->pkt_duration = duration;
st->st_nextdts = pkt->pkt_dts + duration;
parser_deliver(t, st, pkt, 1);
parser_deliver(t, st, pkt);
}
@ -528,7 +539,7 @@ parse_ac3(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt)
pkt->pkt_duration = duration;
st->st_nextdts = pkt->pkt_dts + duration;
parser_deliver(t, st, pkt, 1);
parser_deliver(t, st, pkt);
}
@ -541,7 +552,7 @@ parse_ac3(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt)
static int
parse_pes_header(th_transport_t *t, th_stream_t *st, uint8_t *buf, size_t len)
{
int64_t dts, pts;
int64_t dts, pts, d;
int hdr, flags, hlen;
hdr = getu8(buf, len);
@ -555,24 +566,36 @@ parse_pes_header(th_transport_t *t, th_stream_t *st, uint8_t *buf, size_t len)
if(hlen < 10)
goto err;
pts = getpts(buf, len);
dts = getpts(buf, len);
pts = getpts(buf);
dts = getpts(buf + 5);
d = (pts - dts) & PTS_MASK;
if(d > 180000) // More than two seconds of PTS/DTS delta, probably corrupt
pts = dts = AV_NOPTS_VALUE;
} else if((flags & 0xc0) == 0x80) {
if(hlen < 5)
goto err;
dts = pts = getpts(buf, len);
dts = pts = getpts(buf);
} else
return hlen + 3;
st->st_curdts = dts & PTS_MASK;
st->st_curpts = pts & PTS_MASK;
if(st->st_buffer_errors) {
st->st_curdts = AV_NOPTS_VALUE;
st->st_curpts = AV_NOPTS_VALUE;
} else {
st->st_curdts = dts;
st->st_curpts = pts;
}
return hlen + 3;
err:
limitedlog(&st->st_loglimit_pes, "TS", transport_component_nicename(st),
"Corrupted PES header");
st->st_curdts = AV_NOPTS_VALUE;
st->st_curpts = AV_NOPTS_VALUE;
limitedlog(&st->st_loglimit_pes, "TS", transport_component_nicename(st),
"Corrupted PES header");
return -1;
}
@ -613,11 +636,6 @@ parse_mpeg2video_pic_start(th_transport_t *t, th_stream_t *st, int *frametype,
*frametype = pct;
/* If this is the first I-frame seen, set dts_start as a reference
offset */
if(pct == PKT_I_FRAME && t->tht_dts_start == AV_NOPTS_VALUE)
t->tht_dts_start = st->st_curdts;
v = read_bits(bs, 16); /* vbv_delay */
if(v == 0xffff)
st->st_vbv_delay = -1;
@ -730,7 +748,7 @@ parse_mpeg2video(th_transport_t *t, th_stream_t *st, size_t len,
case 0x00000100:
/* Picture start code */
if(st->st_frame_duration == 0 || st->st_curdts == AV_NOPTS_VALUE)
if(st->st_frame_duration == 0)
return 1;
if(parse_mpeg2video_pic_start(t, st, &frametype, &bs))
@ -747,9 +765,11 @@ parse_mpeg2video(th_transport_t *t, th_stream_t *st, size_t len,
case 0x000001b3:
/* Sequence start code */
if(parse_mpeg2video_seq_start(t, st, &bs))
return 1;
parser_global_data_move(st, buf, len);
if(!st->st_buffer_errors) {
if(parse_mpeg2video_seq_start(t, st, &bs))
return 1;
parser_global_data_move(st, buf, len);
}
return 2;
case 0x000001b5:
@ -758,11 +778,13 @@ parse_mpeg2video(th_transport_t *t, th_stream_t *st, size_t len,
switch(buf[4] >> 4) {
case 0x1:
// Sequence Extension
parser_global_data_move(st, buf, len);
if(!st->st_buffer_errors)
parser_global_data_move(st, buf, len);
return 2;
case 0x2:
// Sequence Display Extension
parser_global_data_move(st, buf, len);
if(!st->st_buffer_errors)
parser_global_data_move(st, buf, len);
return 2;
}
break;
@ -790,13 +812,14 @@ parse_mpeg2video(th_transport_t *t, th_stream_t *st, size_t len,
pkt->pkt_payloadlen = st->st_buffer_ptr - 4;
pkt->pkt_duration = st->st_frame_duration;
parse_compute_pts(t, st, pkt);
parser_deliver(t, st, pkt);
st->st_curpkt = NULL;
st->st_buffer = malloc(st->st_buffer_size);
/* If we know the frame duration, increase DTS accordingly */
st->st_curdts += st->st_frame_duration;
if(st->st_curdts != AV_NOPTS_VALUE)
st->st_curdts += st->st_frame_duration;
/* PTS cannot be extrapolated (it's not linear) */
st->st_curpts = AV_NOPTS_VALUE;
@ -806,7 +829,8 @@ parse_mpeg2video(th_transport_t *t, th_stream_t *st, size_t len,
case 0x000001b8:
// GOP header
parser_global_data_move(st, buf, len);
if(!st->st_buffer_errors)
parser_global_data_move(st, buf, len);
return 2;
case 0x000001b2:
@ -831,7 +855,7 @@ parse_h264(th_transport_t *t, th_stream_t *st, size_t len,
uint8_t *buf = st->st_buffer + sc_offset;
uint32_t sc = st->st_startcode;
int64_t d;
int l2, pkttype, duration;
int l2, pkttype, duration, isfield;
bitstream_t bs;
int ret = 0;
@ -840,7 +864,7 @@ parse_h264(th_transport_t *t, th_stream_t *st, size_t len,
if(len >= 9)
parse_pes_header(t, st, buf + 6, len - 6);
if(st->st_prevdts != AV_NOPTS_VALUE) {
if(st->st_prevdts != AV_NOPTS_VALUE && st->st_curdts != AV_NOPTS_VALUE) {
d = (st->st_curdts - st->st_prevdts) & 0x1ffffffffLL;
if(d < 90000)
@ -863,40 +887,39 @@ parse_h264(th_transport_t *t, th_stream_t *st, size_t len,
switch(sc & 0x1f) {
case 7:
h264_nal_deescape(&bs, buf + 3, len - 3);
h264_decode_seq_parameter_set(st, &bs);
parser_global_data_move(st, buf, len);
if(!st->st_buffer_errors) {
h264_nal_deescape(&bs, buf + 3, len - 3);
h264_decode_seq_parameter_set(st, &bs);
parser_global_data_move(st, buf, len);
}
ret = 2;
break;
case 8:
h264_nal_deescape(&bs, buf + 3, len - 3);
h264_decode_pic_parameter_set(st, &bs);
parser_global_data_move(st, buf, len);
if(!st->st_buffer_errors) {
h264_nal_deescape(&bs, buf + 3, len - 3);
h264_decode_pic_parameter_set(st, &bs);
parser_global_data_move(st, buf, len);
}
ret = 2;
break;
case 5: /* IDR+SLICE */
case 1:
if(st->st_curpkt != NULL || st->st_frame_duration == 0 ||
st->st_curdts == AV_NOPTS_VALUE)
if(st->st_curpkt != NULL || st->st_frame_duration == 0)
break;
if(t->tht_dts_start == AV_NOPTS_VALUE)
t->tht_dts_start = st->st_curdts;
l2 = len - 3 > 64 ? 64 : len - 3;
h264_nal_deescape(&bs, buf + 3, l2); /* we just want the first stuff */
duration = 0;
if(h264_decode_slice_header(st, &bs, &pkttype, &duration)) {
if(h264_decode_slice_header(st, &bs, &pkttype, &duration, &isfield)) {
free(bs.data);
return 1;
}
st->st_curpkt = pkt_alloc(NULL, 0, st->st_curpts, st->st_curdts);
st->st_curpkt->pkt_frametype = pkttype;
st->st_curpkt->pkt_field = isfield;
st->st_curpkt->pkt_duration = duration ?: st->st_frame_duration;
st->st_curpkt->pkt_commercial = t->tht_tt_commercial_advice;
break;
@ -923,10 +946,13 @@ parse_h264(th_transport_t *t, th_stream_t *st, size_t len,
pkt->pkt_payload = st->st_buffer;
pkt->pkt_payloadlen = st->st_buffer_ptr - 4;
parser_deliver(t, st, pkt, 1);
parser_deliver(t, st, pkt);
st->st_curpkt = NULL;
st->st_buffer = malloc(st->st_buffer_size);
st->st_curdts = AV_NOPTS_VALUE;
st->st_curpts = AV_NOPTS_VALUE;
}
return 1;
}
@ -948,7 +974,7 @@ parse_subtitles(th_transport_t *t, th_stream_t *st, const uint8_t *data,
if(start) {
/* Payload unit start */
st->st_parser_state = 1;
st->st_buffer_ptr = 0;
st->st_buffer_errors = 0;
}
if(st->st_parser_state == 0)
@ -1007,184 +1033,20 @@ parse_subtitles(th_transport_t *t, th_stream_t *st, const uint8_t *data,
if(buf[psize - 1] == 0xff) {
pkt = pkt_alloc(buf, psize - 1, st->st_curpts, st->st_curdts);
pkt->pkt_commercial = t->tht_tt_commercial_advice;
parser_deliver(t, st, pkt, 0);
parser_deliver(t, st, pkt);
}
}
}
/**
* Compute PTS (if not known)
*
* We do this by placing packets on a queue and wait for next I/P
* frame to appear
*/
static void
parse_compute_pts(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt)
parser_deliver(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt)
{
th_pktref_t *pr;
int validpts = pkt->pkt_pts != AV_NOPTS_VALUE && st->st_ptsq_len == 0;
/* PTS known and no other packets in queue, deliver at once */
if(validpts && pkt->pkt_duration)
return parser_deliver(t, st, pkt, 1);
/* Reference count is transfered to queue */
pr = malloc(sizeof(th_pktref_t));
pr->pr_pkt = pkt;
if(validpts)
return parser_compute_duration(t, st, pr);
TAILQ_INSERT_TAIL(&st->st_ptsq, pr, pr_link);
st->st_ptsq_len++;
/* */
while((pr = TAILQ_FIRST(&st->st_ptsq)) != NULL) {
pkt = pr->pr_pkt;
switch(pkt->pkt_frametype) {
case PKT_B_FRAME:
/* B-frames have same PTS as DTS, pass them on */
pkt->pkt_pts = pkt->pkt_dts;
break;
case PKT_I_FRAME:
case PKT_P_FRAME:
/* Presentation occures at DTS of next I or P frame,
try to find it */
pr = TAILQ_NEXT(pr, pr_link);
while(1) {
if(pr == NULL)
return; /* not arrived yet, wait */
if(pr->pr_pkt->pkt_frametype <= PKT_P_FRAME) {
pkt->pkt_pts = pr->pr_pkt->pkt_dts;
break;
}
pr = TAILQ_NEXT(pr, pr_link);
}
break;
}
pr = TAILQ_FIRST(&st->st_ptsq);
TAILQ_REMOVE(&st->st_ptsq, pr, pr_link);
st->st_ptsq_len--;
if(pkt->pkt_duration == 0) {
parser_compute_duration(t, st, pr);
} else {
parser_deliver(t, st, pkt, 1);
free(pr);
}
}
}
/**
* Compute duration of a packet, we do this by keeping a packet
* until the next one arrives, then we release it
*/
static void
parser_compute_duration(th_transport_t *t, th_stream_t *st, th_pktref_t *pr)
{
th_pktref_t *next;
int64_t d;
TAILQ_INSERT_TAIL(&st->st_durationq, pr, pr_link);
pr = TAILQ_FIRST(&st->st_durationq);
if((next = TAILQ_NEXT(pr, pr_link)) == NULL)
return;
d = next->pr_pkt->pkt_dts - pr->pr_pkt->pkt_dts;
TAILQ_REMOVE(&st->st_durationq, pr, pr_link);
if(d < 10) {
pkt_ref_dec(pr->pr_pkt);
} else {
pr->pr_pkt->pkt_duration = d;
parser_deliver(t, st, pr->pr_pkt, 1);
}
free(pr);
}
/**
* De-wrap and normalize PTS/DTS to 1MHz clock domain
*/
static void
parser_deliver(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt,
int checkts)
{
int64_t dts, pts, ptsoff, d;
assert(pkt->pkt_dts != AV_NOPTS_VALUE);
assert(pkt->pkt_pts != AV_NOPTS_VALUE);
if(t->tht_dts_start == AV_NOPTS_VALUE) {
pkt_ref_dec(pkt);
return;
}
dts = pkt->pkt_dts;
pts = pkt->pkt_pts;
/* Compute delta between PTS and DTS (and watch out for 33 bit wrap) */
ptsoff = (pts - dts) & PTS_MASK;
/* Subtract the transport wide start offset */
dts -= t->tht_dts_start;
if(st->st_last_dts == AV_NOPTS_VALUE) {
if(dts < 0) {
/* Early packet with negative time stamp, drop those */
pkt_ref_dec(pkt);
return;
}
} else if(checkts) {
d = dts + st->st_dts_epoch - st->st_last_dts;
if(d < 0 || d > 90000) {
if(d < -PTS_MASK || d > -PTS_MASK + 180000) {
st->st_bad_dts++;
if(st->st_bad_dts < 5) {
tvhlog(LOG_ERR, "parser",
"transport %s stream %s, DTS discontinuity. "
"DTS = %" PRId64 ", last = %" PRId64,
t->tht_identifier, streaming_component_type2txt(st->st_type),
dts, st->st_last_dts);
}
} else {
/* DTS wrapped, increase upper bits */
st->st_dts_epoch += PTS_MASK + 1;
st->st_bad_dts = 0;
}
} else {
st->st_bad_dts = 0;
}
}
st->st_bad_dts++;
dts += st->st_dts_epoch;
st->st_last_dts = dts;
pts = dts + ptsoff;
/* Rescale to tvheadned internal 1MHz clock */
pkt->pkt_dts = dts;
pkt->pkt_pts = pts;
#if 0
printf("%-12s %d %10"PRId64" %10"PRId64" %10d %10d\n",
printf("PARSE: %-12s %d %10"PRId64" %10"PRId64" %10d %10d\n",
streaming_component_type2txt(st->st_type),
pkt->pkt_frametype,
pkt->pkt_dts,
@ -1192,7 +1054,7 @@ parser_deliver(th_transport_t *t, th_stream_t *st, th_pkt_t *pkt,
pkt->pkt_duration,
pkt->pkt_payloadlen);
#endif
avgstat_add(&st->st_rate, pkt->pkt_payloadlen, dispatch_clock);
/**

409
src/plumbing/tsfix.c Normal file
View file

@ -0,0 +1,409 @@
/**
* Timestamp fixup
* Copyright (C) 2010 Andreas Öman
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tvhead.h"
#include "streaming.h"
#include "tsfix.h"
LIST_HEAD(tfstream_list, tfstream);
#define tsfixprintf(fmt...) // printf(fmt)
/**
*
*/
typedef struct tfstream {
LIST_ENTRY(tfstream) tfs_link;
int tfs_index;
struct th_pktref_queue tfs_ptsq;
int tfs_ptsq_len;
streaming_component_type_t tfs_type;
int tfs_bad_dts;
int64_t tfs_last_dts_norm;
int64_t tfs_dts_epoch;
int64_t tfs_last_dts_in;
} tfstream_t;
/**
*
*/
typedef struct tsfix {
streaming_target_t tf_input;
streaming_target_t *tf_output;
struct tfstream_list tf_streams;
int tf_hasvideo;
int64_t tf_tsref;
} tsfix_t;
/**
*
*/
/**
 * Unlink a per-stream state record from its tsfix instance, release
 * any packets still parked on its PTS queue and free the record.
 */
static void
tsfix_destroy_stream(tfstream_t *tfs)
{
  LIST_REMOVE(tfs, tfs_link);
  pktref_clear_queue(&tfs->tfs_ptsq);
  free(tfs);
}
/**
*
*/
/**
 * Tear down every per-stream state record owned by the given
 * tsfix instance.
 */
static void
tsfix_destroy_streams(tsfix_t *tf)
{
  tfstream_t *s;

  for(;;) {
    s = LIST_FIRST(&tf->tf_streams);
    if(s == NULL)
      break;
    tsfix_destroy_stream(s);
  }
}
/**
*
*/
/**
 * Allocate and register per-stream timestamp state for one elementary
 * stream (identified by component index and type).  All timestamp
 * trackers start out as "unknown" (AV_NOPTS_VALUE).
 */
static void
tsfix_add_stream(tsfix_t *tf, int index, streaming_component_type_t type)
{
  tfstream_t *tfs = calloc(1, sizeof(tfstream_t));

  tfs->tfs_type = type;
  tfs->tfs_index = index;
  tfs->tfs_last_dts_norm = AV_NOPTS_VALUE;
  tfs->tfs_last_dts_in = AV_NOPTS_VALUE;
  tfs->tfs_dts_epoch = 0;

  TAILQ_INIT(&tfs->tfs_ptsq);

  LIST_INSERT_HEAD(&tf->tf_streams, tfs, tfs_link);
}
/**
*
*/
/**
 * Handle SMT_START: build a tfstream for every component announced in
 * the streaming_start message, remember whether any of them is video
 * (the reference clock is then taken from the first video I-frame),
 * and reset the transport-wide timestamp reference.
 */
static void
tsfix_start(tsfix_t *tf, streaming_start_t *ss)
{
  int i;
  int hasvideo = 0;

  for(i = 0; i < ss->ss_num_components; i++) {
    const streaming_start_component_t *ssc = &ss->ss_components[i];
    tsfix_add_stream(tf, ssc->ssc_index, ssc->ssc_type);
    hasvideo |= SCT_ISVIDEO(ssc->ssc_type);
  }

  tf->tf_tsref = AV_NOPTS_VALUE;
  tf->tf_hasvideo = hasvideo;
}
/**
*
*/
/**
 * Handle SMT_STOP: discard all per-stream state.  A subsequent
 * SMT_START rebuilds it from scratch.
 */
static void
tsfix_stop(tsfix_t *tf)
{
  tsfix_destroy_streams(tf);
}
#define PTS_MASK 0x1ffffffffLL
/**
*
*/
/**
 * De-wrap and normalize a packet's DTS/PTS so timestamps start at 0
 * (relative to the transport-wide reference clock) and grow
 * monotonically across 33 bit MPEG timestamp wraps, then deliver the
 * packet downstream.
 *
 * Consumes the caller's packet reference: the packet is either dropped
 * (reference released) or passed on inside a streaming message.
 */
static void
normalize_ts(tsfix_t *tf, tfstream_t *tfs, th_pkt_t *pkt)
{
  int64_t dts, d;

  /* Wrap/discontinuity checking is only meaningful for A/V streams */
  int checkts = SCT_ISAUDIO(tfs->tfs_type) || SCT_ISVIDEO(tfs->tfs_type);

  /* No reference clock established yet: nothing to normalize against */
  if(tf->tf_tsref == AV_NOPTS_VALUE) {
    pkt_ref_dec(pkt);
    return;
  }

  /* Subtract the transport wide start offset */
  dts = pkt->pkt_dts - tf->tf_tsref;

  if(tfs->tfs_last_dts_norm == AV_NOPTS_VALUE) {
    if(dts < 0) {
      /* Early packet with negative time stamp, drop those */
      pkt_ref_dec(pkt);
      return;
    }
  } else if(checkts) {
    d = dts + tfs->tfs_dts_epoch - tfs->tfs_last_dts_norm;

    if(d < 0 || d > 90000) {
      /* DTS jumped backwards or more than one second forward */

      if(d < -PTS_MASK || d > -PTS_MASK + 180000) {
        /* Not explainable as a 33 bit wrap: real discontinuity.
           Log the first few occurrences only */
        tfs->tfs_bad_dts++;

        if(tfs->tfs_bad_dts < 5) {
          tvhlog(LOG_ERR, "parser",
                 "transport stream %s, DTS discontinuity. "
                 "DTS = %" PRId64 ", last = %" PRId64,
                 streaming_component_type2txt(tfs->tfs_type),
                 dts, tfs->tfs_last_dts_norm);
        }
      } else {
        /* DTS wrapped, increase upper bits */
        tfs->tfs_dts_epoch += PTS_MASK + 1;
        tfs->tfs_bad_dts = 0;
      }
    } else {
      tfs->tfs_bad_dts = 0;
    }
  }

  /* Fold in the accumulated wrap offset and remember the result for
     the next discontinuity check */
  dts += tfs->tfs_dts_epoch;
  tfs->tfs_last_dts_norm = dts;

  if(pkt->pkt_pts != AV_NOPTS_VALUE) {
    /* Compute delta between PTS and DTS (and watch out for 33 bit wrap) */
    int64_t ptsoff = (pkt->pkt_pts - pkt->pkt_dts) & PTS_MASK;

    pkt->pkt_pts = dts + ptsoff;
  }
  pkt->pkt_dts = dts;

  tsfixprintf("TSFIX: %-12s %d %10"PRId64" %10"PRId64" %10d %10d\n",
	      streaming_component_type2txt(tfs->tfs_type),
	      pkt->pkt_frametype,
	      pkt->pkt_dts,
	      pkt->pkt_pts,
	      pkt->pkt_duration,
	      pkt->pkt_payloadlen);

  streaming_message_t *sm = streaming_msg_create_pkt(pkt);
  streaming_target_deliver2(tf->tf_output, sm);
}
/**
 * Recover missing PTS values for MPEG-2 video by queueing packets and
 * inferring presentation times from frame types:
 *
 *  - B-frames present at their decode time, so PTS = DTS.
 *  - I/P-frames present at the DTS of the *next* I or P frame; the
 *    packet is held on the queue until that frame arrives.
 *
 * Packets leave the queue in arrival order via normalize_ts().
 */
static void
recover_pts_mpeg2video(tsfix_t *tf, tfstream_t *tfs, th_pkt_t *pkt)
{
  th_pktref_t *pr, *srch;

  /* Reference count is transferred to the queue entry */
  pr = malloc(sizeof(th_pktref_t));
  pr->pr_pkt = pkt;

  TAILQ_INSERT_TAIL(&tfs->tfs_ptsq, pr, pr_link);
  tfs->tfs_ptsq_len++;

  /* Drain the head of the queue as long as a PTS can be determined */
  while((pr = TAILQ_FIRST(&tfs->tfs_ptsq)) != NULL) {

    pkt = pr->pr_pkt;

    switch(pkt->pkt_frametype) {
    case PKT_B_FRAME:
      /* B-frames have same PTS as DTS, pass them on */
      pkt->pkt_pts = pkt->pkt_dts;
      tsfixprintf("TSFIX: %-12s PTS b-frame set to %lld\n",
		  streaming_component_type2txt(tfs->tfs_type),
		  pkt->pkt_dts);
      break;

    case PKT_I_FRAME:
    case PKT_P_FRAME:
      /* Presentation occurs at DTS of next I or P frame,
	 try to find it */
      srch = TAILQ_NEXT(pr, pr_link);
      while(1) {
	if(srch == NULL)
	  return; /* not arrived yet, wait */
	if(srch->pr_pkt->pkt_frametype <= PKT_P_FRAME) {
	  pkt->pkt_pts = srch->pr_pkt->pkt_dts;
	  tsfixprintf("TSFIX: %-12s PTS *-frame set to %lld\n",
		      streaming_component_type2txt(tfs->tfs_type),
		      pkt->pkt_pts);
	  break;
	}
	srch = TAILQ_NEXT(srch, pr_link);
      }
      break;
    }

    TAILQ_REMOVE(&tfs->tfs_ptsq, pr, pr_link);
    tfs->tfs_ptsq_len--;
    normalize_ts(tf, tfs, pkt);
    free(pr);
  }
}
/**
* Compute PTS (if not known)
*
* We do this by placing packets on a queue and wait for next I/P
* frame to appear
*/
/**
 * Compute PTS (if not known)
 *
 * Non-video streams present in decode order, so a missing PTS can be
 * copied from DTS directly.  MPEG-2 video needs frame-type based
 * recovery (see recover_pts_mpeg2video); everything else is
 * normalized and delivered at once.
 */
static void
compute_pts(tsfix_t *tf, tfstream_t *tfs, th_pkt_t *pkt)
{
  int validpts;

  // If PTS is missing and this is not a video stream, set it to DTS
  if(pkt->pkt_pts == AV_NOPTS_VALUE && !SCT_ISVIDEO(tfs->tfs_type)) {
    pkt->pkt_pts = pkt->pkt_dts;
    tsfixprintf("TSFIX: %-12s PTS set to %lld\n",
		streaming_component_type2txt(tfs->tfs_type),
		pkt->pkt_pts);
  }

  validpts = pkt->pkt_pts != AV_NOPTS_VALUE && tfs->tfs_ptsq_len == 0;

  /* PTS known and no other packets in queue, deliver at once */
  if(validpts)
    return normalize_ts(tf, tfs, pkt);

  if(tfs->tfs_type == SCT_MPEG2VIDEO)
    return recover_pts_mpeg2video(tf, tfs, pkt);
  else
    return normalize_ts(tf, tfs, pkt);
}
/**
*
*/
/**
 * Handle an SMT_PACKET message: copy the packet (the original message
 * is freed), establish the transport-wide reference clock if needed,
 * extrapolate a missing DTS from the previous packet, and hand off to
 * the PTS computation stage.
 */
static void
tsfix_input_packet(tsfix_t *tf, streaming_message_t *sm)
{
  tfstream_t *tfs;

  /* Work on a private copy so fixups don't mutate the shared packet */
  th_pkt_t *pkt = pkt_copy(sm->sm_data);
  streaming_msg_free(sm);

  /* Locate per-stream state by component index */
  LIST_FOREACH(tfs, &tf->tf_streams, tfs_link)
    if(pkt->pkt_componentindex == tfs->tfs_index)
      break;

  if(tfs == NULL) {
    /* Unknown component, drop */
    pkt_ref_dec(pkt);
    return;
  }

  /* Establish the reference clock: first packet for audio-only
     services, first video I-frame otherwise */
  if(tf->tf_tsref == AV_NOPTS_VALUE &&
     (!tf->tf_hasvideo ||
      (SCT_ISVIDEO(tfs->tfs_type) && pkt->pkt_frametype == PKT_I_FRAME))) {
    /* NOTE(review): pkt_dts may itself be AV_NOPTS_VALUE here, which
       would leave tf_tsref effectively unset — confirm intended */
    tf->tf_tsref = pkt->pkt_dts;
    tsfixprintf("reference clock set to %lld\n", tf->tf_tsref);
  }

  if(pkt->pkt_dts == AV_NOPTS_VALUE) {
    /* DTS missing: extrapolate from the last seen DTS plus one frame
       duration (halved for field-coded pictures) */
    int pdur = pkt->pkt_duration >> pkt->pkt_field;

    if(tfs->tfs_last_dts_in == AV_NOPTS_VALUE) {
      /* No previous DTS to extrapolate from, drop */
      pkt_ref_dec(pkt);
      return;
    }

    pkt->pkt_dts = (tfs->tfs_last_dts_in + pdur) & PTS_MASK;

    tsfixprintf("TSFIX: %-12s DTS set to last %lld +%d == %lld\n",
		streaming_component_type2txt(tfs->tfs_type),
		tfs->tfs_last_dts_in, pdur, pkt->pkt_dts);
  }

  tfs->tfs_last_dts_in = pkt->pkt_dts;

  compute_pts(tf, tfs, pkt);
}
/**
*
*/
/**
 * Streaming target entry point.  Packet messages are consumed by the
 * fixup pipeline; start/stop messages update internal state and are
 * then forwarded, all other message types pass through untouched.
 */
static void
tsfix_input(void *opaque, streaming_message_t *sm)
{
  tsfix_t *tf = opaque;

  switch(sm->sm_type) {
  case SMT_PACKET:
    /* tsfix_input_packet() takes ownership of the message */
    tsfix_input_packet(tf, sm);
    return;

  case SMT_START:
    tsfix_start(tf, sm->sm_data);
    break;

  case SMT_STOP:
    tsfix_stop(tf);
    break;

  case SMT_EXIT:
  case SMT_TRANSPORT_STATUS:
  case SMT_NOSTART:
  case SMT_MPEGTS:
    break;
  }

  /* Forward the original message downstream */
  streaming_target_deliver2(tf->tf_output, sm);
}
/**
*
*/
/**
 * Create a tsfix pipeline element that delivers fixed-up packets to
 * 'output'.  Returns the input pad; free with tsfix_destroy().
 */
streaming_target_t *
tsfix_create(streaming_target_t *output)
{
  tsfix_t *tf = calloc(1, sizeof(tsfix_t));

  tf->tf_output = output;

  streaming_target_init(&tf->tf_input, tsfix_input, tf, 0);
  return &tf->tf_input;
}
/**
*
*/
/**
 * Destroy a tsfix element created by tsfix_create().  'pad' is the
 * input pad returned by tsfix_create(), which is embedded first in
 * the tsfix struct, so the cast recovers the full object.
 */
void
tsfix_destroy(streaming_target_t *pad)
{
  tsfix_t *tf = (tsfix_t *)pad;

  tsfix_destroy_streams(tf);
  free(tf);
}

29
src/plumbing/tsfix.h Normal file
View file

@ -0,0 +1,29 @@
/**
* Timestamp fixup
* Copyright (C) 2010 Andreas Öman
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TSFIX_H__
#define TSFIX_H__
#include "tvhead.h"
streaming_target_t *tsfix_create(streaming_target_t *output);
void tsfix_destroy(streaming_target_t *gh);
#endif // TSFIX_H__

View file

@ -70,9 +70,6 @@ stream_init(th_stream_t *st)
st->st_curpts = AV_NOPTS_VALUE;
st->st_prevdts = AV_NOPTS_VALUE;
st->st_last_dts = AV_NOPTS_VALUE;
st->st_dts_epoch = 0;
st->st_pcr_real_last = AV_NOPTS_VALUE;
st->st_pcr_last = AV_NOPTS_VALUE;
st->st_pcr_drift = 0;
@ -157,15 +154,6 @@ stream_clean(th_stream_t *st)
free(st->st_global_data);
st->st_global_data = NULL;
st->st_global_data_len = 0;
/* Clear PTS queue */
pktref_clear_queue(&st->st_ptsq);
st->st_ptsq_len = 0;
/* Clear durationq */
pktref_clear_queue(&st->st_durationq);
}
@ -256,7 +244,6 @@ transport_start(th_transport_t *t, unsigned int weight, int force_start)
assert(t->tht_status != TRANSPORT_RUNNING);
t->tht_streaming_status = 0;
t->tht_dts_start = AV_NOPTS_VALUE;
t->tht_pcr_drift = 0;
if((r = t->tht_start_feed(t, weight, force_start)))
@ -657,9 +644,6 @@ transport_stream_create(th_transport_t *t, int pid,
st->st_pid = pid;
st->st_demuxer_fd = -1;
TAILQ_INIT(&st->st_ptsq);
TAILQ_INIT(&st->st_durationq);
avgstat_init(&st->st_rate, 10);
avgstat_init(&st->st_cc_errors, 10);

View file

@ -163,7 +163,9 @@ typedef enum {
SCT_TEXTSUB,
} streaming_component_type_t;
#define SCT_ISVIDEO(t) ((t) == SCT_MPEG2VIDEO || (t) == SCT_H264)
#define SCT_ISAUDIO(t) ((t) == SCT_MPEG2AUDIO || (t) == SCT_AC3 || \
(t) == SCT_AAC)
/**
* A streaming pad generates data.
* It has one or more streaming targets attached to it.
@ -428,26 +430,13 @@ typedef struct th_stream {
int st_height;
int st_meta_change;
/* DTS generator */
int64_t st_dts_epoch; /* upper bits (auto generated) */
int64_t st_last_dts;
int st_bad_dts;
/* Codec */
struct AVCodecContext *st_ctx;
struct AVCodecParserContext *st_parser;
/* Temporary frame store for calculating PTS */
struct th_pktref_queue st_ptsq;
int st_ptsq_len;
/* Temporary frame store for calculating duration */
struct th_pktref_queue st_durationq;
/* CA ID's on this stream */
struct caid_list st_caids;
@ -753,11 +742,6 @@ typedef struct th_transport {
int tht_scrambled_seen;
int tht_caid;
/**
* Used by parsing code to normalize timestamp to zero
*/
int64_t tht_dts_start;
/**
* PCR drift compensation. This should really be per-packet.
*/