*/const initPTSFn=(timestamp,timeOffset,initPTS)=>{if(isFiniteNumber(timestamp)){return timestamp*90;}const init90kHz=initPTS?initPTS.baseTime*90000/initPTS.timescale:0;return timeOffset*90000+init90kHz;};/**
* ADTS parser helper
* @link https://wiki.multimedia.cx/index.php?title=ADTS
*/function getAudioConfig(observer,data,offset,audioCodec){let adtsObjectType;let adtsExtensionSamplingIndex;let adtsChannelConfig;let config;const userAgent=navigator.userAgent.toLowerCase();const manifestCodec=audioCodec;const adtsSamplingRates=[96000,88200,64000,48000,44100,32000,24000,22050,16000,12000,11025,8000,7350];// byte 2
adtsObjectType=((data[offset+2]&0xc0)>>>6)+1;const adtsSamplingIndex=(data[offset+2]&0x3c)>>>2;if(adtsSamplingIndex>adtsSamplingRates.length-1){const error=new Error(`invalid ADTS sampling index:${adtsSamplingIndex}`);observer.emit(Events.ERROR,Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.FRAG_PARSING_ERROR,fatal:true,error,reason:error.message});return;}adtsChannelConfig=(data[offset+2]&0x01)<<2;// byte 3
adtsChannelConfig|=(data[offset+3]&0xc0)>>>6;logger.log(`manifest codec:${audioCodec}, ADTS type:${adtsObjectType}, samplingIndex:${adtsSamplingIndex}`);// firefox: freq less than 24kHz = AAC SBR (HE-AAC)
if(/firefox/i.test(userAgent)){if(adtsSamplingIndex>=6){adtsObjectType=5;config=new Array(4);// HE-AAC uses SBR (Spectral Band Replication) , high frequencies are constructed from low frequencies
// there is a factor 2 between frame sample rate and output sample rate
// multiply frequency by 2 (see table below, equivalent to substract 3)
adtsExtensionSamplingIndex=adtsSamplingIndex-3;}else {adtsObjectType=2;config=new Array(2);adtsExtensionSamplingIndex=adtsSamplingIndex;}// Android : always use AAC
}else if(userAgent.indexOf('android')!==-1){adtsObjectType=2;config=new Array(2);adtsExtensionSamplingIndex=adtsSamplingIndex;}else {/* for other browsers (Chrome/Vivaldi/Opera ...)
always force audio type to be HE-AAC SBR, as some browsers do not support audio codec switch properly (like Chrome ...)
*/adtsObjectType=5;config=new Array(4);// if (manifest codec is HE-AAC or HE-AACv2) OR (manifest codec not specified AND frequency less than 24kHz)
if(audioCodec&&(audioCodec.indexOf('mp4a.40.29')!==-1||audioCodec.indexOf('mp4a.40.5')!==-1)||!audioCodec&&adtsSamplingIndex>=6){// HE-AAC uses SBR (Spectral Band Replication) , high frequencies are constructed from low frequencies
// there is a factor 2 between frame sample rate and output sample rate
// multiply frequency by 2 (see table below, equivalent to substract 3)
adtsExtensionSamplingIndex=adtsSamplingIndex-3;}else {// if (manifest codec is AAC) AND (frequency less than 24kHz AND nb channel is 1) OR (manifest codec not specified and mono audio)
// Chrome fails to play back with low frequency AAC LC mono when initialized with HE-AAC. This is not a problem with stereo.
if(audioCodec&&audioCodec.indexOf('mp4a.40.2')!==-1&&(adtsSamplingIndex>=6&&adtsChannelConfig===1||/vivaldi/i.test(userAgent))||!audioCodec&&adtsChannelConfig===1){adtsObjectType=2;config=new Array(2);}adtsExtensionSamplingIndex=adtsSamplingIndex;}}/* refer to http://wiki.multimedia.cx/index.php?title=MPEG-4_Audio#Audio_Specific_Config
ISO 14496-3 (AAC).pdf - Table 1.13 — Syntax of AudioSpecificConfig()
Audio Profile / Audio Object Type
0: Null
1: AAC Main
2: AAC LC (Low Complexity)
3: AAC SSR (Scalable Sample Rate)
4: AAC LTP (Long Term Prediction)
5: SBR (Spectral Band Replication)
6: AAC Scalable
sampling freq
0: 96000 Hz
1: 88200 Hz
2: 64000 Hz
3: 48000 Hz
4: 44100 Hz
5: 32000 Hz
6: 24000 Hz
7: 22050 Hz
8: 16000 Hz
9: 12000 Hz
10: 11025 Hz
11: 8000 Hz
12: 7350 Hz
13: Reserved
14: Reserved
15: frequency is written explictly
Channel Configurations
These are the channel configurations:
0: Defined in AOT Specifc Config
1: 1 channel: front-center
2: 2 channels: front-left, front-right
*/ // audioObjectType = profile => profile, the MPEG-4 Audio Object Type minus 1
config[0]=adtsObjectType<<3;// samplingFrequencyIndex
config[0]|=(adtsSamplingIndex&0x0e)>>1;config[1]|=(adtsSamplingIndex&0x01)<<7;// channelConfiguration
config[1]|=adtsChannelConfig<<3;if(adtsObjectType===5){// adtsExtensionSamplingIndex
config[1]|=(adtsExtensionSamplingIndex&0x0e)>>1;config[2]=(adtsExtensionSamplingIndex&0x01)<<7;// adtsObjectType (force to 2, chrome is checking that object type is less than 5 ???
// https://chromium.googlesource.com/chromium/src.git/+/master/media/formats/mp4/aac.cc
config[2]|=2<<2;config[3]=0;}return {config,samplerate:adtsSamplingRates[adtsSamplingIndex],channelCount:adtsChannelConfig,codec:'mp4a.40.'+adtsObjectType,manifestCodec};}function isHeaderPattern$1(data,offset){return data[offset]===0xff&&(data[offset+1]&0xf6)===0xf0;}function getHeaderLength(data,offset){return data[offset+1]&0x01?7:9;}function getFullFrameLength(data,offset){return (data[offset+3]&0x03)<<11|data[offset+4]<<3|(data[offset+5]&0xe0)>>>5;}function canGetFrameLength(data,offset){return offset+5=data.length){return false;}// ADTS frame Length
const frameLength=getFullFrameLength(data,offset);if(frameLength<=headerLength){return false;}const newOffset=offset+frameLength;return newOffset===data.length||isHeader$1(data,newOffset);}return false;}function initTrackConfig(track,observer,data,offset,audioCodec){if(!track.samplerate){const config=getAudioConfig(observer,data,offset,audioCodec);if(!config){return;}track.config=config.config;track.samplerate=config.samplerate;track.channelCount=config.channelCount;track.codec=config.codec;track.manifestCodec=config.manifestCodec;logger.log(`parsed codec:${track.codec}, rate:${config.samplerate}, channels:${config.channelCount}`);}}function getFrameDuration(samplerate){return 1024*90000/samplerate;}function parseFrameHeader(data,offset){// The protection skip bit tells us if we have 2 bytes of CRC data at the end of the ADTS header
const headerLength=getHeaderLength(data,offset);if(offset+headerLength<=data.length){// retrieve frame size
const frameLength=getFullFrameLength(data,offset)-headerLength;if(frameLength>0){// logger.log(`AAC frame, offset/length/total/pts:${offset+headerLength}/${frameLength}/${data.byteLength}`);
return {headerLength,frameLength};}}}function appendFrame$2(track,data,offset,pts,frameIndex){const frameDuration=getFrameDuration(track.samplerate);const stamp=pts+frameIndex*frameDuration;const header=parseFrameHeader(data,offset);let unit;if(header){const{frameLength,headerLength}=header;const _length=headerLength+frameLength;const missing=Math.max(0,offset+_length-data.length);// logger.log(`AAC frame ${frameIndex}, pts:${stamp} length@offset/total: ${frameLength}@${offset+headerLength}/${data.byteLength} missing: ${missing}`);
if(missing){unit=new Uint8Array(_length-headerLength);unit.set(data.subarray(offset+headerLength,data.length),0);}else {unit=data.subarray(offset+headerLength,offset+_length);}const _sample={unit,pts:stamp};if(!missing){track.samples.push(_sample);}return {sample:_sample,length:_length,missing};}// overflow incomplete header
const length=data.length-offset;unit=new Uint8Array(length);unit.set(data.subarray(offset,data.length),0);const sample={unit,pts:stamp};return {sample,length,missing:-1};}/**
* MPEG parser helper
*/let chromeVersion$1=null;const BitratesMap=[32,64,96,128,160,192,224,256,288,320,352,384,416,448,32,48,56,64,80,96,112,128,160,192,224,256,320,384,32,40,48,56,64,80,96,112,128,160,192,224,256,320,32,48,56,64,80,96,112,128,144,160,176,192,224,256,8,16,24,32,40,48,56,64,80,96,112,128,144,160];const SamplingRateMap=[44100,48000,32000,22050,24000,16000,11025,12000,8000];const SamplesCoefficients=[// MPEG 2.5
[0,// Reserved
72,// Layer3
144,// Layer2
12// Layer1
],// Reserved
[0,// Reserved
0,// Layer3
0,// Layer2
0// Layer1
],// MPEG 2
[0,// Reserved
72,// Layer3
144,// Layer2
12// Layer1
],// MPEG 1
[0,// Reserved
144,// Layer3
144,// Layer2
12// Layer1
]];const BytesInSlot=[0,// Reserved
1,// Layer3
1,// Layer2
4// Layer1
];function appendFrame$1(track,data,offset,pts,frameIndex){// Using http://www.datavoyage.com/mpgscript/mpeghdr.htm as a reference
if(offset+24>data.length){return;}const header=parseHeader(data,offset);if(header&&offset+header.frameLength<=data.length){const frameDuration=header.samplesPerFrame*90000/header.sampleRate;const stamp=pts+frameIndex*frameDuration;const sample={unit:data.subarray(offset,offset+header.frameLength),pts:stamp,dts:stamp};track.config=[];track.channelCount=header.channelCount;track.samplerate=header.sampleRate;track.samples.push(sample);return {sample,length:header.frameLength,missing:0};}}function parseHeader(data,offset){const mpegVersion=data[offset+1]>>3&3;const mpegLayer=data[offset+1]>>1&3;const bitRateIndex=data[offset+2]>>4&15;const sampleRateIndex=data[offset+2]>>2&3;if(mpegVersion!==1&&bitRateIndex!==0&&bitRateIndex!==15&&sampleRateIndex!==3){const paddingBit=data[offset+2]>>1&1;const channelMode=data[offset+3]>>6;const columnInBitrates=mpegVersion===3?3-mpegLayer:mpegLayer===3?3:4;const bitRate=BitratesMap[columnInBitrates*14+bitRateIndex-1]*1000;const columnInSampleRates=mpegVersion===3?0:mpegVersion===2?1:2;const sampleRate=SamplingRateMap[columnInSampleRates*3+sampleRateIndex];const channelCount=channelMode===3?1:2;// If bits of channel mode are `11` then it is a single channel (Mono)
const sampleCoefficient=SamplesCoefficients[mpegVersion][mpegLayer];const bytesInSlot=BytesInSlot[mpegLayer];const samplesPerFrame=sampleCoefficient*8*bytesInSlot;const frameLength=Math.floor(sampleCoefficient*bitRate/sampleRate+paddingBit)*bytesInSlot;if(chromeVersion$1===null){const userAgent=navigator.userAgent||'';const result=userAgent.match(/Chrome\/(\d+)/i);chromeVersion$1=result?parseInt(result[1]):0;}const needChromeFix=!!chromeVersion$1&&chromeVersion$1<=87;if(needChromeFix&&mpegLayer===2&&bitRate>=224000&&channelMode===0){// Work around bug in Chromium by setting channelMode to dual-channel (01) instead of stereo (00)
data[offset+3]=data[offset+3]|0x80;}return {sampleRate,channelCount,frameLength,samplesPerFrame};}}function isHeaderPattern(data,offset){return data[offset]===0xff&&(data[offset+1]&0xe0)===0xe0&&(data[offset+1]&0x06)!==0x00;}function isHeader(data,offset){// Look for MPEG header | 1111 1111 | 111X XYZX | where X can be either 0 or 1 and Y or Z should be 1
// Layer bits (position 14 and 15) in header should be always different from 0 (Layer I or Layer II or Layer III)
// More info http://www.mp3-tech.org/programmer/frame_header.html
return offset+1{const emsgInfo=parseEmsg(data);if(emsgSchemePattern.test(emsgInfo.schemeIdUri)){const pts=isFiniteNumber(emsgInfo.presentationTime)?emsgInfo.presentationTime/emsgInfo.timeScale:timeOffset+emsgInfo.presentationTimeDelta/emsgInfo.timeScale;let duration=emsgInfo.eventDuration===0xffffffff?Number.POSITIVE_INFINITY:emsgInfo.eventDuration/emsgInfo.timeScale;// Safari takes anything <= 0.001 seconds and maps it to Infinity
if(duration<=0.001){duration=Number.POSITIVE_INFINITY;}const payload=emsgInfo.payload;id3Track.samples.push({data:payload,len:payload.byteLength,dts:pts,pts:pts,type:MetadataSchema.emsg,duration:duration});}});}}return id3Track;}demuxSampleAes(data,keyData,timeOffset){return Promise.reject(new Error('The MP4 demuxer does not support SAMPLE-AES decryption'));}destroy(){}}const getAudioBSID=(data,offset)=>{// check the bsid to confirm ac-3 | ec-3
let bsid=0;let numBits=5;offset+=numBits;const temp=new Uint32Array(1);// unsigned 32 bit for temporary storage
const mask=new Uint32Array(1);// unsigned 32 bit mask value
const byte=new Uint8Array(1);// unsigned 8 bit for temporary storage
while(numBits>0){byte[0]=data[offset];// read remaining bits, upto 8 bits at a time
const bits=Math.min(numBits,8);const shift=8-bits;mask[0]=0xff000000>>>24+shift<>shift;bsid=!bsid?temp[0]:bsid<data.length){return -1;// not enough bytes left
}if(data[start]!==0x0b||data[start+1]!==0x77){return -1;// invalid magic
}// get sample rate
const samplingRateCode=data[start+4]>>6;if(samplingRateCode>=3){return -1;// invalid sampling rate
}const samplingRateMap=[48000,44100,32000];const sampleRate=samplingRateMap[samplingRateCode];// get frame size
const frameSizeCode=data[start+4]&0x3f;const frameSizeMap=[64,69,96,64,70,96,80,87,120,80,88,120,96,104,144,96,105,144,112,121,168,112,122,168,128,139,192,128,140,192,160,174,240,160,175,240,192,208,288,192,209,288,224,243,336,224,244,336,256,278,384,256,279,384,320,348,480,320,349,480,384,417,576,384,418,576,448,487,672,448,488,672,512,557,768,512,558,768,640,696,960,640,697,960,768,835,1152,768,836,1152,896,975,1344,896,976,1344,1024,1114,1536,1024,1115,1536,1152,1253,1728,1152,1254,1728,1280,1393,1920,1280,1394,1920];const frameLength=frameSizeMap[frameSizeCode*3+samplingRateCode]*2;if(start+frameLength>data.length){return -1;}// get channel count
const channelMode=data[start+6]>>5;let skipCount=0;if(channelMode===2){skipCount+=2;}else {if(channelMode&1&&channelMode!==1){skipCount+=2;}if(channelMode&4){skipCount+=2;}}const lfeon=(data[start+6]<<8|data[start+7])>>12-skipCount&1;const channelsMap=[2,1,2,3,3,4,4,5];const channelCount=channelsMap[channelMode]+lfeon;// build dac3 box
const bsid=data[start+5]>>3;const bsmod=data[start+5]&7;const config=new Uint8Array([samplingRateCode<<6|bsid<<1|bsmod>>2,(bsmod&3)<<6|channelMode<<3|lfeon<<2|frameSizeCode>>4,frameSizeCode<<4&0xe0]);const frameDuration=1536/sampleRate*90000;const stamp=pts+frameIndex*frameDuration;const unit=data.subarray(start,start+frameLength);track.config=config;track.channelCount=channelCount;track.samplerate=sampleRate;track.samples.push({unit,pts:stamp});return frameLength;}class BaseVideoParser{constructor(){this.VideoSample=null;}createVideoSample(key,pts,dts,debug){return {key,frame:false,pts,dts,units:[],debug,length:0};}getLastNalUnit(samples){var _VideoSample;let VideoSample=this.VideoSample;let lastUnit;// try to fallback to previous sample if current one is empty
if(!VideoSample||VideoSample.units.length===0){VideoSample=samples[samples.length-1];}if((_VideoSample=VideoSample)!=null&&_VideoSample.units){const units=VideoSample.units;lastUnit=units[units.length-1];}return lastUnit;}pushAccessUnit(VideoSample,videoTrack){if(VideoSample.units.length&&VideoSample.frame){// if sample does not have PTS/DTS, patch with last sample PTS/DTS
if(VideoSample.pts===undefined){const samples=videoTrack.samples;const nbSamples=samples.length;if(nbSamples){const lastSample=samples[nbSamples-1];VideoSample.pts=lastSample.pts;VideoSample.dts=lastSample.dts;}else {// dropping samples, no timestamp found
videoTrack.dropped++;return;}}videoTrack.samples.push(VideoSample);}if(VideoSample.debug.length){logger.log(VideoSample.pts+'/'+VideoSample.dts+':'+VideoSample.debug);}}}/**
* Parser for exponential Golomb codes, a variable-bitwidth number encoding scheme used by h264.
*/class ExpGolomb{constructor(data){this.data=void 0;this.bytesAvailable=void 0;this.word=void 0;this.bitsAvailable=void 0;this.data=data;// the number of bytes left to examine in this.data
this.bytesAvailable=data.byteLength;// the current word being examined
this.word=0;// :uint
// the number of bits left to examine in the current word
this.bitsAvailable=0;// :uint
}// ():void
loadWord(){const data=this.data;const bytesAvailable=this.bytesAvailable;const position=data.byteLength-bytesAvailable;const workingBytes=new Uint8Array(4);const availableBytes=Math.min(4,bytesAvailable);if(availableBytes===0){throw new Error('no bytes available');}workingBytes.set(data.subarray(position,position+availableBytes));this.word=new DataView(workingBytes.buffer).getUint32(0);// track the amount of this.data that has been processed
this.bitsAvailable=availableBytes*8;this.bytesAvailable-=availableBytes;}// (count:int):void
skipBits(count){let skipBytes;// :int
count=Math.min(count,this.bytesAvailable*8+this.bitsAvailable);if(this.bitsAvailable>count){this.word<<=count;this.bitsAvailable-=count;}else {count-=this.bitsAvailable;skipBytes=count>>3;count-=skipBytes<<3;this.bytesAvailable-=skipBytes;this.loadWord();this.word<<=count;this.bitsAvailable-=count;}}// (size:int):uint
readBits(size){let bits=Math.min(this.bitsAvailable,size);// :uint
const valu=this.word>>>32-bits;// :uint
if(size>32){logger.error('Cannot read more than 32 bits at a time');}this.bitsAvailable-=bits;if(this.bitsAvailable>0){this.word<<=bits;}else if(this.bytesAvailable>0){this.loadWord();}else {throw new Error('no bits available');}bits=size-bits;if(bits>0&&this.bitsAvailable){return valu<>>leadingZeroCount)!==0){// the first bit of working word is 1
this.word<<=leadingZeroCount;this.bitsAvailable-=leadingZeroCount;return leadingZeroCount;}}// we exhausted word and still have not found a 1
this.loadWord();return leadingZeroCount+this.skipLZ();}// ():void
skipUEG(){this.skipBits(1+this.skipLZ());}// ():void
skipEG(){this.skipBits(1+this.skipLZ());}// ():uint
readUEG(){const clz=this.skipLZ();// :uint
return this.readBits(clz+1)-1;}// ():int
readEG(){const valu=this.readUEG();// :int
if(0x01&valu){// the number is odd if the low order bit is set
return 1+valu>>>1;// add 1 to make it even, and divide by 2
}else {return -1*(valu>>>1);// divide by two then make it negative
}}// Some convenience functions
// :Boolean
readBoolean(){return this.readBits(1)===1;}// ():int
readUByte(){return this.readBits(8);}// ():int
readUShort(){return this.readBits(16);}// ():int
readUInt(){return this.readBits(32);}/**
* Advance the ExpGolomb decoder past a scaling list. The scaling
* list is optionally transmitted as part of a sequence parameter
* set and is not relevant to transmuxing.
* @param count the number of entries in this scaling list
* @see Recommendation ITU-T H.264, Section 7.3.2.1.1.1
*/skipScalingList(count){let lastScale=8;let nextScale=8;let deltaScale;for(let j=0;j{var _VideoSample2;switch(unit.type){// NDR
case 1:{let iskey=false;push=true;const data=unit.data;// only check slice type to detect KF in case SPS found in same packet (any keyframe is preceded by SPS ...)
if(spsfound&&data.length>4){// retrieve slice type by parsing beginning of NAL unit (follow H264 spec, slice_header definition) to detect keyframe embedded in NDR
const sliceType=new ExpGolomb(data).readSliceType();// 2 : I slice, 4 : SI slice, 7 : I slice, 9: SI slice
// SI slice : A slice that is coded using intra prediction only and using quantisation of the prediction samples.
// An SI slice can be coded such that its decoded samples can be constructed identically to an SP slice.
// I slice: A slice that is not an SI slice that is decoded using intra prediction only.
// if (sliceType === 2 || sliceType === 7) {
if(sliceType===2||sliceType===4||sliceType===7||sliceType===9){iskey=true;}}if(iskey){var _VideoSample;// if we have non-keyframe data already, that cannot belong to the same frame as a keyframe, so force a push
if((_VideoSample=VideoSample)!=null&&_VideoSample.frame&&!VideoSample.key){this.pushAccessUnit(VideoSample,track);VideoSample=this.VideoSample=null;}}if(!VideoSample){VideoSample=this.VideoSample=this.createVideoSample(true,pes.pts,pes.dts,'');}VideoSample.frame=true;VideoSample.key=iskey;break;// IDR
}case 5:push=true;// handle PES not starting with AUD
// if we have frame data already, that cannot belong to the same frame, so force a push
if((_VideoSample2=VideoSample)!=null&&_VideoSample2.frame&&!VideoSample.key){this.pushAccessUnit(VideoSample,track);VideoSample=this.VideoSample=null;}if(!VideoSample){VideoSample=this.VideoSample=this.createVideoSample(true,pes.pts,pes.dts,'');}VideoSample.key=true;VideoSample.frame=true;break;// SEI
case 6:{push=true;parseSEIMessageFromNALu(unit.data,1,pes.pts,textTrack.samples);break;// SPS
}case 7:{var _track$pixelRatio,_track$pixelRatio2;push=true;spsfound=true;const sps=unit.data;const expGolombDecoder=new ExpGolomb(sps);const config=expGolombDecoder.readSPS();if(!track.sps||track.width!==config.width||track.height!==config.height||((_track$pixelRatio=track.pixelRatio)==null?void 0:_track$pixelRatio[0])!==config.pixelRatio[0]||((_track$pixelRatio2=track.pixelRatio)==null?void 0:_track$pixelRatio2[1])!==config.pixelRatio[1]){track.width=config.width;track.height=config.height;track.pixelRatio=config.pixelRatio;track.sps=[sps];track.duration=duration;const codecarray=sps.subarray(1,4);let codecstring='avc1.';for(let i=0;i<3;i++){let h=codecarray[i].toString(16);if(h.length<2){h='0'+h;}codecstring+=h;}track.codec=codecstring;}break;}// PPS
case 8:push=true;track.pps=[unit.data];break;// AUD
case 9:push=true;track.audFound=true;if(VideoSample){this.pushAccessUnit(VideoSample,track);}VideoSample=this.VideoSample=this.createVideoSample(false,pes.pts,pes.dts,'');break;// Filler Data
case 12:push=true;break;default:push=false;if(VideoSample){VideoSample.debug+='unknown NAL '+unit.type+' ';}break;}if(VideoSample&&push){const units=VideoSample.units;units.push(unit);}});// if last PES packet, push samples
if(last&&VideoSample){this.pushAccessUnit(VideoSample,track);this.VideoSample=null;}}parseAVCNALu(track,array){const len=array.byteLength;let state=track.naluState||0;const lastState=state;const units=[];let i=0;let value;let overflow;let unitType;let lastUnitStart=-1;let lastUnitType=0;// logger.log('PES:' + Hex.hexDump(array));
if(state===-1){// special use case where we found 3 or 4-byte start codes exactly at the end of previous PES packet
lastUnitStart=0;// NALu type is value read from offset 0
lastUnitType=array[0]&0x1f;state=0;i=1;}while(i=0){const unit={data:array.subarray(lastUnitStart,overflow),type:lastUnitType};// logger.log('pushing NALU, type/size:' + unit.type + '/' + unit.data.byteLength);
units.push(unit);}else {// lastUnitStart is undefined => this is the first start code found in this PES packet
// first check if start code delimiter is overlapping between 2 PES packets,
// ie it started in last packet (lastState not zero)
// and ended at the beginning of this PES packet (i <= 4 - lastState)
const lastUnit=this.getLastNalUnit(track.samples);if(lastUnit){if(lastState&&i<=4-lastState){// start delimiter overlapping between PES packets
// strip start delimiter bytes from the end of last NAL unit
// check if lastUnit had a state different from zero
if(lastUnit.state){// strip last bytes
lastUnit.data=lastUnit.data.subarray(0,lastUnit.data.byteLength-lastState);}}// If NAL units are not starting right at the beginning of the PES packet, push preceding data into previous NAL unit.
if(overflow>0){// logger.log('first NALU found with overflow:' + overflow);
lastUnit.data=appendUint8Array(lastUnit.data,array.subarray(0,overflow));lastUnit.state=0;}}}// check if we can read unit type
if(i=0&&state>=0){const unit={data:array.subarray(lastUnitStart,len),type:lastUnitType,state:state};units.push(unit);// logger.log('pushing NALU, type/size/state:' + unit.type + '/' + unit.data.byteLength + '/' + state);
}// no NALu found
if(units.length===0){// append pes.data to previous NAL unit
const lastUnit=this.getLastNalUnit(track.samples);if(lastUnit){lastUnit.data=appendUint8Array(lastUnit.data,array);}}track.naluState=state;return units;}}/**
* SAMPLE-AES decrypter
*/class SampleAesDecrypter{constructor(observer,config,keyData){this.keyData=void 0;this.decrypter=void 0;this.keyData=keyData;this.decrypter=new Decrypter(config,{removePKCS7Padding:false});}decryptBuffer(encryptedData){return this.decrypter.decrypt(encryptedData,this.keyData.key.buffer,this.keyData.iv.buffer);}// AAC - encrypt all full 16 bytes blocks starting from offset 16
decryptAacSample(samples,sampleIndex,callback){const curUnit=samples[sampleIndex].unit;if(curUnit.length<=16){// No encrypted portion in this sample (first 16 bytes is not
// encrypted, see https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/HLS_Sample_Encryption/Encryption/Encryption.html),
return;}const encryptedData=curUnit.subarray(16,curUnit.length-curUnit.length%16);const encryptedBuffer=encryptedData.buffer.slice(encryptedData.byteOffset,encryptedData.byteOffset+encryptedData.length);this.decryptBuffer(encryptedBuffer).then(decryptedBuffer=>{const decryptedData=new Uint8Array(decryptedBuffer);curUnit.set(decryptedData,16);if(!this.decrypter.isSync()){this.decryptAacSamples(samples,sampleIndex+1,callback);}});}decryptAacSamples(samples,sampleIndex,callback){for(;;sampleIndex++){if(sampleIndex>=samples.length){callback();return;}if(samples[sampleIndex].unit.length<32){continue;}this.decryptAacSample(samples,sampleIndex,callback);if(!this.decrypter.isSync()){return;}}}// AVC - encrypt one 16 bytes block out of ten, starting from offset 32
getAvcEncryptedData(decodedData){const encryptedDataLen=Math.floor((decodedData.length-48)/160)*16+16;const encryptedData=new Int8Array(encryptedDataLen);let outputPos=0;for(let inputPos=32;inputPos{curUnit.data=this.getAvcDecryptedUnit(decodedData,decryptedBuffer);if(!this.decrypter.isSync()){this.decryptAvcSamples(samples,sampleIndex,unitIndex+1,callback);}});}decryptAvcSamples(samples,sampleIndex,unitIndex,callback){if(samples instanceof Uint8Array){throw new Error('Cannot decrypt samples of type Uint8Array');}for(;;sampleIndex++,unitIndex=0){if(sampleIndex>=samples.length){callback();return;}const curUnits=samples[sampleIndex].units;for(;;unitIndex++){if(unitIndex>=curUnits.length){break;}const curUnit=curUnits[unitIndex];if(curUnit.data.length<=48||curUnit.type!==1&&curUnit.type!==5){continue;}this.decryptAvcSample(samples,sampleIndex,unitIndex,callback,curUnit);if(!this.decrypter.isSync()){return;}}}}}const PACKET_LENGTH=188;class TSDemuxer{constructor(observer,config,typeSupported){this.observer=void 0;this.config=void 0;this.typeSupported=void 0;this.sampleAes=null;this.pmtParsed=false;this.audioCodec=void 0;this.videoCodec=void 0;this._duration=0;this._pmtId=-1;this._videoTrack=void 0;this._audioTrack=void 0;this._id3Track=void 0;this._txtTrack=void 0;this.aacOverFlow=null;this.remainderData=null;this.videoParser=void 0;this.observer=observer;this.config=config;this.typeSupported=typeSupported;this.videoParser=new AvcVideoParser();}static probe(data){const syncOffset=TSDemuxer.syncOffset(data);if(syncOffset>0){logger.warn(`MPEG2-TS detected but first sync word found @ offset ${syncOffset}`);}return syncOffset!==-1;}static syncOffset(data){const length=data.length;let scanwindow=Math.min(PACKET_LENGTH*5,length-PACKET_LENGTH)+1;let i=0;while(i1&&(packetStart===0&&tsPackets>2||j+PACKET_LENGTH>scanwindow)){return packetStart;}}else if(tsPackets){// Exit if sync word found, but does not contain contiguous packets
return -1;}else {break;}}i++;}return -1;}/**
* Creates a track model internal to demuxer used to drive remuxing input
*/static createTrack(type,duration){return {container:type==='video'||type==='audio'?'video/mp2t':undefined,type,id:RemuxerTrackIdConfig[type],pid:-1,inputTimeScale:90000,sequenceNumber:0,samples:[],dropped:0,duration:type==='audio'?duration:undefined};}/**
* Initializes a new init segment on the demuxer/remuxer interface. Needed for discontinuities/track-switches (or at stream start)
* Resets all internal track instances of the demuxer.
*/resetInitSegment(initSegment,audioCodec,videoCodec,trackDuration){this.pmtParsed=false;this._pmtId=-1;this._videoTrack=TSDemuxer.createTrack('video');this._audioTrack=TSDemuxer.createTrack('audio',trackDuration);this._id3Track=TSDemuxer.createTrack('id3');this._txtTrack=TSDemuxer.createTrack('text');this._audioTrack.segmentCodec='aac';// flush any partial content
this.aacOverFlow=null;this.remainderData=null;this.audioCodec=audioCodec;this.videoCodec=videoCodec;this._duration=trackDuration;}resetTimeStamp(){}resetContiguity(){const{_audioTrack,_videoTrack,_id3Track}=this;if(_audioTrack){_audioTrack.pesData=null;}if(_videoTrack){_videoTrack.pesData=null;}if(_id3Track){_id3Track.pesData=null;}this.aacOverFlow=null;this.remainderData=null;}demux(data,timeOffset,isSampleAes=false,flush=false){if(!isSampleAes){this.sampleAes=null;}let pes;const videoTrack=this._videoTrack;const audioTrack=this._audioTrack;const id3Track=this._id3Track;const textTrack=this._txtTrack;let videoPid=videoTrack.pid;let videoData=videoTrack.pesData;let audioPid=audioTrack.pid;let id3Pid=id3Track.pid;let audioData=audioTrack.pesData;let id3Data=id3Track.pesData;let unknownPID=null;let pmtParsed=this.pmtParsed;let pmtId=this._pmtId;let len=data.length;if(this.remainderData){data=appendUint8Array(this.remainderData,data);len=data.length;this.remainderData=null;}if(len>4;// if an adaption field is present, its length is specified by the fifth byte of the TS packet header.
let offset;if(atf>1){offset=start+5+data[start+4];// continue if there is only adaptation field
if(offset===start+PACKET_LENGTH){continue;}}else {offset=start+4;}switch(pid){case videoPid:if(stt){if(videoData&&(pes=parsePES(videoData))){this.videoParser.parseAVCPES(videoTrack,textTrack,pes,false,this._duration);}videoData={data:[],size:0};}if(videoData){videoData.data.push(data.subarray(offset,start+PACKET_LENGTH));videoData.size+=start+PACKET_LENGTH-offset;}break;case audioPid:if(stt){if(audioData&&(pes=parsePES(audioData))){switch(audioTrack.segmentCodec){case'aac':this.parseAACPES(audioTrack,pes);break;case'mp3':this.parseMPEGPES(audioTrack,pes);break;case'ac3':{this.parseAC3PES(audioTrack,pes);}break;}}audioData={data:[],size:0};}if(audioData){audioData.data.push(data.subarray(offset,start+PACKET_LENGTH));audioData.size+=start+PACKET_LENGTH-offset;}break;case id3Pid:if(stt){if(id3Data&&(pes=parsePES(id3Data))){this.parseID3PES(id3Track,pes);}id3Data={data:[],size:0};}if(id3Data){id3Data.data.push(data.subarray(offset,start+PACKET_LENGTH));id3Data.size+=start+PACKET_LENGTH-offset;}break;case 0:if(stt){offset+=data[offset]+1;}pmtId=this._pmtId=parsePAT(data,offset);// logger.log('PMT PID:' + this._pmtId);
break;case pmtId:{if(stt){offset+=data[offset]+1;}const parsedPIDs=parsePMT(data,offset,this.typeSupported,isSampleAes,this.observer);// only update track id if track PID found while parsing PMT
// this is to avoid resetting the PID to -1 in case
// track PID transiently disappears from the stream
// this could happen in case of transient missing audio samples for example
// NOTE this is only the PID of the track as found in TS,
// but we are not using this for MP4 track IDs.
videoPid=parsedPIDs.videoPid;if(videoPid>0){videoTrack.pid=videoPid;videoTrack.segmentCodec=parsedPIDs.segmentVideoCodec;}audioPid=parsedPIDs.audioPid;if(audioPid>0){audioTrack.pid=audioPid;audioTrack.segmentCodec=parsedPIDs.segmentAudioCodec;}id3Pid=parsedPIDs.id3Pid;if(id3Pid>0){id3Track.pid=id3Pid;}if(unknownPID!==null&&!pmtParsed){logger.warn(`MPEG-TS PMT found at ${start} after unknown PID '${unknownPID}'. Backtracking to sync byte @${syncOffset} to parse all TS packets.`);unknownPID=null;// we set it to -188, the += 188 in the for loop will reset start to 0
start=syncOffset-188;}pmtParsed=this.pmtParsed=true;break;}case 0x11:case 0x1fff:break;default:unknownPID=pid;break;}}else {tsPacketErrors++;}}if(tsPacketErrors>0){emitParsingError(this.observer,new Error(`Found ${tsPacketErrors} TS packet/s that do not start with 0x47`));}videoTrack.pesData=videoData;audioTrack.pesData=audioData;id3Track.pesData=id3Data;const demuxResult={audioTrack,videoTrack,id3Track,textTrack};if(flush){this.extractRemainingSamples(demuxResult);}return demuxResult;}flush(){const{remainderData}=this;this.remainderData=null;let result;if(remainderData){result=this.demux(remainderData,-1,false,true);}else {result={videoTrack:this._videoTrack,audioTrack:this._audioTrack,id3Track:this._id3Track,textTrack:this._txtTrack};}this.extractRemainingSamples(result);if(this.sampleAes){return this.decrypt(result,this.sampleAes);}return result;}extractRemainingSamples(demuxResult){const{audioTrack,videoTrack,id3Track,textTrack}=demuxResult;const videoData=videoTrack.pesData;const audioData=audioTrack.pesData;const id3Data=id3Track.pesData;// try to parse last PES packets
let pes;if(videoData&&(pes=parsePES(videoData))){this.videoParser.parseAVCPES(videoTrack,textTrack,pes,true,this._duration);videoTrack.pesData=null;}else {// either avcData null or PES truncated, keep it for next frag parsing
videoTrack.pesData=videoData;}if(audioData&&(pes=parsePES(audioData))){switch(audioTrack.segmentCodec){case'aac':this.parseAACPES(audioTrack,pes);break;case'mp3':this.parseMPEGPES(audioTrack,pes);break;case'ac3':{this.parseAC3PES(audioTrack,pes);}break;}audioTrack.pesData=null;}else {if(audioData!=null&&audioData.size){logger.log('last AAC PES packet truncated,might overlap between fragments');}// either audioData null or PES truncated, keep it for next frag parsing
audioTrack.pesData=audioData;}if(id3Data&&(pes=parsePES(id3Data))){this.parseID3PES(id3Track,pes);id3Track.pesData=null;}else {// either id3Data null or PES truncated, keep it for next frag parsing
id3Track.pesData=id3Data;}}demuxSampleAes(data,keyData,timeOffset){const demuxResult=this.demux(data,timeOffset,true,!this.config.progressive);const sampleAes=this.sampleAes=new SampleAesDecrypter(this.observer,this.config,keyData);return this.decrypt(demuxResult,sampleAes);}decrypt(demuxResult,sampleAes){return new Promise(resolve=>{const{audioTrack,videoTrack}=demuxResult;if(audioTrack.samples&&audioTrack.segmentCodec==='aac'){sampleAes.decryptAacSamples(audioTrack.samples,0,()=>{if(videoTrack.samples){sampleAes.decryptAvcSamples(videoTrack.samples,0,0,()=>{resolve(demuxResult);});}else {resolve(demuxResult);}});}else if(videoTrack.samples){sampleAes.decryptAvcSamples(videoTrack.samples,0,0,()=>{resolve(demuxResult);});}});}destroy(){this._duration=0;}parseAACPES(track,pes){let startOffset=0;const aacOverFlow=this.aacOverFlow;let data=pes.data;if(aacOverFlow){this.aacOverFlow=null;const frameMissingBytes=aacOverFlow.missing;const sampleLength=aacOverFlow.sample.unit.byteLength;// logger.log(`AAC: append overflowing ${sampleLength} bytes to beginning of new PES`);
if(frameMissingBytes===-1){data=appendUint8Array(aacOverFlow.sample.unit,data);}else {const frameOverflowBytes=sampleLength-frameMissingBytes;aacOverFlow.sample.unit.set(data.subarray(0,frameMissingBytes),frameOverflowBytes);track.samples.push(aacOverFlow.sample);startOffset=aacOverFlow.missing;}}// look for ADTS header (0xFFFx)
let offset;let len;for(offset=startOffset,len=data.length;offset0){offset+=parsed;}}}parseID3PES(id3Track,pes){if(pes.pts===undefined){logger.warn('[tsdemuxer]: ID3 PES unknown PTS');return;}const id3Sample=_extends({},pes,{type:this._videoTrack?MetadataSchema.emsg:MetadataSchema.audioId3,duration:Number.POSITIVE_INFINITY});id3Track.samples.push(id3Sample);}}function parsePID(data,offset){// pid is a 13-bit field starting at the last bit of TS[1]
// 13-bit PID: 5 low bits of byte 1 combined with all 8 bits of byte 2.
return ((data[offset+1]&0x1f)<<8)+data[offset+2];}
/**
 * Parse the Program Association Table and return the PID of the first
 * Program Map Table entry.
 * @param data TS packet bytes
 * @param offset start of the PAT section within `data`
 * @returns the 13-bit program_map_PID of the first program
 */
function parsePAT(data,offset){// skip the PSI header and parse the first PMT entry
// program_map_PID: 5 low bits of byte 10 plus all of byte 11 (13 bits)
return (data[offset+10]&0x1f)<<8|data[offset+11];}
/**
 * Parse the Program Map Table and collect the elementary-stream PIDs
 * (audio / video / ID3) together with the segment codecs signalled for them.
 * PIDs default to -1 when not found; codecs default to 'avc' / 'aac'.
 * @param data TS packet bytes
 * @param offset start of the PMT section within `data`
 * @param typeSupported browser capability flags (e.g. `ac3`)
 * @param isSampleAes whether the stream uses SAMPLE-AES encryption
 * @param observer event emitter used to surface parsing errors
 */
function parsePMT(data,offset,typeSupported,isSampleAes,observer){const result={audioPid:-1,videoPid:-1,id3Pid:-1,segmentVideoCodec:'avc',segmentAudioCodec:'aac'};
// section_length: 4 low bits of byte 1 plus byte 2; table ends before the 4 CRC bytes
const sectionLength=(data[offset+1]&0x0f)<<8|data[offset+2];const tableEnd=offset+3+sectionLength-4;// to determine where the table is, we have to figure out how
// long the program info descriptors are
const programInfoLength=(data[offset+10]&0x0f)<<8|data[offset+11];// advance the offset to the first entry in the mapping table
offset+=12+programInfoLength;while(offset0){let parsePos=offset+5;let remaining=esInfoLength;while(remaining>2){const descriptorId=data[parsePos];switch(descriptorId){case 0x6a:// DVB Descriptor for AC-3
{if(typeSupported.ac3!==true){logger.log('AC-3 audio found, not supported in this browser for now');}else {result.audioPid=pid;result.segmentAudioCodec='ac3';}}break;}const descriptorLen=data[parsePos+1]+2;parsePos+=descriptorLen;remaining-=descriptorLen;}}break;case 0xc2:// SAMPLE-AES EC3
/* falls through */case 0x87:emitParsingError(observer,new Error('Unsupported EC-3 in M2TS found'));return result;case 0x24:emitParsingError(observer,new Error('Unsupported HEVC in M2TS found'));return result;}// move to the next table entry
// skip past the elementary stream descriptors, if present
offset+=esInfoLength+5;}return result;}function emitParsingError(observer,error,levelRetry){logger.warn(`parsing error: ${error.message}`);observer.emit(Events.ERROR,Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.FRAG_PARSING_ERROR,fatal:false,levelRetry,error,reason:error.message});}function logEncryptedSamplesFoundInUnencryptedStream(type){logger.log(`${type} with AES-128-CBC encryption found in unencrypted stream`);}function parsePES(stream){let i=0;let frag;let pesLen;let pesHdrLen;let pesPts;let pesDts;const data=stream.data;// safety check
// NOTE(review): `data` is initialized from `stream.data` before this guard runs
// (previous statement), so a null/undefined `stream` would already have thrown;
// confirm and consider moving the dereference after the check.
if(!stream||stream.size===0){return null;}// we might need up to 19 bytes to read PES header
// if first chunk of data is less than 19 bytes, let's merge it with following ones until we get 19 bytes
// usually only one merge is needed (and this is rare ...)
while(data[0].length<19&&data.length>1){data[0]=appendUint8Array(data[0],data[1]);data.splice(1,1);}// retrieve PTS/DTS from first fragment
// packet_start_code_prefix must be 0x000001 for a valid PES packet
frag=data[0];const pesPrefix=(frag[0]<<16)+(frag[1]<<8)+frag[2];if(pesPrefix===1){pesLen=(frag[4]<<8)+frag[5];// if PES parsed length is not zero and greater than total received length, stop parsing. PES might be truncated
// minus 6 : PES header size
if(pesLen&&pesLen>stream.size-6){return null;}const pesFlags=frag[7];if(pesFlags&0xc0){/* PES header described here : http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
as PTS / DTS is 33 bit we cannot use bitwise operator in JS,
as Bitwise operators treat their operands as a sequence of 32 bits */pesPts=(frag[9]&0x0e)*536870912+// 1 << 29
(frag[10]&0xff)*4194304+// 1 << 22
(frag[11]&0xfe)*16384+// 1 << 14
(frag[12]&0xff)*128+// 1 << 7
(frag[13]&0xfe)/2;if(pesFlags&0x40){pesDts=(frag[14]&0x0e)*536870912+// 1 << 29
(frag[15]&0xff)*4194304+// 1 << 22
(frag[16]&0xfe)*16384+// 1 << 14
(frag[17]&0xff)*128+// 1 << 7
// clamp PTS to DTS when they diverge by more than 60s (corrupt timestamps)
(frag[18]&0xfe)/2;if(pesPts-pesDts>60*90000){logger.warn(`${Math.round((pesPts-pesDts)/90000)}s delta between PTS and DTS, align them`);pesPts=pesDts;}}else {pesDts=pesPts;}}pesHdrLen=frag[8];// 9 bytes : 6 bytes for PES header + 3 bytes for PES extension
let payloadStartOffset=pesHdrLen+9;if(stream.size<=payloadStartOffset){return null;}stream.size-=payloadStartOffset;// reassemble PES packet
const pesData=new Uint8Array(stream.size);for(let j=0,dataLen=data.length;jlen){// trim full frag if PES header bigger than frag
payloadStartOffset-=len;continue;}else {// trim partial frag if PES header smaller than frag
frag=frag.subarray(payloadStartOffset);len-=payloadStartOffset;payloadStartOffset=0;}}pesData.set(frag,i);i+=len;}if(pesLen){// payload size : remove PES header + PES extension
pesLen-=pesHdrLen+3;}return {data:pesData,pts:pesPts,dts:pesDts,len:pesLen};}return null;}/**
* MP3 demuxer
*/class MP3Demuxer extends BaseAudioDemuxer{resetInitSegment(initSegment,audioCodec,videoCodec,trackDuration){super.resetInitSegment(initSegment,audioCodec,videoCodec,trackDuration);this._audioTrack={container:'audio/mpeg',type:'audio',id:2,pid:-1,sequenceNumber:0,segmentCodec:'mp3',samples:[],manifestCodec:audioCodec,duration:trackDuration,inputTimeScale:90000,dropped:0};}static probe(data){if(!data){return false;}// check if data contains ID3 timestamp and MPEG sync word
// Look for MPEG header | 1111 1111 | 111X XYZX | where X can be either 0 or 1 and Y or Z should be 1
// Layer bits (position 14 and 15) in header should be always different from 0 (Layer I or Layer II or Layer III)
// More info http://www.mp3-tech.org/programmer/frame_header.html
const id3Data=getID3Data(data,0);let offset=(id3Data==null?void 0:id3Data.length)||0;// Check for ac-3|ec-3 sync bytes and return false if present
if(id3Data&&data[offset]===0x0b&&data[offset+1]===0x77&&getTimeStamp(id3Data)!==undefined&&// check the bsid to confirm ac-3 or ec-3 (not mp3)
getAudioBSID(data,offset)<=16){return false;}for(let length=data.length;offset>24&0xff;result[1]=size>>16&0xff;result[2]=size>>8&0xff;result[3]=size&0xff;result.set(type,4);// copy the payload into the result
for(i=0,size=8;i>24&0xff,timescale>>16&0xff,timescale>>8&0xff,timescale&0xff,// timescale
upperWordDuration>>24,upperWordDuration>>16&0xff,upperWordDuration>>8&0xff,upperWordDuration&0xff,lowerWordDuration>>24,lowerWordDuration>>16&0xff,lowerWordDuration>>8&0xff,lowerWordDuration&0xff,0x55,0xc4,// 'und' language (undetermined)
0x00,0x00]));}static mdia(track){return MP4.box(MP4.types.mdia,MP4.mdhd(track.timescale,track.duration),MP4.hdlr(track.type),MP4.minf(track));}static mfhd(sequenceNumber){return MP4.box(MP4.types.mfhd,new Uint8Array([0x00,0x00,0x00,0x00,// flags
sequenceNumber>>24,sequenceNumber>>16&0xff,sequenceNumber>>8&0xff,sequenceNumber&0xff// sequence_number
]));}static minf(track){if(track.type==='audio'){return MP4.box(MP4.types.minf,MP4.box(MP4.types.smhd,MP4.SMHD),MP4.DINF,MP4.stbl(track));}else {return MP4.box(MP4.types.minf,MP4.box(MP4.types.vmhd,MP4.VMHD),MP4.DINF,MP4.stbl(track));}}static moof(sn,baseMediaDecodeTime,track){return MP4.box(MP4.types.moof,MP4.mfhd(sn),MP4.traf(track,baseMediaDecodeTime));}static moov(tracks){let i=tracks.length;const boxes=[];while(i--){boxes[i]=MP4.trak(tracks[i]);}return MP4.box.apply(null,[MP4.types.moov,MP4.mvhd(tracks[0].timescale,tracks[0].duration)].concat(boxes).concat(MP4.mvex(tracks)));}static mvex(tracks){let i=tracks.length;const boxes=[];while(i--){boxes[i]=MP4.trex(tracks[i]);}return MP4.box.apply(null,[MP4.types.mvex,...boxes]);}static mvhd(timescale,duration){duration*=timescale;const upperWordDuration=Math.floor(duration/(UINT32_MAX+1));const lowerWordDuration=Math.floor(duration%(UINT32_MAX+1));const bytes=new Uint8Array([0x01,// version 1
0x00,0x00,0x00,// flags
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,// creation_time
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,// modification_time
timescale>>24&0xff,timescale>>16&0xff,timescale>>8&0xff,timescale&0xff,// timescale
upperWordDuration>>24,upperWordDuration>>16&0xff,upperWordDuration>>8&0xff,upperWordDuration&0xff,lowerWordDuration>>24,lowerWordDuration>>16&0xff,lowerWordDuration>>8&0xff,lowerWordDuration&0xff,0x00,0x01,0x00,0x00,// 1.0 rate
0x01,0x00,// 1.0 volume
0x00,0x00,// reserved
0x00,0x00,0x00,0x00,// reserved
0x00,0x00,0x00,0x00,// reserved
0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,// transformation: unity matrix
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,// pre_defined
0xff,0xff,0xff,0xff// next_track_ID
]);return MP4.box(MP4.types.mvhd,bytes);}static sdtp(track){const samples=track.samples||[];const bytes=new Uint8Array(4+samples.length);let i;let flags;// leave the full box header (4 bytes) all zero
// write the sample table
for(i=0;i>>8&0xff);sps.push(len&0xff);// SPS
sps=sps.concat(Array.prototype.slice.call(data));}// assemble the PPSs
for(i=0;i>>8&0xff);pps.push(len&0xff);pps=pps.concat(Array.prototype.slice.call(data));}const avcc=MP4.box(MP4.types.avcC,new Uint8Array([0x01,// version
sps[3],// profile
sps[4],// profile compat
sps[5],// level
0xfc|3,// lengthSizeMinusOne, hard-coded to 4 bytes
0xe0|track.sps.length// 3bit reserved (111) + numOfSequenceParameterSets
].concat(sps).concat([track.pps.length// numOfPictureParameterSets
]).concat(pps)));// "PPS"
const width=track.width;const height=track.height;const hSpacing=track.pixelRatio[0];const vSpacing=track.pixelRatio[1];return MP4.box(MP4.types.avc1,new Uint8Array([0x00,0x00,0x00,// reserved
0x00,0x00,0x00,// reserved
0x00,0x01,// data_reference_index
0x00,0x00,// pre_defined
0x00,0x00,// reserved
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,// pre_defined
width>>8&0xff,width&0xff,// width
height>>8&0xff,height&0xff,// height
0x00,0x48,0x00,0x00,// horizresolution
0x00,0x48,0x00,0x00,// vertresolution
0x00,0x00,0x00,0x00,// reserved
0x00,0x01,// frame_count
0x12,0x64,0x61,0x69,0x6c,// dailymotion/hls.js
0x79,0x6d,0x6f,0x74,0x69,0x6f,0x6e,0x2f,0x68,0x6c,0x73,0x2e,0x6a,0x73,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,// compressorname
0x00,0x18,// depth = 24
0x11,0x11]),// pre_defined = -1
avcc,MP4.box(MP4.types.btrt,new Uint8Array([0x00,0x1c,0x9c,0x80,// bufferSizeDB
0x00,0x2d,0xc6,0xc0,// maxBitrate
0x00,0x2d,0xc6,0xc0])),// avgBitrate
MP4.box(MP4.types.pasp,new Uint8Array([hSpacing>>24,// hSpacing
hSpacing>>16&0xff,hSpacing>>8&0xff,hSpacing&0xff,vSpacing>>24,// vSpacing
vSpacing>>16&0xff,vSpacing>>8&0xff,vSpacing&0xff])));}static esds(track){const configlen=track.config.length;return new Uint8Array([0x00,// version 0
0x00,0x00,0x00,// flags
0x03,// descriptor_type
0x17+configlen,// length
0x00,0x01,// es_id
0x00,// stream_priority
0x04,// descriptor_type
0x0f+configlen,// length
0x40,// codec : mpeg4_audio
0x15,// stream_type
0x00,0x00,0x00,// buffer_size
0x00,0x00,0x00,0x00,// maxBitrate
0x00,0x00,0x00,0x00,// avgBitrate
0x05// descriptor_type
].concat([configlen]).concat(track.config).concat([0x06,0x01,0x02]));// GASpecificConfig)); // length + audio config descriptor
}static audioStsd(track){const samplerate=track.samplerate;return new Uint8Array([0x00,0x00,0x00,// reserved
0x00,0x00,0x00,// reserved
0x00,0x01,// data_reference_index
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,// reserved
0x00,track.channelCount,// channelcount
0x00,0x10,// sampleSize:16bits
0x00,0x00,0x00,0x00,// reserved2
samplerate>>8&0xff,samplerate&0xff,//
0x00,0x00]);}static mp4a(track){return MP4.box(MP4.types.mp4a,MP4.audioStsd(track),MP4.box(MP4.types.esds,MP4.esds(track)));}static mp3(track){return MP4.box(MP4.types['.mp3'],MP4.audioStsd(track));}static ac3(track){return MP4.box(MP4.types['ac-3'],MP4.audioStsd(track),MP4.box(MP4.types.dac3,track.config));}static stsd(track){if(track.type==='audio'){if(track.segmentCodec==='mp3'&&track.codec==='mp3'){return MP4.box(MP4.types.stsd,MP4.STSD,MP4.mp3(track));}if(track.segmentCodec==='ac3'){return MP4.box(MP4.types.stsd,MP4.STSD,MP4.ac3(track));}return MP4.box(MP4.types.stsd,MP4.STSD,MP4.mp4a(track));}else {return MP4.box(MP4.types.stsd,MP4.STSD,MP4.avc1(track));}}static tkhd(track){const id=track.id;const duration=track.duration*track.timescale;const width=track.width;const height=track.height;const upperWordDuration=Math.floor(duration/(UINT32_MAX+1));const lowerWordDuration=Math.floor(duration%(UINT32_MAX+1));return MP4.box(MP4.types.tkhd,new Uint8Array([0x01,// version 1
0x00,0x00,0x07,// flags
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x02,// creation_time
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x03,// modification_time
id>>24&0xff,id>>16&0xff,id>>8&0xff,id&0xff,// track_ID
0x00,0x00,0x00,0x00,// reserved
upperWordDuration>>24,upperWordDuration>>16&0xff,upperWordDuration>>8&0xff,upperWordDuration&0xff,lowerWordDuration>>24,lowerWordDuration>>16&0xff,lowerWordDuration>>8&0xff,lowerWordDuration&0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,// reserved
0x00,0x00,// layer
0x00,0x00,// alternate_group
0x00,0x00,// non-audio track volume
0x00,0x00,// reserved
0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x40,0x00,0x00,0x00,// transformation: unity matrix
width>>8&0xff,width&0xff,0x00,0x00,// width
height>>8&0xff,height&0xff,0x00,0x00// height
]));}static traf(track,baseMediaDecodeTime){const sampleDependencyTable=MP4.sdtp(track);const id=track.id;const upperWordBaseMediaDecodeTime=Math.floor(baseMediaDecodeTime/(UINT32_MAX+1));const lowerWordBaseMediaDecodeTime=Math.floor(baseMediaDecodeTime%(UINT32_MAX+1));return MP4.box(MP4.types.traf,MP4.box(MP4.types.tfhd,new Uint8Array([0x00,// version 0
0x00,0x00,0x00,// flags
id>>24,id>>16&0xff,id>>8&0xff,id&0xff// track_ID
])),MP4.box(MP4.types.tfdt,new Uint8Array([0x01,// version 1
0x00,0x00,0x00,// flags
upperWordBaseMediaDecodeTime>>24,upperWordBaseMediaDecodeTime>>16&0xff,upperWordBaseMediaDecodeTime>>8&0xff,upperWordBaseMediaDecodeTime&0xff,lowerWordBaseMediaDecodeTime>>24,lowerWordBaseMediaDecodeTime>>16&0xff,lowerWordBaseMediaDecodeTime>>8&0xff,lowerWordBaseMediaDecodeTime&0xff])),MP4.trun(track,sampleDependencyTable.length+16+// tfhd
20+// tfdt
8+// traf header
16+// mfhd
8+// moof header
8),// mdat header
sampleDependencyTable);}/**
* Generate a track box.
* @param track a track definition
*/static trak(track){track.duration=track.duration||0xffffffff;return MP4.box(MP4.types.trak,MP4.tkhd(track),MP4.mdia(track));}static trex(track){const id=track.id;return MP4.box(MP4.types.trex,new Uint8Array([0x00,// version 0
0x00,0x00,0x00,// flags
id>>24,id>>16&0xff,id>>8&0xff,id&0xff,// track_ID
0x00,0x00,0x00,0x01,// default_sample_description_index
0x00,0x00,0x00,0x00,// default_sample_duration
0x00,0x00,0x00,0x00,// default_sample_size
0x00,0x01,0x00,0x01// default_sample_flags
]));}static trun(track,offset){const samples=track.samples||[];const len=samples.length;const arraylen=12+16*len;const array=new Uint8Array(arraylen);let i;let sample;let duration;let size;let flags;let cts;offset+=8+arraylen;array.set([track.type==='video'?0x01:0x00,// version 1 for video with signed-int sample_composition_time_offset
0x00,0x0f,0x01,// flags
len>>>24&0xff,len>>>16&0xff,len>>>8&0xff,len&0xff,// sample_count
offset>>>24&0xff,offset>>>16&0xff,offset>>>8&0xff,offset&0xff// data_offset
],0);for(i=0;i>>24&0xff,duration>>>16&0xff,duration>>>8&0xff,duration&0xff,// sample_duration
size>>>24&0xff,size>>>16&0xff,size>>>8&0xff,size&0xff,// sample_size
flags.isLeading<<2|flags.dependsOn,flags.isDependedOn<<6|flags.hasRedundancy<<4|flags.paddingValue<<1|flags.isNonSync,flags.degradPrio&0xf0<<8,flags.degradPrio&0x0f,// sample_flags
cts>>>24&0xff,cts>>>16&0xff,cts>>>8&0xff,cts&0xff// sample_composition_time_offset
],12+16*i);}return MP4.box(MP4.types.trun,array);}static initSegment(tracks){if(!MP4.types){MP4.init();}const movie=MP4.moov(tracks);const result=appendUint8Array(MP4.FTYP,movie);return result;}}MP4.types=void 0;MP4.HDLR_TYPES=void 0;MP4.STTS=void 0;MP4.STSC=void 0;MP4.STCO=void 0;MP4.STSZ=void 0;MP4.VMHD=void 0;MP4.SMHD=void 0;MP4.STSD=void 0;MP4.FTYP=void 0;MP4.DINF=void 0;const MPEG_TS_CLOCK_FREQ_HZ=90000;function toTimescaleFromBase(baseTime,destScale,srcBase=1,round=false){const result=baseTime*destScale*srcBase;// equivalent to `(value * scale) / (1 / base)`
return round?Math.round(result):result;}function toTimescaleFromScale(baseTime,destScale,srcScale=1,round=false){return toTimescaleFromBase(baseTime,destScale,1/srcScale,round);}function toMsFromMpegTsClock(baseTime,round=false){return toTimescaleFromBase(baseTime,1000,1/MPEG_TS_CLOCK_FREQ_HZ,round);}function toMpegTsClockFromTimescale(baseTime,srcScale=1){return toTimescaleFromBase(baseTime,MPEG_TS_CLOCK_FREQ_HZ,1/srcScale);}const MAX_SILENT_FRAME_DURATION=10*1000;// 10 seconds
const AAC_SAMPLES_PER_FRAME=1024;const MPEG_AUDIO_SAMPLE_PER_FRAME=1152;const AC3_SAMPLES_PER_FRAME=1536;let chromeVersion=null;let safariWebkitVersion=null;class MP4Remuxer{constructor(observer,config,typeSupported,vendor=''){this.observer=void 0;this.config=void 0;this.typeSupported=void 0;this.ISGenerated=false;this._initPTS=null;this._initDTS=null;this.nextAvcDts=null;this.nextAudioPts=null;this.videoSampleDuration=null;this.isAudioContiguous=false;this.isVideoContiguous=false;this.videoTrackConfig=void 0;this.observer=observer;this.config=config;this.typeSupported=typeSupported;this.ISGenerated=false;if(chromeVersion===null){const userAgent=navigator.userAgent||'';const result=userAgent.match(/Chrome\/(\d+)/i);chromeVersion=result?parseInt(result[1]):0;}if(safariWebkitVersion===null){const result=navigator.userAgent.match(/Safari\/(\d+)/i);safariWebkitVersion=result?parseInt(result[1]):0;}}destroy(){// @ts-ignore
this.config=this.videoTrackConfig=this._initPTS=this._initDTS=null;}resetTimeStamp(defaultTimeStamp){logger.log('[mp4-remuxer]: initPTS & initDTS reset');this._initPTS=this._initDTS=defaultTimeStamp;}resetNextTimestamp(){logger.log('[mp4-remuxer]: reset next timestamp');this.isVideoContiguous=false;this.isAudioContiguous=false;}resetInitSegment(){logger.log('[mp4-remuxer]: ISGenerated flag reset');this.ISGenerated=false;this.videoTrackConfig=undefined;}getVideoStartPts(videoSamples){let rolloverDetected=false;const startPTS=videoSamples.reduce((minPTS,sample)=>{const delta=sample.pts-minPTS;if(delta<-4294967296){// 2^32, see PTSNormalize for reasoning, but we're hitting a rollover here, and we don't want that to impact the timeOffset calculation
rolloverDetected=true;return normalizePts(minPTS,sample.pts);}else if(delta>0){return minPTS;}else {return sample.pts;}},videoSamples[0].pts);if(rolloverDetected){logger.debug('PTS rollover detected');}return startPTS;}remux(audioTrack,videoTrack,id3Track,textTrack,timeOffset,accurateTimeOffset,flush,playlistType){let video;let audio;let initSegment;let text;let id3;let independent;let audioTimeOffset=timeOffset;let videoTimeOffset=timeOffset;// If we're remuxing audio and video progressively, wait until we've received enough samples for each track before proceeding.
// This is done to synchronize the audio and video streams. We know if the current segment will have samples if the "pid"
// parameter is greater than -1. The pid is set when the PMT is parsed, which contains the tracks list.
// However, if the initSegment has already been generated, or we've reached the end of a segment (flush),
// then we can remux one track without waiting for the other.
const hasAudio=audioTrack.pid>-1;const hasVideo=videoTrack.pid>-1;const length=videoTrack.samples.length;const enoughAudioSamples=audioTrack.samples.length>0;const enoughVideoSamples=flush&&length>0||length>1;const canRemuxAvc=(!hasAudio||enoughAudioSamples)&&(!hasVideo||enoughVideoSamples)||this.ISGenerated||flush;if(canRemuxAvc){if(this.ISGenerated){var _videoTrack$pixelRati,_config$pixelRatio,_videoTrack$pixelRati2,_config$pixelRatio2;const config=this.videoTrackConfig;if(config&&(videoTrack.width!==config.width||videoTrack.height!==config.height||((_videoTrack$pixelRati=videoTrack.pixelRatio)==null?void 0:_videoTrack$pixelRati[0])!==((_config$pixelRatio=config.pixelRatio)==null?void 0:_config$pixelRatio[0])||((_videoTrack$pixelRati2=videoTrack.pixelRatio)==null?void 0:_videoTrack$pixelRati2[1])!==((_config$pixelRatio2=config.pixelRatio)==null?void 0:_config$pixelRatio2[1]))){this.resetInitSegment();}}else {initSegment=this.generateIS(audioTrack,videoTrack,timeOffset,accurateTimeOffset);}const isVideoContiguous=this.isVideoContiguous;let firstKeyFrameIndex=-1;let firstKeyFramePTS;if(enoughVideoSamples){firstKeyFrameIndex=findKeyframeIndex(videoTrack.samples);if(!isVideoContiguous&&this.config.forceKeyFrameOnDiscontinuity){independent=true;if(firstKeyFrameIndex>0){logger.warn(`[mp4-remuxer]: Dropped ${firstKeyFrameIndex} out of ${length} video samples due to a missing keyframe`);const startPTS=this.getVideoStartPts(videoTrack.samples);videoTrack.samples=videoTrack.samples.slice(firstKeyFrameIndex);videoTrack.dropped+=firstKeyFrameIndex;videoTimeOffset+=(videoTrack.samples[0].pts-startPTS)/videoTrack.inputTimeScale;firstKeyFramePTS=videoTimeOffset;}else if(firstKeyFrameIndex===-1){logger.warn(`[mp4-remuxer]: No keyframe found out of ${length} video samples`);independent=false;}}}if(this.ISGenerated){if(enoughAudioSamples&&enoughVideoSamples){// timeOffset is expected to be the offset of the first timestamp of this fragment (first DTS)
// if first audio DTS is not aligned with first video DTS then we need to take that into account
// when providing timeOffset to remuxAudio / remuxVideo. if we don't do that, there might be a permanent / small
// drift between audio and video streams
const startPTS=this.getVideoStartPts(videoTrack.samples);const tsDelta=normalizePts(audioTrack.samples[0].pts,startPTS)-startPTS;const audiovideoTimestampDelta=tsDelta/videoTrack.inputTimeScale;audioTimeOffset+=Math.max(0,audiovideoTimestampDelta);videoTimeOffset+=Math.max(0,-audiovideoTimestampDelta);}// Purposefully remuxing audio before video, so that remuxVideo can use nextAudioPts, which is calculated in remuxAudio.
if(enoughAudioSamples){// if initSegment was generated without audio samples, regenerate it again
if(!audioTrack.samplerate){logger.warn('[mp4-remuxer]: regenerate InitSegment as audio detected');initSegment=this.generateIS(audioTrack,videoTrack,timeOffset,accurateTimeOffset);}audio=this.remuxAudio(audioTrack,audioTimeOffset,this.isAudioContiguous,accurateTimeOffset,hasVideo||enoughVideoSamples||playlistType===PlaylistLevelType.AUDIO?videoTimeOffset:undefined);if(enoughVideoSamples){const audioTrackLength=audio?audio.endPTS-audio.startPTS:0;// if initSegment was generated without video samples, regenerate it again
if(!videoTrack.inputTimeScale){logger.warn('[mp4-remuxer]: regenerate InitSegment as video detected');initSegment=this.generateIS(audioTrack,videoTrack,timeOffset,accurateTimeOffset);}video=this.remuxVideo(videoTrack,videoTimeOffset,isVideoContiguous,audioTrackLength);}}else if(enoughVideoSamples){video=this.remuxVideo(videoTrack,videoTimeOffset,isVideoContiguous,0);}if(video){video.firstKeyFrame=firstKeyFrameIndex;video.independent=firstKeyFrameIndex!==-1;video.firstKeyFramePTS=firstKeyFramePTS;}}}// Allow ID3 and text to remux, even if more audio/video samples are required
if(this.ISGenerated&&this._initPTS&&this._initDTS){if(id3Track.samples.length){id3=flushTextTrackMetadataCueSamples(id3Track,timeOffset,this._initPTS,this._initDTS);}if(textTrack.samples.length){text=flushTextTrackUserdataCueSamples(textTrack,timeOffset,this._initPTS);}}return {audio,video,initSegment,independent,text,id3};}generateIS(audioTrack,videoTrack,timeOffset,accurateTimeOffset){const audioSamples=audioTrack.samples;const videoSamples=videoTrack.samples;const typeSupported=this.typeSupported;const tracks={};const _initPTS=this._initPTS;let computePTSDTS=!_initPTS||accurateTimeOffset;let container='audio/mp4';let initPTS;let initDTS;let timescale;if(computePTSDTS){initPTS=initDTS=Infinity;}if(audioTrack.config&&audioSamples.length){// let's use audio sampling rate as MP4 time scale.
// rationale is that there is a integer nb of audio frames per audio sample (1024 for AAC)
// using audio sampling rate here helps having an integer MP4 frame duration
// this avoids potential rounding issue and AV sync issue
audioTrack.timescale=audioTrack.samplerate;switch(audioTrack.segmentCodec){case'mp3':if(typeSupported.mpeg){// Chrome and Safari
container='audio/mpeg';audioTrack.codec='';}else if(typeSupported.mp3){// Firefox
audioTrack.codec='mp3';}break;case'ac3':audioTrack.codec='ac-3';break;}tracks.audio={id:'audio',container:container,codec:audioTrack.codec,initSegment:audioTrack.segmentCodec==='mp3'&&typeSupported.mpeg?new Uint8Array(0):MP4.initSegment([audioTrack]),metadata:{channelCount:audioTrack.channelCount}};if(computePTSDTS){timescale=audioTrack.inputTimeScale;if(!_initPTS||timescale!==_initPTS.timescale){// remember first PTS of this demuxing context. for audio, PTS = DTS
initPTS=initDTS=audioSamples[0].pts-Math.round(timescale*timeOffset);}else {computePTSDTS=false;}}}if(videoTrack.sps&&videoTrack.pps&&videoSamples.length){// let's use input time scale as MP4 video timescale
// we use input time scale straight away to avoid rounding issues on frame duration / cts computation
videoTrack.timescale=videoTrack.inputTimeScale;tracks.video={id:'main',container:'video/mp4',codec:videoTrack.codec,initSegment:MP4.initSegment([videoTrack]),metadata:{width:videoTrack.width,height:videoTrack.height}};if(computePTSDTS){timescale=videoTrack.inputTimeScale;if(!_initPTS||timescale!==_initPTS.timescale){const startPTS=this.getVideoStartPts(videoSamples);const startOffset=Math.round(timescale*timeOffset);initDTS=Math.min(initDTS,normalizePts(videoSamples[0].dts,startPTS)-startOffset);initPTS=Math.min(initPTS,startPTS-startOffset);}else {computePTSDTS=false;}}this.videoTrackConfig={width:videoTrack.width,height:videoTrack.height,pixelRatio:videoTrack.pixelRatio};}if(Object.keys(tracks).length){this.ISGenerated=true;if(computePTSDTS){this._initPTS={baseTime:initPTS,timescale:timescale};this._initDTS={baseTime:initDTS,timescale:timescale};}else {initPTS=timescale=undefined;}return {tracks,initPTS,timescale};}}remuxVideo(track,timeOffset,contiguous,audioTrackLength){const timeScale=track.inputTimeScale;const inputSamples=track.samples;const outputSamples=[];const nbSamples=inputSamples.length;const initPTS=this._initPTS;let nextAvcDts=this.nextAvcDts;let offset=8;let mp4SampleDuration=this.videoSampleDuration;let firstDTS;let lastDTS;let minPTS=Number.POSITIVE_INFINITY;let maxPTS=Number.NEGATIVE_INFINITY;let sortSamples=false;// if parsed fragment is contiguous with last one, let's use last DTS value as reference
if(!contiguous||nextAvcDts===null){const pts=timeOffset*timeScale;const cts=inputSamples[0].pts-normalizePts(inputSamples[0].dts,inputSamples[0].pts);if(chromeVersion&&nextAvcDts!==null&&Math.abs(pts-cts-nextAvcDts)<15000){// treat as contigous to adjust samples that would otherwise produce video buffer gaps in Chrome
contiguous=true;}else {// if not contiguous, let's use target timeOffset
nextAvcDts=pts-cts;}}// PTS is coded on 33bits, and can loop from -2^32 to 2^32
// PTSNormalize will make PTS/DTS value monotonic, we use last known DTS value as reference value
const initTime=initPTS.baseTime*timeScale/initPTS.timescale;for(let i=0;i0?i-1:i].dts){sortSamples=true;}}// sort video samples by DTS then PTS then demux id order
if(sortSamples){inputSamples.sort(function(a,b){const deltadts=a.dts-b.dts;const deltapts=a.pts-b.pts;return deltadts||deltapts;});}// Get first/last DTS
firstDTS=inputSamples[0].dts;lastDTS=inputSamples[inputSamples.length-1].dts;// Sample duration (as expected by trun MP4 boxes), should be the delta between sample DTS
// set this constant duration as being the avg delta between consecutive DTS.
const inputDuration=lastDTS-firstDTS;const averageSampleDuration=inputDuration?Math.round(inputDuration/(nbSamples-1)):mp4SampleDuration||track.inputTimeScale/30;// if fragment are contiguous, detect hole/overlapping between fragments
if(contiguous){// check timestamp continuity across consecutive fragments (this is to remove inter-fragment gap/hole)
const delta=firstDTS-nextAvcDts;const foundHole=delta>averageSampleDuration;const foundOverlap=delta<-1;if(foundHole||foundOverlap){if(foundHole){logger.warn(`AVC: ${toMsFromMpegTsClock(delta,true)} ms (${delta}dts) hole between fragments detected at ${timeOffset.toFixed(3)}`);}else {logger.warn(`AVC: ${toMsFromMpegTsClock(-delta,true)} ms (${delta}dts) overlapping between fragments detected at ${timeOffset.toFixed(3)}`);}if(!foundOverlap||nextAvcDts>=inputSamples[0].pts||chromeVersion){firstDTS=nextAvcDts;const firstPTS=inputSamples[0].pts-delta;if(foundHole){inputSamples[0].dts=firstDTS;inputSamples[0].pts=firstPTS;}else {for(let i=0;ifirstPTS){break;}inputSamples[i].dts-=delta;inputSamples[i].pts-=delta;}}logger.log(`Video: Initial PTS/DTS adjusted: ${toMsFromMpegTsClock(firstPTS,true)}/${toMsFromMpegTsClock(firstDTS,true)}, delta: ${toMsFromMpegTsClock(delta,true)} ms`);}}}firstDTS=Math.max(0,firstDTS);let nbNalu=0;let naluLen=0;let dtsStep=firstDTS;for(let i=0;i0?VideoSample.dts-inputSamples[i-1].dts:averageSampleDuration;ptsDelta=i>0?VideoSample.pts-inputSamples[i-1].pts:averageSampleDuration;if(config.stretchShortVideoTrack&&this.nextAudioPts!==null){// In some cases, a segment's audio track duration may exceed the video track duration.
// Since we've already remuxed audio, and we know how long the audio track is, we look to
// see if the delta to the next segment is longer than maxBufferHole.
// If so, playback would potentially get stuck, so we artificially inflate
// the duration of the last frame to minimize any potential gap between segments.
const gapTolerance=Math.floor(config.maxBufferHole*timeScale);const deltaToFrameEnd=(audioTrackLength?minPTS+audioTrackLength*timeScale:this.nextAudioPts)-VideoSample.pts;if(deltaToFrameEnd>gapTolerance){// We subtract lastFrameDuration from deltaToFrameEnd to try to prevent any video
// frame overlap. maxBufferHole should be >> lastFrameDuration anyway.
mp4SampleDuration=deltaToFrameEnd-lastFrameDuration;if(mp4SampleDuration<0){mp4SampleDuration=lastFrameDuration;}else {stretchedLastFrame=true;}logger.log(`[mp4-remuxer]: It is approximately ${deltaToFrameEnd/90} ms to the next segment; using duration ${mp4SampleDuration/90} ms for the last video frame.`);}else {mp4SampleDuration=lastFrameDuration;}}else {mp4SampleDuration=lastFrameDuration;}}const compositionTimeOffset=Math.round(VideoSample.pts-VideoSample.dts);minDtsDelta=Math.min(minDtsDelta,mp4SampleDuration);maxDtsDelta=Math.max(maxDtsDelta,mp4SampleDuration);minPtsDelta=Math.min(minPtsDelta,ptsDelta);maxPtsDelta=Math.max(maxPtsDelta,ptsDelta);outputSamples.push(new Mp4Sample(VideoSample.key,mp4SampleDuration,mp4SampleLength,compositionTimeOffset));}if(outputSamples.length){if(chromeVersion){if(chromeVersion<70){// Chrome workaround, mark first sample as being a Random Access Point (keyframe) to avoid sourcebuffer append issue
// https://code.google.com/p/chromium/issues/detail?id=229412
const flags=outputSamples[0].flags;flags.dependsOn=2;flags.isNonSync=0;}}else if(safariWebkitVersion){// Fix for "CNN special report, with CC" in test-streams (Safari browser only)
// Ignore DTS when frame durations are irregular. Safari MSE does not handle this leading to gaps.
if(maxPtsDelta-minPtsDelta s.pts)) : (window.audioSamples = [inputSamples.map(s => s.pts)]);
// for audio samples, also consider consecutive fragments as being contiguous (even if a level switch occurs),
// for sake of clarity:
// consecutive fragments are frags with
// - less than 100ms gaps between new time offset (if accurate) and next expected PTS OR
// - less than 20 audio frames distance
// contiguous fragments are consecutive fragments from same quality level (same level, new SN = old SN + 1)
// this helps ensure audio continuity
// and this also avoids audio glitches/cut when switching quality, or reporting wrong duration on first audio frame
const timeOffsetMpegTS=timeOffset*inputTimeScale;const initTime=initPTS.baseTime*inputTimeScale/initPTS.timescale;this.isAudioContiguous=contiguous=contiguous||inputSamples.length&&nextAudioPts>0&&(accurateTimeOffset&&Math.abs(timeOffsetMpegTS-nextAudioPts)<9000||Math.abs(normalizePts(inputSamples[0].pts-initTime,timeOffsetMpegTS)-nextAudioPts)<20*inputSampleDuration);// compute normalized PTS
inputSamples.forEach(function(sample){sample.pts=normalizePts(sample.pts-initTime,timeOffsetMpegTS);});if(!contiguous||nextAudioPts<0){// filter out sample with negative PTS that are not playable anyway
// if we don't remove these negative samples, they will shift all audio samples forward.
// leading to audio overlap between current / next fragment
inputSamples=inputSamples.filter(sample=>sample.pts>=0);// in case all samples have negative PTS, and have been filtered out, return now
if(!inputSamples.length){return;}if(videoTimeOffset===0){// Set the start to 0 to match video so that start gaps larger than inputSampleDuration are filled with silence
nextAudioPts=0;}else if(accurateTimeOffset&&!alignedWithVideo){// When not seeking, not live, and LevelDetails.PTSKnown, use fragment start as predicted next audio PTS
nextAudioPts=Math.max(0,timeOffsetMpegTS);}else {// if frags are not contiguous and if we cant trust time offset, let's use first sample PTS as next audio PTS
nextAudioPts=inputSamples[0].pts;}}// If the audio track is missing samples, the frames seem to get "left-shifted" within the
// resulting mp4 segment, causing sync issues and leaving gaps at the end of the audio segment.
// In an effort to prevent this from happening, we inject frames here where there are gaps.
// When possible, we inject a silent frame; when that's not possible, we duplicate the last
// frame.
if(track.segmentCodec==='aac'){const maxAudioFramesDrift=this.config.maxAudioFramesDrift;for(let i=0,nextPts=nextAudioPts;i=maxAudioFramesDrift*inputSampleDuration&&duration0){/* concatenate the audio data and construct the mdat in place
(need 8 more bytes to fill length and mdat type) */mdatSize+=offset;try{mdat=new Uint8Array(mdatSize);}catch(err){this.observer.emit(Events.ERROR,Events.ERROR,{type:ErrorTypes.MUX_ERROR,details:ErrorDetails.REMUX_ALLOC_ERROR,fatal:false,error:err,bytes:mdatSize,reason:`fail allocating audio mdat ${mdatSize}`});return;}if(!rawMPEG){const view=new DataView(mdat.buffer);view.setUint32(0,mdatSize);mdat.set(MP4.types.mdat,4);}}else {// no audio samples
return;}}mdat.set(unit,offset);const unitLen=unit.byteLength;offset+=unitLen;// Default the sample's duration to the computed mp4SampleDuration, which will either be 1024 for AAC or 1152 for MPEG
// In the case that we have 1 sample, this will be the duration. If we have more than one sample, the duration
// becomes the PTS diff with the previous sample
outputSamples.push(new Mp4Sample(true,mp4SampleDuration,unitLen,0));lastPTS=pts;}// We could end up with no audio samples if all input samples were overlapping with the previously remuxed ones
const nbSamples=outputSamples.length;if(!nbSamples){return;}// The next audio sample PTS should be equal to last sample PTS + duration
const lastSample=outputSamples[outputSamples.length-1];this.nextAudioPts=nextAudioPts=lastPTS+scaleFactor*lastSample.duration;// Set the track samples from inputSamples to outputSamples before remuxing
const moof=rawMPEG?new Uint8Array(0):MP4.moof(track.sequenceNumber++,firstPTS/scaleFactor,_extends({},track,{samples:outputSamples}));// Clear the track samples. This also clears the samples array in the demuxer, since the reference is shared
track.samples=[];const start=firstPTS/inputTimeScale;const end=nextAudioPts/inputTimeScale;const type='audio';const audioData={data1:moof,data2:mdat,startPTS:start,endPTS:end,startDTS:start,endDTS:end,type,hasAudio:true,hasVideo:false,nb:nbSamples};this.isAudioContiguous=true;return audioData;}remuxEmptyAudio(track,timeOffset,contiguous,videoData){const inputTimeScale=track.inputTimeScale;const mp4timeScale=track.samplerate?track.samplerate:inputTimeScale;const scaleFactor=inputTimeScale/mp4timeScale;const nextAudioPts=this.nextAudioPts;// sync with video's timestamp
const initDTS=this._initDTS;const init90kHz=initDTS.baseTime*90000/initDTS.timescale;const startDTS=(nextAudioPts!==null?nextAudioPts:videoData.startDTS*inputTimeScale)+init90kHz;const endDTS=videoData.endDTS*inputTimeScale+init90kHz;// one sample's duration value
const frameDuration=scaleFactor*AAC_SAMPLES_PER_FRAME;// samples count of this segment's duration
const nbSamples=Math.ceil((endDTS-startDTS)/frameDuration);// silent frame
const silentFrame=AAC.getSilentFrame(track.manifestCodec||track.codec,track.channelCount);logger.warn('[mp4-remuxer]: remux empty Audio');// Can't remux if we can't generate a silent frame...
if(!silentFrame){logger.trace('[mp4-remuxer]: Unable to remuxEmptyAudio since we were unable to get a silent frame for given audio codec');return;}const samples=[];for(let i=0;i4294967296){value+=offset;}return value;}function findKeyframeIndex(samples){for(let i=0;ia.pts-b.pts);const samples=track.samples;track.samples=[];return {samples};}class Mp4Sample{constructor(isKeyframe,duration,size,cts){this.size=void 0;this.duration=void 0;this.cts=void 0;this.flags=void 0;this.duration=duration;this.size=size;this.cts=cts;this.flags={isLeading:0,isDependedOn:0,hasRedundancy:0,degradPrio:0,dependsOn:isKeyframe?2:1,isNonSync:isKeyframe?0:1};}}class PassThroughRemuxer{constructor(){this.emitInitSegment=false;this.audioCodec=void 0;this.videoCodec=void 0;this.initData=void 0;this.initPTS=null;this.initTracks=void 0;this.lastEndTime=null;}destroy(){}resetTimeStamp(defaultInitPTS){this.initPTS=defaultInitPTS;this.lastEndTime=null;}resetNextTimestamp(){this.lastEndTime=null;}resetInitSegment(initSegment,audioCodec,videoCodec,decryptdata){this.audioCodec=audioCodec;this.videoCodec=videoCodec;this.generateInitSegment(patchEncyptionData(initSegment,decryptdata));this.emitInitSegment=true;}generateInitSegment(initSegment){let{audioCodec,videoCodec}=this;if(!(initSegment!=null&&initSegment.byteLength)){this.initTracks=undefined;this.initData=undefined;return;}const initData=this.initData=parseInitSegment(initSegment);// Get codec from initSegment or fallback to default
if(initData.audio){audioCodec=getParsedTrackCodec(initData.audio,ElementaryStreamTypes.AUDIO);}if(initData.video){videoCodec=getParsedTrackCodec(initData.video,ElementaryStreamTypes.VIDEO);}const tracks={};if(initData.audio&&initData.video){tracks.audiovideo={container:'video/mp4',codec:audioCodec+','+videoCodec,initSegment,id:'main'};}else if(initData.audio){tracks.audio={container:'audio/mp4',codec:audioCodec,initSegment,id:'audio'};}else if(initData.video){tracks.video={container:'video/mp4',codec:videoCodec,initSegment,id:'main'};}else {logger.warn('[passthrough-remuxer.ts]: initSegment does not contain moov or trak boxes.');}this.initTracks=tracks;}remux(audioTrack,videoTrack,id3Track,textTrack,timeOffset,accurateTimeOffset){var _initData,_initData2;let{initPTS,lastEndTime}=this;const result={audio:undefined,video:undefined,text:textTrack,id3:id3Track,initSegment:undefined};// If we haven't yet set a lastEndDTS, or it was reset, set it to the provided timeOffset. We want to use the
// lastEndDTS over timeOffset whenever possible; during progressive playback, the media source will not update
// the media duration (which is what timeOffset is provided as) before we need to process the next chunk.
if(!isFiniteNumber(lastEndTime)){lastEndTime=this.lastEndTime=timeOffset||0;}// The binary segment data is added to the videoTrack in the mp4demuxer. We don't check to see if the data is only
// audio or video (or both); adding it to video was an arbitrary choice.
const data=videoTrack.samples;if(!(data!=null&&data.length)){return result;}const initSegment={initPTS:undefined,timescale:1};let initData=this.initData;if(!((_initData=initData)!=null&&_initData.length)){this.generateInitSegment(data);initData=this.initData;}if(!((_initData2=initData)!=null&&_initData2.length)){// We can't remux if the initSegment could not be generated
logger.warn('[passthrough-remuxer.ts]: Failed to generate initSegment.');return result;}if(this.emitInitSegment){initSegment.tracks=this.initTracks;this.emitInitSegment=false;}const duration=getDuration(data,initData);const startDTS=getStartDTS(initData,data);const decodeTime=startDTS===null?timeOffset:startDTS;if(isInvalidInitPts(initPTS,decodeTime,timeOffset,duration)||initSegment.timescale!==initPTS.timescale&&accurateTimeOffset){initSegment.initPTS=decodeTime-timeOffset;if(initPTS&&initPTS.timescale===1){logger.warn(`Adjusting initPTS by ${initSegment.initPTS-initPTS.baseTime}`);}this.initPTS=initPTS={baseTime:initSegment.initPTS,timescale:1};}const startTime=audioTrack?decodeTime-initPTS.baseTime/initPTS.timescale:lastEndTime;const endTime=startTime+duration;offsetStartDTS(initData,data,initPTS.baseTime/initPTS.timescale);if(duration>0){this.lastEndTime=endTime;}else {logger.warn('Duration parsed from mp4 should be greater than zero');this.resetNextTimestamp();}const hasAudio=!!initData.audio;const hasVideo=!!initData.video;let type='';if(hasAudio){type+='audio';}if(hasVideo){type+='video';}const track={data1:data,startPTS:startTime,startDTS:startTime,endPTS:endTime,endDTS:endTime,type,hasAudio,hasVideo,nb:1,dropped:0};result.audio=track.type==='audio'?track:undefined;result.video=track.type!=='audio'?track:undefined;result.initSegment=initSegment;result.id3=flushTextTrackMetadataCueSamples(id3Track,timeOffset,initPTS,initPTS);if(textTrack.samples.length){result.text=flushTextTrackUserdataCueSamples(textTrack,timeOffset,initPTS);}return result;}}function isInvalidInitPts(initPTS,startDTS,timeOffset,duration){if(initPTS===null){return true;}// InitPTS is invalid when distance from program would be more than segment duration or a minimum of one second
const minDuration=Math.max(duration,1);const startTime=startDTS-initPTS.baseTime/initPTS.timescale;return Math.abs(startTime-timeOffset)>minDuration;}function getParsedTrackCodec(track,type){const parsedCodec=track==null?void 0:track.codec;if(parsedCodec&&parsedCodec.length>4){return parsedCodec;}if(type===ElementaryStreamTypes.AUDIO){if(parsedCodec==='ec-3'||parsedCodec==='ac-3'||parsedCodec==='alac'){return parsedCodec;}if(parsedCodec==='fLaC'||parsedCodec==='Opus'){// Opting not to get `preferManagedMediaSource` from player config for isSupported() check for simplicity
const preferManagedMediaSource=false;return getCodecCompatibleName(parsedCodec,preferManagedMediaSource);}const result='mp4a.40.5';logger.info(`Parsed audio codec "${parsedCodec}" or audio object type not handled. Using "${result}"`);return result;}// Provide defaults based on codec type
// This allows for some playback of some fmp4 playlists without CODECS defined in manifest
logger.warn(`Unhandled video codec "${parsedCodec}"`);if(parsedCodec==='hvc1'||parsedCodec==='hev1'){return 'hvc1.1.6.L120.90';}if(parsedCodec==='av01'){return 'av01.0.04M.08';}return 'avc1.42e01e';}let now;// performance.now() not available on WebWorker, at least on Safari Desktop
try{now=self.performance.now.bind(self.performance);}catch(err){logger.debug('Unable to use Performance API on this environment');now=optionalSelf==null?void 0:optionalSelf.Date.now;}const muxConfig=[{demux:MP4Demuxer,remux:PassThroughRemuxer},{demux:TSDemuxer,remux:MP4Remuxer},{demux:AACDemuxer,remux:MP4Remuxer},{demux:MP3Demuxer,remux:MP4Remuxer}];{muxConfig.splice(2,0,{demux:AC3Demuxer,remux:MP4Remuxer});}class Transmuxer{constructor(observer,typeSupported,config,vendor,id){this.async=false;this.observer=void 0;this.typeSupported=void 0;this.config=void 0;this.vendor=void 0;this.id=void 0;this.demuxer=void 0;this.remuxer=void 0;this.decrypter=void 0;this.probe=void 0;this.decryptionPromise=null;this.transmuxConfig=void 0;this.currentTransmuxState=void 0;this.observer=observer;this.typeSupported=typeSupported;this.config=config;this.vendor=vendor;this.id=id;}configure(transmuxConfig){this.transmuxConfig=transmuxConfig;if(this.decrypter){this.decrypter.reset();}}push(data,decryptdata,chunkMeta,state){const stats=chunkMeta.transmuxing;stats.executeStart=now();let uintData=new Uint8Array(data);const{currentTransmuxState,transmuxConfig}=this;if(state){this.currentTransmuxState=state;}const{contiguous,discontinuity,trackSwitch,accurateTimeOffset,timeOffset,initSegmentChange}=state||currentTransmuxState;const{audioCodec,videoCodec,defaultInitPts,duration,initSegmentData}=transmuxConfig;const keyData=getEncryptionType(uintData,decryptdata);if(keyData&&keyData.method==='AES-128'){const decrypter=this.getDecrypter();// Software decryption is synchronous; webCrypto is not
if(decrypter.isSync()){// Software decryption is progressive. Progressive decryption may not return a result on each call. Any cached
// data is handled in the flush() call
let decryptedData=decrypter.softwareDecrypt(uintData,keyData.key.buffer,keyData.iv.buffer);// For Low-Latency HLS Parts, decrypt in place, since part parsing is expected on push progress
const loadingParts=chunkMeta.part>-1;if(loadingParts){decryptedData=decrypter.flush();}if(!decryptedData){stats.executeEnd=now();return emptyResult(chunkMeta);}uintData=new Uint8Array(decryptedData);}else {this.decryptionPromise=decrypter.webCryptoDecrypt(uintData,keyData.key.buffer,keyData.iv.buffer).then(decryptedData=>{// Calling push here is important; if flush() is called while this is still resolving, this ensures that
// the decrypted data has been transmuxed
const result=this.push(decryptedData,null,chunkMeta);this.decryptionPromise=null;return result;});return this.decryptionPromise;}}const resetMuxers=this.needsProbing(discontinuity,trackSwitch);if(resetMuxers){const error=this.configureTransmuxer(uintData);if(error){logger.warn(`[transmuxer] ${error.message}`);this.observer.emit(Events.ERROR,Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.FRAG_PARSING_ERROR,fatal:false,error,reason:error.message});stats.executeEnd=now();return emptyResult(chunkMeta);}}if(discontinuity||trackSwitch||initSegmentChange||resetMuxers){this.resetInitSegment(initSegmentData,audioCodec,videoCodec,duration,decryptdata);}if(discontinuity||initSegmentChange||resetMuxers){this.resetInitialTimestamp(defaultInitPts);}if(!contiguous){this.resetContiguity();}const result=this.transmux(uintData,keyData,timeOffset,accurateTimeOffset,chunkMeta);const currentState=this.currentTransmuxState;currentState.contiguous=true;currentState.discontinuity=false;currentState.trackSwitch=false;stats.executeEnd=now();return result;}// Due to data caching, flush calls can produce more than one TransmuxerResult (hence the Array type)
flush(chunkMeta){const stats=chunkMeta.transmuxing;stats.executeStart=now();const{decrypter,currentTransmuxState,decryptionPromise}=this;if(decryptionPromise){// Upon resolution, the decryption promise calls push() and returns its TransmuxerResult up the stack. Therefore
// only flushing is required for async decryption
return decryptionPromise.then(()=>{return this.flush(chunkMeta);});}const transmuxResults=[];const{timeOffset}=currentTransmuxState;if(decrypter){// The decrypter may have data cached, which needs to be demuxed. In this case we'll have two TransmuxResults
// This happens in the case that we receive only 1 push call for a segment (either for non-progressive downloads,
// or for progressive downloads with small segments)
const decryptedData=decrypter.flush();if(decryptedData){// Push always returns a TransmuxerResult if decryptdata is null
transmuxResults.push(this.push(decryptedData,null,chunkMeta));}}const{demuxer,remuxer}=this;if(!demuxer||!remuxer){// If probing failed, then Hls.js has been given content its not able to handle
stats.executeEnd=now();return [emptyResult(chunkMeta)];}const demuxResultOrPromise=demuxer.flush(timeOffset);if(isPromise(demuxResultOrPromise)){// Decrypt final SAMPLE-AES samples
return demuxResultOrPromise.then(demuxResult=>{this.flushRemux(transmuxResults,demuxResult,chunkMeta);return transmuxResults;});}this.flushRemux(transmuxResults,demuxResultOrPromise,chunkMeta);return transmuxResults;}flushRemux(transmuxResults,demuxResult,chunkMeta){const{audioTrack,videoTrack,id3Track,textTrack}=demuxResult;const{accurateTimeOffset,timeOffset}=this.currentTransmuxState;logger.log(`[transmuxer.ts]: Flushed fragment ${chunkMeta.sn}${chunkMeta.part>-1?' p: '+chunkMeta.part:''} of level ${chunkMeta.level}`);const remuxResult=this.remuxer.remux(audioTrack,videoTrack,id3Track,textTrack,timeOffset,accurateTimeOffset,true,this.id);transmuxResults.push({remuxResult,chunkMeta});chunkMeta.transmuxing.executeEnd=now();}resetInitialTimestamp(defaultInitPts){const{demuxer,remuxer}=this;if(!demuxer||!remuxer){return;}demuxer.resetTimeStamp(defaultInitPts);remuxer.resetTimeStamp(defaultInitPts);}resetContiguity(){const{demuxer,remuxer}=this;if(!demuxer||!remuxer){return;}demuxer.resetContiguity();remuxer.resetNextTimestamp();}resetInitSegment(initSegmentData,audioCodec,videoCodec,trackDuration,decryptdata){const{demuxer,remuxer}=this;if(!demuxer||!remuxer){return;}demuxer.resetInitSegment(initSegmentData,audioCodec,videoCodec,trackDuration);remuxer.resetInitSegment(initSegmentData,audioCodec,videoCodec,decryptdata);}destroy(){if(this.demuxer){this.demuxer.destroy();this.demuxer=undefined;}if(this.remuxer){this.remuxer.destroy();this.remuxer=undefined;}}transmux(data,keyData,timeOffset,accurateTimeOffset,chunkMeta){let result;if(keyData&&keyData.method==='SAMPLE-AES'){result=this.transmuxSampleAes(data,keyData,timeOffset,accurateTimeOffset,chunkMeta);}else {result=this.transmuxUnencrypted(data,timeOffset,accurateTimeOffset,chunkMeta);}return result;}transmuxUnencrypted(data,timeOffset,accurateTimeOffset,chunkMeta){const{audioTrack,videoTrack,id3Track,textTrack}=this.demuxer.demux(data,timeOffset,false,!this.config.progressive);const 
remuxResult=this.remuxer.remux(audioTrack,videoTrack,id3Track,textTrack,timeOffset,accurateTimeOffset,false,this.id);return {remuxResult,chunkMeta};}transmuxSampleAes(data,decryptData,timeOffset,accurateTimeOffset,chunkMeta){return this.demuxer.demuxSampleAes(data,decryptData,timeOffset).then(demuxResult=>{const remuxResult=this.remuxer.remux(demuxResult.audioTrack,demuxResult.videoTrack,demuxResult.id3Track,demuxResult.textTrack,timeOffset,accurateTimeOffset,false,this.id);return {remuxResult,chunkMeta};});}configureTransmuxer(data){const{config,observer,typeSupported,vendor}=this;// probe for content type
let mux;for(let i=0,len=muxConfig.length;i0&&(decryptData==null?void 0:decryptData.key)!=null&&decryptData.iv!==null&&decryptData.method!=null){encryptionType=decryptData;}return encryptionType;}const emptyResult=chunkMeta=>({remuxResult:{},chunkMeta});function isPromise(p){return 'then'in p&&p.then instanceof Function;}class TransmuxConfig{constructor(audioCodec,videoCodec,initSegmentData,duration,defaultInitPts){this.audioCodec=void 0;this.videoCodec=void 0;this.initSegmentData=void 0;this.duration=void 0;this.defaultInitPts=void 0;this.audioCodec=audioCodec;this.videoCodec=videoCodec;this.initSegmentData=initSegmentData;this.duration=duration;this.defaultInitPts=defaultInitPts||null;}}class TransmuxState{constructor(discontinuity,contiguous,accurateTimeOffset,trackSwitch,timeOffset,initSegmentChange){this.discontinuity=void 0;this.contiguous=void 0;this.accurateTimeOffset=void 0;this.trackSwitch=void 0;this.timeOffset=void 0;this.initSegmentChange=void 0;this.discontinuity=discontinuity;this.contiguous=contiguous;this.accurateTimeOffset=accurateTimeOffset;this.trackSwitch=trackSwitch;this.timeOffset=timeOffset;this.initSegmentChange=initSegmentChange;}}var eventemitter3={exports:{}};(function(module){var has=Object.prototype.hasOwnProperty,prefix='~';/**
* Constructor to create a storage for our `EE` objects.
* An `Events` instance is a plain object whose properties are event names.
*
* @constructor
* @private
*/function Events(){}//
// We try to not inherit from `Object.prototype`. In some engines creating an
// instance in this way is faster than calling `Object.create(null)` directly.
// If `Object.create(null)` is not supported we prefix the event names with a
// character to make sure that the built-in object properties are not
// overridden or used as an attack vector.
//
if(Object.create){Events.prototype=Object.create(null);//
// This hack is needed because the `__proto__` property is still inherited in
// some old browsers like Android 4, iPhone 5.1, Opera 11 and Safari 5.
//
if(!new Events().__proto__)prefix=false;}/**
* Representation of a single event listener.
*
* @param {Function} fn The listener function.
* @param {*} context The context to invoke the listener with.
* @param {Boolean} [once=false] Specify if the listener is a one-time listener.
* @constructor
* @private
*/function EE(fn,context,once){this.fn=fn;this.context=context;this.once=once||false;}/**
* Add a listener for a given event.
*
* @param {EventEmitter} emitter Reference to the `EventEmitter` instance.
* @param {(String|Symbol)} event The event name.
* @param {Function} fn The listener function.
* @param {*} context The context to invoke the listener with.
* @param {Boolean} once Specify if the listener is a one-time listener.
* @returns {EventEmitter}
* @private
*/function addListener(emitter,event,fn,context,once){if(typeof fn!=='function'){throw new TypeError('The listener must be a function');}var listener=new EE(fn,context||emitter,once),evt=prefix?prefix+event:event;if(!emitter._events[evt])emitter._events[evt]=listener,emitter._eventsCount++;else if(!emitter._events[evt].fn)emitter._events[evt].push(listener);else emitter._events[evt]=[emitter._events[evt],listener];return emitter;}/**
* Clear event by name.
*
* @param {EventEmitter} emitter Reference to the `EventEmitter` instance.
* @param {(String|Symbol)} evt The Event name.
* @private
*/function clearEvent(emitter,evt){if(--emitter._eventsCount===0)emitter._events=new Events();else delete emitter._events[evt];}/**
* Minimal `EventEmitter` interface that is molded against the Node.js
* `EventEmitter` interface.
*
* @constructor
* @public
*/function EventEmitter(){this._events=new Events();this._eventsCount=0;}/**
* Return an array listing the events for which the emitter has registered
* listeners.
*
* @returns {Array}
* @public
*/EventEmitter.prototype.eventNames=function eventNames(){var names=[],events,name;if(this._eventsCount===0)return names;for(name in events=this._events){if(has.call(events,name))names.push(prefix?name.slice(1):name);}if(Object.getOwnPropertySymbols){return names.concat(Object.getOwnPropertySymbols(events));}return names;};/**
* Return the listeners registered for a given event.
*
* @param {(String|Symbol)} event The event name.
* @returns {Array} The registered listeners.
* @public
*/EventEmitter.prototype.listeners=function listeners(event){var evt=prefix?prefix+event:event,handlers=this._events[evt];if(!handlers)return [];if(handlers.fn)return [handlers.fn];for(var i=0,l=handlers.length,ee=new Array(l);i{data=data||{};data.frag=this.frag;data.id=this.id;if(ev===Events.ERROR){this.error=data.error;}this.hls.trigger(ev,data);};// forward events to main thread
this.observer=new EventEmitter();this.observer.on(Events.FRAG_DECRYPTED,forwardMessage);this.observer.on(Events.ERROR,forwardMessage);const MediaSource=getMediaSource(config.preferManagedMediaSource)||{isTypeSupported:()=>false};const m2tsTypeSupported={mpeg:MediaSource.isTypeSupported('audio/mpeg'),mp3:MediaSource.isTypeSupported('audio/mp4; codecs="mp3"'),ac3:MediaSource.isTypeSupported('audio/mp4; codecs="ac-3"')};if(this.useWorker&&typeof Worker!=='undefined'){const canCreateWorker=config.workerPath||hasUMDWorker();if(canCreateWorker){try{if(config.workerPath){logger.log(`loading Web Worker ${config.workerPath} for "${id}"`);this.workerContext=loadWorker(config.workerPath);}else {logger.log(`injecting Web Worker for "${id}"`);this.workerContext=injectWorker();}this.onwmsg=event=>this.onWorkerMessage(event);const{worker}=this.workerContext;worker.addEventListener('message',this.onwmsg);worker.onerror=event=>{const error=new Error(`${event.message} (${event.filename}:${event.lineno})`);config.enableWorker=false;logger.warn(`Error in "${id}" Web Worker, fallback to inline`);this.hls.trigger(Events.ERROR,{type:ErrorTypes.OTHER_ERROR,details:ErrorDetails.INTERNAL_EXCEPTION,fatal:false,event:'demuxerWorker',error});};worker.postMessage({cmd:'init',typeSupported:m2tsTypeSupported,vendor:'',id:id,config:JSON.stringify(config)});}catch(err){logger.warn(`Error setting up "${id}" Web Worker, fallback to inline`,err);this.resetWorker();this.error=null;this.transmuxer=new Transmuxer(this.observer,m2tsTypeSupported,config,'',id);}return;}}this.transmuxer=new Transmuxer(this.observer,m2tsTypeSupported,config,'',id);}resetWorker(){if(this.workerContext){const{worker,objectURL}=this.workerContext;if(objectURL){// revoke the Object URL that was used to create transmuxer worker, so as not to leak it
self.URL.revokeObjectURL(objectURL);}worker.removeEventListener('message',this.onwmsg);worker.onerror=null;worker.terminate();this.workerContext=null;}}destroy(){if(this.workerContext){this.resetWorker();this.onwmsg=undefined;}else {const transmuxer=this.transmuxer;if(transmuxer){transmuxer.destroy();this.transmuxer=null;}}const observer=this.observer;if(observer){observer.removeAllListeners();}this.frag=null;// @ts-ignore
this.observer=null;// @ts-ignore
this.hls=null;}push(data,initSegmentData,audioCodec,videoCodec,frag,part,duration,accurateTimeOffset,chunkMeta,defaultInitPTS){var _frag$initSegment,_lastFrag$initSegment;chunkMeta.transmuxing.start=self.performance.now();const{transmuxer}=this;const timeOffset=part?part.start:frag.start;// TODO: push "clear-lead" decrypt data for unencrypted fragments in streams with encrypted ones
const decryptdata=frag.decryptdata;const lastFrag=this.frag;const discontinuity=!(lastFrag&&frag.cc===lastFrag.cc);const trackSwitch=!(lastFrag&&chunkMeta.level===lastFrag.level);const snDiff=lastFrag?chunkMeta.sn-lastFrag.sn:-1;const partDiff=this.part?chunkMeta.part-this.part.index:-1;const progressive=snDiff===0&&chunkMeta.id>1&&chunkMeta.id===(lastFrag==null?void 0:lastFrag.stats.chunkCount);const contiguous=!trackSwitch&&(snDiff===1||snDiff===0&&(partDiff===1||progressive&&partDiff<=0));const now=self.performance.now();if(trackSwitch||snDiff||frag.stats.parsing.start===0){frag.stats.parsing.start=now;}if(part&&(partDiff||!contiguous)){part.stats.parsing.start=now;}const initSegmentChange=!(lastFrag&&((_frag$initSegment=frag.initSegment)==null?void 0:_frag$initSegment.url)===((_lastFrag$initSegment=lastFrag.initSegment)==null?void 0:_lastFrag$initSegment.url));const state=new TransmuxState(discontinuity,contiguous,accurateTimeOffset,trackSwitch,timeOffset,initSegmentChange);if(!contiguous||discontinuity||initSegmentChange){logger.log(`[transmuxer-interface, ${frag.type}]: Starting new transmux session for sn: ${chunkMeta.sn} p: ${chunkMeta.part} level: ${chunkMeta.level} id: ${chunkMeta.id}
discontinuity: ${discontinuity}
trackSwitch: ${trackSwitch}
contiguous: ${contiguous}
accurateTimeOffset: ${accurateTimeOffset}
timeOffset: ${timeOffset}
initSegmentChange: ${initSegmentChange}`);const config=new TransmuxConfig(audioCodec,videoCodec,initSegmentData,duration,defaultInitPTS);this.configureTransmuxer(config);}this.frag=frag;this.part=part;// Frags with sn of 'initSegment' are not transmuxed
if(this.workerContext){// post fragment payload as transferable objects for ArrayBuffer (no copy)
this.workerContext.worker.postMessage({cmd:'demux',data,decryptdata,chunkMeta,state},data instanceof ArrayBuffer?[data]:[]);}else if(transmuxer){const transmuxResult=transmuxer.push(data,decryptdata,chunkMeta,state);if(isPromise(transmuxResult)){transmuxer.async=true;transmuxResult.then(data=>{this.handleTransmuxComplete(data);}).catch(error=>{this.transmuxerError(error,chunkMeta,'transmuxer-interface push error');});}else {transmuxer.async=false;this.handleTransmuxComplete(transmuxResult);}}}flush(chunkMeta){chunkMeta.transmuxing.start=self.performance.now();const{transmuxer}=this;if(this.workerContext){this.workerContext.worker.postMessage({cmd:'flush',chunkMeta});}else if(transmuxer){let transmuxResult=transmuxer.flush(chunkMeta);const asyncFlush=isPromise(transmuxResult);if(asyncFlush||transmuxer.async){if(!isPromise(transmuxResult)){transmuxResult=Promise.resolve(transmuxResult);}transmuxResult.then(data=>{this.handleFlushResult(data,chunkMeta);}).catch(error=>{this.transmuxerError(error,chunkMeta,'transmuxer-interface flush error');});}else {this.handleFlushResult(transmuxResult,chunkMeta);}}}transmuxerError(error,chunkMeta,reason){if(!this.hls){return;}this.error=error;this.hls.trigger(Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.FRAG_PARSING_ERROR,chunkMeta,frag:this.frag||undefined,fatal:false,error,err:error,reason});}handleFlushResult(results,chunkMeta){results.forEach(result=>{this.handleTransmuxComplete(result);});this.onFlush(chunkMeta);}onWorkerMessage(event){const data=event.data;if(!(data!=null&&data.event)){logger.warn(`worker message received with no ${data?'event name':'data'}`);return;}const hls=this.hls;if(!this.hls){return;}switch(data.event){case'init':{var _this$workerContext;const objectURL=(_this$workerContext=this.workerContext)==null?void 0:_this$workerContext.objectURL;if(objectURL){// revoke the Object URL that was used to create transmuxer worker, so as not to leak it
self.URL.revokeObjectURL(objectURL);}break;}case'transmuxComplete':{this.handleTransmuxComplete(data.data);break;}case'flush':{this.onFlush(data.data);break;}// pass logs from the worker thread to the main logger
case'workerLog':if(logger[data.data.logType]){logger[data.data.logType](data.data.message);}break;default:{data.data=data.data||{};data.data.frag=this.frag;data.data.id=this.id;hls.trigger(data.event,data.data);break;}}}configureTransmuxer(config){const{transmuxer}=this;if(this.workerContext){this.workerContext.worker.postMessage({cmd:'configure',config});}else if(transmuxer){transmuxer.configure(config);}}handleTransmuxComplete(result){result.chunkMeta.transmuxing.end=self.performance.now();this.onTransmuxComplete(result);}}function subtitleOptionsIdentical(trackList1,trackList2){if(trackList1.length!==trackList2.length){return false;}for(let i=0;iattrs1[subtitleAttribute]!==attrs2[subtitleAttribute]);}function subtitleTrackMatchesTextTrack(subtitleTrack,textTrack){return textTrack.label.toLowerCase()===subtitleTrack.name.toLowerCase()&&(!textTrack.language||textTrack.language.toLowerCase()===(subtitleTrack.lang||'').toLowerCase());}const TICK_INTERVAL$2=100;// how often to tick in ms
class AudioStreamController extends BaseStreamController{constructor(hls,fragmentTracker,keyLoader){super(hls,fragmentTracker,keyLoader,'[audio-stream-controller]',PlaylistLevelType.AUDIO);this.videoBuffer=null;this.videoTrackCC=-1;this.waitingVideoCC=-1;this.bufferedTrack=null;this.switchingTrack=null;this.trackId=-1;this.waitingData=null;this.mainDetails=null;this.flushing=false;this.bufferFlushed=false;this.cachedTrackLoadedData=null;this._registerListeners();}onHandlerDestroying(){this._unregisterListeners();super.onHandlerDestroying();this.mainDetails=null;this.bufferedTrack=null;this.switchingTrack=null;}_registerListeners(){const{hls}=this;hls.on(Events.MEDIA_ATTACHED,this.onMediaAttached,this);hls.on(Events.MEDIA_DETACHING,this.onMediaDetaching,this);hls.on(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.on(Events.LEVEL_LOADED,this.onLevelLoaded,this);hls.on(Events.AUDIO_TRACKS_UPDATED,this.onAudioTracksUpdated,this);hls.on(Events.AUDIO_TRACK_SWITCHING,this.onAudioTrackSwitching,this);hls.on(Events.AUDIO_TRACK_LOADED,this.onAudioTrackLoaded,this);hls.on(Events.ERROR,this.onError,this);hls.on(Events.BUFFER_RESET,this.onBufferReset,this);hls.on(Events.BUFFER_CREATED,this.onBufferCreated,this);hls.on(Events.BUFFER_FLUSHING,this.onBufferFlushing,this);hls.on(Events.BUFFER_FLUSHED,this.onBufferFlushed,this);hls.on(Events.INIT_PTS_FOUND,this.onInitPtsFound,this);hls.on(Events.FRAG_BUFFERED,this.onFragBuffered,this);}_unregisterListeners(){const{hls}=this;hls.off(Events.MEDIA_ATTACHED,this.onMediaAttached,this);hls.off(Events.MEDIA_DETACHING,this.onMediaDetaching,this);hls.off(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.off(Events.LEVEL_LOADED,this.onLevelLoaded,this);hls.off(Events.AUDIO_TRACKS_UPDATED,this.onAudioTracksUpdated,this);hls.off(Events.AUDIO_TRACK_SWITCHING,this.onAudioTrackSwitching,this);hls.off(Events.AUDIO_TRACK_LOADED,this.onAudioTrackLoaded,this);hls.off(Events.ERROR,this.onError,this);hls.off(Events.BUFFER_RESET,this.onBuf
ferReset,this);hls.off(Events.BUFFER_CREATED,this.onBufferCreated,this);hls.off(Events.BUFFER_FLUSHING,this.onBufferFlushing,this);hls.off(Events.BUFFER_FLUSHED,this.onBufferFlushed,this);hls.off(Events.INIT_PTS_FOUND,this.onInitPtsFound,this);hls.off(Events.FRAG_BUFFERED,this.onFragBuffered,this);}// INIT_PTS_FOUND is triggered when the video track parsed in the stream-controller has a new PTS value
onInitPtsFound(event,{frag,id,initPTS,timescale}){// Always update the new INIT PTS
// Can change due level switch
if(id==='main'){const cc=frag.cc;this.initPTS[frag.cc]={baseTime:initPTS,timescale};this.log(`InitPTS for cc: ${cc} found from main: ${initPTS}`);this.videoTrackCC=cc;// If we are waiting, tick immediately to unblock audio fragment transmuxing
if(this.state===State.WAITING_INIT_PTS){this.tick();}}}startLoad(startPosition){if(!this.levels){this.startPosition=startPosition;this.state=State.STOPPED;return;}const lastCurrentTime=this.lastCurrentTime;this.stopLoad();this.setInterval(TICK_INTERVAL$2);if(lastCurrentTime>0&&startPosition===-1){this.log(`Override startPosition with lastCurrentTime @${lastCurrentTime.toFixed(3)}`);startPosition=lastCurrentTime;this.state=State.IDLE;}else {this.loadedmetadata=false;this.state=State.WAITING_TRACK;}this.nextLoadPosition=this.startPosition=this.lastCurrentTime=startPosition;this.tick();}doTick(){switch(this.state){case State.IDLE:this.doTickIdle();break;case State.WAITING_TRACK:{var _levels$trackId;const{levels,trackId}=this;const details=levels==null?void 0:(_levels$trackId=levels[trackId])==null?void 0:_levels$trackId.details;if(details){if(this.waitForCdnTuneIn(details)){break;}this.state=State.WAITING_INIT_PTS;}break;}case State.FRAG_LOADING_WAITING_RETRY:{var _this$media;const now=performance.now();const retryDate=this.retryDate;// if current time is gt than retryDate, or if media seeking let's switch to IDLE state to retry loading
if(!retryDate||now>=retryDate||(_this$media=this.media)!=null&&_this$media.seeking){const{levels,trackId}=this;this.log('RetryDate reached, switch back to IDLE state');this.resetStartWhenNotLoaded((levels==null?void 0:levels[trackId])||null);this.state=State.IDLE;}break;}case State.WAITING_INIT_PTS:{// Ensure we don't get stuck in the WAITING_INIT_PTS state if the waiting frag CC doesn't match any initPTS
const waitingData=this.waitingData;if(waitingData){const{frag,part,cache,complete}=waitingData;if(this.initPTS[frag.cc]!==undefined){this.waitingData=null;this.waitingVideoCC=-1;this.state=State.FRAG_LOADING;const payload=cache.flush();const data={frag,part,payload,networkDetails:null};this._handleFragmentLoadProgress(data);if(complete){super._handleFragmentLoadComplete(data);}}else if(this.videoTrackCC!==this.waitingVideoCC){// Drop waiting fragment if videoTrackCC has changed since waitingFragment was set and initPTS was not found
this.log(`Waiting fragment cc (${frag.cc}) cancelled because video is at cc ${this.videoTrackCC}`);this.clearWaitingFragment();}else {// Drop waiting fragment if an earlier fragment is needed
const pos=this.getLoadPosition();const bufferInfo=BufferHelper.bufferInfo(this.mediaBuffer,pos,this.config.maxBufferHole);const waitingFragmentAtPosition=fragmentWithinToleranceTest(bufferInfo.end,this.config.maxFragLookUpTolerance,frag);if(waitingFragmentAtPosition<0){this.log(`Waiting fragment cc (${frag.cc}) @ ${frag.start} cancelled because another fragment at ${bufferInfo.end} is needed`);this.clearWaitingFragment();}}}else {this.state=State.IDLE;}}}this.onTickEnd();}clearWaitingFragment(){const waitingData=this.waitingData;if(waitingData){this.fragmentTracker.removeFragment(waitingData.frag);this.waitingData=null;this.waitingVideoCC=-1;this.state=State.IDLE;}}resetLoadingState(){this.clearWaitingFragment();super.resetLoadingState();}onTickEnd(){const{media}=this;if(!(media!=null&&media.readyState)){// Exit early if we don't have media or if the media hasn't buffered anything yet (readyState 0)
return;}this.lastCurrentTime=media.currentTime;}doTickIdle(){const{hls,levels,media,trackId}=this;const config=hls.config;// 1. if video not attached AND
// start fragment already requested OR start frag prefetch not enabled
// 2. if tracks or track not loaded and selected
// then exit loop
// => if media not attached but start frag prefetch is enabled and start frag not requested yet, we will not exit loop
if(!media&&(this.startFragRequested||!config.startFragPrefetch)||!(levels!=null&&levels[trackId])){return;}const levelInfo=levels[trackId];const trackDetails=levelInfo.details;if(!trackDetails||trackDetails.live&&this.levelLastLoaded!==levelInfo||this.waitForCdnTuneIn(trackDetails)){this.state=State.WAITING_TRACK;return;}const bufferable=this.mediaBuffer?this.mediaBuffer:this.media;if(this.bufferFlushed&&bufferable){this.bufferFlushed=false;this.afterBufferFlushed(bufferable,ElementaryStreamTypes.AUDIO,PlaylistLevelType.AUDIO);}const bufferInfo=this.getFwdBufferInfo(bufferable,PlaylistLevelType.AUDIO);if(bufferInfo===null){return;}const{bufferedTrack,switchingTrack}=this;if(!switchingTrack&&this._streamEnded(bufferInfo,trackDetails)){hls.trigger(Events.BUFFER_EOS,{type:'audio'});this.state=State.ENDED;return;}const mainBufferInfo=this.getFwdBufferInfo(this.videoBuffer?this.videoBuffer:this.media,PlaylistLevelType.MAIN);const bufferLen=bufferInfo.len;const maxBufLen=this.getMaxBufferLength(mainBufferInfo==null?void 0:mainBufferInfo.len);const fragments=trackDetails.fragments;const start=fragments[0].start;let targetBufferTime=this.flushing?this.getLoadPosition():bufferInfo.end;if(switchingTrack&&media){const pos=this.getLoadPosition();// STABLE
if(bufferedTrack&&!mediaAttributesIdentical(switchingTrack.attrs,bufferedTrack.attrs)){targetBufferTime=pos;}// if currentTime (pos) is less than alt audio playlist start time, it means that alt audio is ahead of currentTime
if(trackDetails.PTSKnown&&posstart||bufferInfo.nextStart){this.log('Alt audio track ahead of main track, seek to start of alt audio track');media.currentTime=start+0.05;}}}// if buffer length is less than maxBufLen, or near the end, find a fragment to load
if(bufferLen>=maxBufLen&&!switchingTrack&&targetBufferTimemainBufferInfo.end+trackDetails.targetduration;if(atBufferSyncLimit||// Or wait for main buffer after buffing some audio
!(mainBufferInfo!=null&&mainBufferInfo.len)&&bufferInfo.len){// Check fragment-tracker for main fragments since GAP segments do not show up in bufferInfo
const mainFrag=this.getAppendedFrag(frag.start,PlaylistLevelType.MAIN);if(mainFrag===null){return;}// Bridge gaps in main buffer
atGap||(atGap=!!mainFrag.gap||!!atBufferSyncLimit&&mainBufferInfo.len===0);if(atBufferSyncLimit&&!atGap||atGap&&bufferInfo.nextStart&&bufferInfo.nextStartnew Level(mediaPlaylist));}onAudioTrackSwitching(event,data){// if any URL found on new audio track, it is an alternate audio track
const altAudio=!!data.url;this.trackId=data.id;const{fragCurrent}=this;if(fragCurrent){fragCurrent.abortRequests();this.removeUnbufferedFrags(fragCurrent.start);}this.resetLoadingState();// destroy useless transmuxer when switching audio to main
if(!altAudio){this.resetTransmuxer();}else {// switching to audio track, start timer if not already started
this.setInterval(TICK_INTERVAL$2);}// should we switch tracks ?
if(altAudio){this.switchingTrack=data;// main audio track are handled by stream-controller, just do something if switching to alt audio track
this.state=State.IDLE;this.flushAudioIfNeeded(data);}else {this.switchingTrack=null;this.bufferedTrack=data;this.state=State.STOPPED;}this.tick();}onManifestLoading(){this.fragmentTracker.removeAllFragments();this.startPosition=this.lastCurrentTime=0;this.bufferFlushed=this.flushing=false;this.levels=this.mainDetails=this.waitingData=this.bufferedTrack=this.cachedTrackLoadedData=this.switchingTrack=null;this.startFragRequested=false;this.trackId=this.videoTrackCC=this.waitingVideoCC=-1;}onLevelLoaded(event,data){this.mainDetails=data.details;if(this.cachedTrackLoadedData!==null){this.hls.trigger(Events.AUDIO_TRACK_LOADED,this.cachedTrackLoadedData);this.cachedTrackLoadedData=null;}}onAudioTrackLoaded(event,data){var _track$details;if(this.mainDetails==null){this.cachedTrackLoadedData=data;return;}const{levels}=this;const{details:newDetails,id:trackId}=data;if(!levels){this.warn(`Audio tracks were reset while loading level ${trackId}`);return;}this.log(`Audio track ${trackId} loaded [${newDetails.startSN},${newDetails.endSN}]${newDetails.lastPartSn?`[part-${newDetails.lastPartSn}-${newDetails.lastPartIndex}]`:''},duration:${newDetails.totalduration}`);const track=levels[trackId];let sliding=0;if(newDetails.live||(_track$details=track.details)!=null&&_track$details.live){this.checkLiveUpdate(newDetails);const mainDetails=this.mainDetails;if(newDetails.deltaUpdateFailed||!mainDetails){return;}if(!track.details&&newDetails.hasProgramDateTime&&mainDetails.hasProgramDateTime){// Make sure our audio rendition is aligned with the "main" rendition, using
// pdt as our reference times.
alignMediaPlaylistByPDT(newDetails,mainDetails);sliding=newDetails.fragments[0].start;}else {var _this$levelLastLoaded;sliding=this.alignPlaylists(newDetails,track.details,(_this$levelLastLoaded=this.levelLastLoaded)==null?void 0:_this$levelLastLoaded.details);}}track.details=newDetails;this.levelLastLoaded=track;// compute start position if we are aligned with the main playlist
if(!this.startFragRequested&&(this.mainDetails||!newDetails.live)){this.setStartPosition(this.mainDetails||newDetails,sliding);}// only switch back to IDLE state if we were waiting for track to start downloading a new fragment
if(this.state===State.WAITING_TRACK&&!this.waitForCdnTuneIn(newDetails)){this.state=State.IDLE;}// trigger handler right now
this.tick();}_handleFragmentLoadProgress(data){var _frag$initSegment;const{frag,part,payload}=data;const{config,trackId,levels}=this;if(!levels){this.warn(`Audio tracks were reset while fragment load was in progress. Fragment ${frag.sn} of level ${frag.level} will not be buffered`);return;}const track=levels[trackId];if(!track){this.warn('Audio track is undefined on fragment load progress');return;}const details=track.details;if(!details){this.warn('Audio track details undefined on fragment load progress');this.removeUnbufferedFrags(frag.start);return;}const audioCodec=config.defaultAudioCodec||track.audioCodec||'mp4a.40.2';let transmuxer=this.transmuxer;if(!transmuxer){transmuxer=this.transmuxer=new TransmuxerInterface(this.hls,PlaylistLevelType.AUDIO,this._handleTransmuxComplete.bind(this),this._handleTransmuxerFlush.bind(this));}// Check if we have video initPTS
// If not we need to wait for it
const initPTS=this.initPTS[frag.cc];const initSegmentData=(_frag$initSegment=frag.initSegment)==null?void 0:_frag$initSegment.data;if(initPTS!==undefined){// this.log(`Transmuxing ${sn} of [${details.startSN} ,${details.endSN}],track ${trackId}`);
// time Offset is accurate if level PTS is known, or if playlist is not sliding (not live)
const accurateTimeOffset=false;// details.PTSKnown || !details.live;
const partIndex=part?part.index:-1;const partial=partIndex!==-1;const chunkMeta=new ChunkMetadata(frag.level,frag.sn,frag.stats.chunkCount,payload.byteLength,partIndex,partial);transmuxer.push(payload,initSegmentData,audioCodec,'',frag,part,details.totalduration,accurateTimeOffset,chunkMeta,initPTS);}else {this.log(`Unknown video PTS for cc ${frag.cc}, waiting for video PTS before demuxing audio frag ${frag.sn} of [${details.startSN} ,${details.endSN}],track ${trackId}`);const{cache}=this.waitingData=this.waitingData||{frag,part,cache:new ChunkCache(),complete:false};cache.push(new Uint8Array(payload));this.waitingVideoCC=this.videoTrackCC;this.state=State.WAITING_INIT_PTS;}}_handleFragmentLoadComplete(fragLoadedData){if(this.waitingData){this.waitingData.complete=true;return;}super._handleFragmentLoadComplete(fragLoadedData);}onBufferReset(/* event: Events.BUFFER_RESET */){// reset reference to sourcebuffers
this.mediaBuffer=this.videoBuffer=null;this.loadedmetadata=false;}onBufferCreated(event,data){const audioTrack=data.tracks.audio;if(audioTrack){this.mediaBuffer=audioTrack.buffer||null;}if(data.tracks.video){this.videoBuffer=data.tracks.video.buffer||null;}}onFragBuffered(event,data){const{frag,part}=data;if(frag.type!==PlaylistLevelType.AUDIO){if(!this.loadedmetadata&&frag.type===PlaylistLevelType.MAIN){const bufferable=this.videoBuffer||this.media;if(bufferable){const bufferedTimeRanges=BufferHelper.getBuffered(bufferable);if(bufferedTimeRanges.length){this.loadedmetadata=true;}}}return;}if(this.fragContextChanged(frag)){// If a level switch was requested while a fragment was buffering, it will emit the FRAG_BUFFERED event upon completion
// Avoid setting state back to IDLE or concluding the audio switch; otherwise, the switched-to track will not buffer
this.warn(`Fragment ${frag.sn}${part?' p: '+part.index:''} of level ${frag.level} finished buffering, but was aborted. state: ${this.state}, audioSwitch: ${this.switchingTrack?this.switchingTrack.name:'false'}`);return;}if(frag.sn!=='initSegment'){this.fragPrevious=frag;const track=this.switchingTrack;if(track){this.bufferedTrack=track;this.switchingTrack=null;this.hls.trigger(Events.AUDIO_TRACK_SWITCHED,_objectSpread2({},track));}}this.fragBufferedComplete(frag,part);}onError(event,data){var _data$context;if(data.fatal){this.state=State.ERROR;return;}switch(data.details){case ErrorDetails.FRAG_GAP:case ErrorDetails.FRAG_PARSING_ERROR:case ErrorDetails.FRAG_DECRYPT_ERROR:case ErrorDetails.FRAG_LOAD_ERROR:case ErrorDetails.FRAG_LOAD_TIMEOUT:case ErrorDetails.KEY_LOAD_ERROR:case ErrorDetails.KEY_LOAD_TIMEOUT:this.onFragmentOrKeyLoadError(PlaylistLevelType.AUDIO,data);break;case ErrorDetails.AUDIO_TRACK_LOAD_ERROR:case ErrorDetails.AUDIO_TRACK_LOAD_TIMEOUT:case ErrorDetails.LEVEL_PARSING_ERROR:// in case of non fatal error while loading track, if not retrying to load track, switch back to IDLE
if(!data.levelRetry&&this.state===State.WAITING_TRACK&&((_data$context=data.context)==null?void 0:_data$context.type)===PlaylistContextType.AUDIO_TRACK){this.state=State.IDLE;}break;case ErrorDetails.BUFFER_APPEND_ERROR:case ErrorDetails.BUFFER_FULL_ERROR:if(!data.parent||data.parent!=='audio'){return;}if(data.details===ErrorDetails.BUFFER_APPEND_ERROR){this.resetLoadingState();return;}if(this.reduceLengthAndFlushBuffer(data)){this.bufferedTrack=null;super.flushMainBuffer(0,Number.POSITIVE_INFINITY,'audio');}break;case ErrorDetails.INTERNAL_EXCEPTION:this.recoverWorkerError(data);break;}}onBufferFlushing(event,{type}){if(type!==ElementaryStreamTypes.VIDEO){this.flushing=true;}}onBufferFlushed(event,{type}){if(type!==ElementaryStreamTypes.VIDEO){this.flushing=false;this.bufferFlushed=true;if(this.state===State.ENDED){this.state=State.IDLE;}const mediaBuffer=this.mediaBuffer||this.media;if(mediaBuffer){this.afterBufferFlushed(mediaBuffer,type,PlaylistLevelType.AUDIO);this.tick();}}}_handleTransmuxComplete(transmuxResult){var _id3$samples;const id='audio';const{hls}=this;const{remuxResult,chunkMeta}=transmuxResult;const context=this.getCurrentContext(chunkMeta);if(!context){this.resetWhenMissingContext(chunkMeta);return;}const{frag,part,level}=context;const{details}=level;const{audio,text,id3,initSegment}=remuxResult;// Check if the current fragment has been aborted. We check this by first seeing if we're still playing the current level.
// If we are, subsequently check if the currently loading fragment (fragCurrent) has changed.
if(this.fragContextChanged(frag)||!details){this.fragmentTracker.removeFragment(frag);return;}this.state=State.PARSING;if(this.switchingTrack&&audio){this.completeAudioSwitch(this.switchingTrack);}if(initSegment!=null&&initSegment.tracks){const mapFragment=frag.initSegment||frag;this._bufferInitSegment(level,initSegment.tracks,mapFragment,chunkMeta);hls.trigger(Events.FRAG_PARSING_INIT_SEGMENT,{frag:mapFragment,id,tracks:initSegment.tracks});// Only flush audio from old audio tracks when PTS is known on new audio track
}if(audio){const{startPTS,endPTS,startDTS,endDTS}=audio;if(part){part.elementaryStreams[ElementaryStreamTypes.AUDIO]={startPTS,endPTS,startDTS,endDTS};}frag.setElementaryStreamInfo(ElementaryStreamTypes.AUDIO,startPTS,endPTS,startDTS,endDTS);this.bufferFragmentData(audio,frag,part,chunkMeta);}if(id3!=null&&(_id3$samples=id3.samples)!=null&&_id3$samples.length){const emittedID3=_extends({id,frag,details},id3);hls.trigger(Events.FRAG_PARSING_METADATA,emittedID3);}if(text){const emittedText=_extends({id,frag,details},text);hls.trigger(Events.FRAG_PARSING_USERDATA,emittedText);}}_bufferInitSegment(currentLevel,tracks,frag,chunkMeta){if(this.state!==State.PARSING){return;}// delete any video track found on audio transmuxer
if(tracks.video){delete tracks.video;}// include levelCodec in audio and video tracks
const track=tracks.audio;if(!track){return;}track.id='audio';const variantAudioCodecs=currentLevel.audioCodec;this.log(`Init audio buffer, container:${track.container}, codecs[level/parsed]=[${variantAudioCodecs}/${track.codec}]`);// SourceBuffer will use track.levelCodec if defined
if(variantAudioCodecs&&variantAudioCodecs.split(',').length===1){track.levelCodec=variantAudioCodecs;}this.hls.trigger(Events.BUFFER_CODECS,tracks);const initSegment=track.initSegment;if(initSegment!=null&&initSegment.byteLength){const segment={type:'audio',frag,part:null,chunkMeta,parent:frag.type,data:initSegment};this.hls.trigger(Events.BUFFER_APPENDING,segment);}// trigger handler right now
this.tickImmediate();}loadFragment(frag,track,targetBufferTime){// only load if fragment is not loaded or if in audio switch
const fragState=this.fragmentTracker.getState(frag);this.fragCurrent=frag;// we force a frag loading in audio switch as fragment tracker might not have evicted previous frags in case of quick audio switch
if(this.switchingTrack||fragState===FragmentState.NOT_LOADED||fragState===FragmentState.PARTIAL){var _track$details2;if(frag.sn==='initSegment'){this._loadInitSegment(frag,track);}else if((_track$details2=track.details)!=null&&_track$details2.live&&!this.initPTS[frag.cc]){this.log(`Waiting for video PTS in continuity counter ${frag.cc} of live stream before loading audio fragment ${frag.sn} of level ${this.trackId}`);this.state=State.WAITING_INIT_PTS;const mainDetails=this.mainDetails;if(mainDetails&&mainDetails.fragments[0].start!==track.details.fragments[0].start){alignMediaPlaylistByPDT(track.details,mainDetails);}}else {this.startFragRequested=true;super.loadFragment(frag,track,targetBufferTime);}}else {this.clearTrackerIfNeeded(frag);}}flushAudioIfNeeded(switchingTrack){const{media,bufferedTrack}=this;const bufferedAttributes=bufferedTrack==null?void 0:bufferedTrack.attrs;const switchAttributes=switchingTrack.attrs;if(media&&bufferedAttributes&&(bufferedAttributes.CHANNELS!==switchAttributes.CHANNELS||bufferedTrack.name!==switchingTrack.name||bufferedTrack.lang!==switchingTrack.lang)){this.log('Switching audio track : flushing all audio');super.flushMainBuffer(0,Number.POSITIVE_INFINITY,'audio');this.bufferedTrack=null;}}completeAudioSwitch(switchingTrack){const{hls}=this;this.flushAudioIfNeeded(switchingTrack);this.bufferedTrack=switchingTrack;this.switchingTrack=null;hls.trigger(Events.AUDIO_TRACK_SWITCHED,_objectSpread2({},switchingTrack));}}class AudioTrackController extends 
BasePlaylistController{constructor(hls){super(hls,'[audio-track-controller]');this.tracks=[];this.groupIds=null;this.tracksInGroup=[];this.trackId=-1;this.currentTrack=null;this.selectDefaultTrack=true;this.registerListeners();}registerListeners(){const{hls}=this;hls.on(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.on(Events.MANIFEST_PARSED,this.onManifestParsed,this);hls.on(Events.LEVEL_LOADING,this.onLevelLoading,this);hls.on(Events.LEVEL_SWITCHING,this.onLevelSwitching,this);hls.on(Events.AUDIO_TRACK_LOADED,this.onAudioTrackLoaded,this);hls.on(Events.ERROR,this.onError,this);}unregisterListeners(){const{hls}=this;hls.off(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.off(Events.MANIFEST_PARSED,this.onManifestParsed,this);hls.off(Events.LEVEL_LOADING,this.onLevelLoading,this);hls.off(Events.LEVEL_SWITCHING,this.onLevelSwitching,this);hls.off(Events.AUDIO_TRACK_LOADED,this.onAudioTrackLoaded,this);hls.off(Events.ERROR,this.onError,this);}destroy(){this.unregisterListeners();this.tracks.length=0;this.tracksInGroup.length=0;this.currentTrack=null;super.destroy();}onManifestLoading(){this.tracks=[];this.tracksInGroup=[];this.groupIds=null;this.currentTrack=null;this.trackId=-1;this.selectDefaultTrack=true;}onManifestParsed(event,data){this.tracks=data.audioTracks||[];}onAudioTrackLoaded(event,data){const{id,groupId,details}=data;const trackInActiveGroup=this.tracksInGroup[id];if(!trackInActiveGroup||trackInActiveGroup.groupId!==groupId){this.warn(`Audio track with id:${id} and group:${groupId} not found in active group ${trackInActiveGroup==null?void 0:trackInActiveGroup.groupId}`);return;}const curDetails=trackInActiveGroup.details;trackInActiveGroup.details=data.details;this.log(`Audio track ${id} "${trackInActiveGroup.name}" lang:${trackInActiveGroup.lang} group:${groupId} loaded 
[${details.startSN}-${details.endSN}]`);if(id===this.trackId){this.playlistLoaded(id,data,curDetails);}}onLevelLoading(event,data){this.switchLevel(data.level);}onLevelSwitching(event,data){this.switchLevel(data.level);}switchLevel(levelIndex){const levelInfo=this.hls.levels[levelIndex];if(!levelInfo){return;}const audioGroups=levelInfo.audioGroups||null;const currentGroups=this.groupIds;let currentTrack=this.currentTrack;if(!audioGroups||(currentGroups==null?void 0:currentGroups.length)!==(audioGroups==null?void 0:audioGroups.length)||audioGroups!=null&&audioGroups.some(groupId=>(currentGroups==null?void 0:currentGroups.indexOf(groupId))===-1)){this.groupIds=audioGroups;this.trackId=-1;this.currentTrack=null;const audioTracks=this.tracks.filter(track=>!audioGroups||audioGroups.indexOf(track.groupId)!==-1);if(audioTracks.length){// Disable selectDefaultTrack if there are no default tracks
if(this.selectDefaultTrack&&!audioTracks.some(track=>track.default)){this.selectDefaultTrack=false;}// track.id should match hls.audioTracks index
audioTracks.forEach((track,i)=>{track.id=i;});}else if(!currentTrack&&!this.tracksInGroup.length){// Do not dispatch AUDIO_TRACKS_UPDATED when there were and are no tracks
return;}this.tracksInGroup=audioTracks;// Find preferred track
const audioPreference=this.hls.config.audioPreference;if(!currentTrack&&audioPreference){const groupIndex=findMatchingOption(audioPreference,audioTracks,audioMatchPredicate);if(groupIndex>-1){currentTrack=audioTracks[groupIndex];}else {const allIndex=findMatchingOption(audioPreference,this.tracks);currentTrack=this.tracks[allIndex];}}// Select initial track
let trackId=this.findTrackId(currentTrack);if(trackId===-1&¤tTrack){trackId=this.findTrackId(null);}// Dispatch events and load track if needed
const audioTracksUpdated={audioTracks};this.log(`Updating audio tracks, ${audioTracks.length} track(s) found in group(s): ${audioGroups==null?void 0:audioGroups.join(',')}`);this.hls.trigger(Events.AUDIO_TRACKS_UPDATED,audioTracksUpdated);const selectedTrackId=this.trackId;if(trackId!==-1&&selectedTrackId===-1){this.setAudioTrack(trackId);}else if(audioTracks.length&&selectedTrackId===-1){var _this$groupIds;const error=new Error(`No audio track selected for current audio group-ID(s): ${(_this$groupIds=this.groupIds)==null?void 0:_this$groupIds.join(',')} track count: ${audioTracks.length}`);this.warn(error.message);this.hls.trigger(Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.AUDIO_TRACK_LOAD_ERROR,fatal:true,error});}}else if(this.shouldReloadPlaylist(currentTrack)){// Retry playlist loading if no playlist is or has been loaded yet
this.setAudioTrack(this.trackId);}}onError(event,data){if(data.fatal||!data.context){return;}if(data.context.type===PlaylistContextType.AUDIO_TRACK&&data.context.id===this.trackId&&(!this.groupIds||this.groupIds.indexOf(data.context.groupId)!==-1)){this.requestScheduled=-1;this.checkRetry(data);}}get allAudioTracks(){return this.tracks;}get audioTracks(){return this.tracksInGroup;}get audioTrack(){return this.trackId;}set audioTrack(newId){// If audio track is selected from API then don't choose from the manifest default track
this.selectDefaultTrack=false;this.setAudioTrack(newId);}setAudioOption(audioOption){const hls=this.hls;hls.config.audioPreference=audioOption;if(audioOption){const allAudioTracks=this.allAudioTracks;this.selectDefaultTrack=false;if(allAudioTracks.length){// First see if current option matches (no switch op)
const currentTrack=this.currentTrack;if(currentTrack&&matchesOption(audioOption,currentTrack,audioMatchPredicate)){return currentTrack;}// Find option in available tracks (tracksInGroup)
const groupIndex=findMatchingOption(audioOption,this.tracksInGroup,audioMatchPredicate);if(groupIndex>-1){const track=this.tracksInGroup[groupIndex];this.setAudioTrack(groupIndex);return track;}else if(currentTrack){// Find option in nearest level audio group
let searchIndex=hls.loadLevel;if(searchIndex===-1){searchIndex=hls.firstAutoLevel;}const switchIndex=findClosestLevelWithAudioGroup(audioOption,hls.levels,allAudioTracks,searchIndex,audioMatchPredicate);if(switchIndex===-1){// could not find matching variant
return null;}// and switch level to acheive the audio group switch
hls.nextLoadLevel=switchIndex;}if(audioOption.channels||audioOption.audioCodec){// Could not find a match with codec / channels predicate
// Find a match without channels or codec
const withoutCodecAndChannelsMatch=findMatchingOption(audioOption,allAudioTracks);if(withoutCodecAndChannelsMatch>-1){return allAudioTracks[withoutCodecAndChannelsMatch];}}}}return null;}setAudioTrack(newId){const tracks=this.tracksInGroup;// check if level idx is valid
if(newId<0||newId>=tracks.length){this.warn(`Invalid audio track id: ${newId}`);return;}// stopping live reloading timer if any
this.clearTimer();this.selectDefaultTrack=false;const lastTrack=this.currentTrack;const track=tracks[newId];const trackLoaded=track.details&&!track.details.live;if(newId===this.trackId&&track===lastTrack&&trackLoaded){return;}this.log(`Switching to audio-track ${newId} "${track.name}" lang:${track.lang} group:${track.groupId} channels:${track.channels}`);this.trackId=newId;this.currentTrack=track;this.hls.trigger(Events.AUDIO_TRACK_SWITCHING,_objectSpread2({},track));// Do not reload track unless live
/* NOTE(review): this line is corrupted — a '<…>'-delimited span was stripped during
   extraction inside `findTrackId` (`for(let i=0;i` is immediately followed by
   `=buffered[i].start`), fusing the tail of AudioTrackController with the interior of
   SubtitleStreamController. The remainder of findTrackId, the end of
   AudioTrackController, and the head of SubtitleStreamController (class declaration,
   constructor, listeners) are missing and must be restored from the original bundle.
   Code below is left byte-identical. */
if(trackLoaded){return;}const hlsUrlParameters=this.switchParams(track.url,lastTrack==null?void 0:lastTrack.details,track.details);this.loadPlaylist(hlsUrlParameters);}findTrackId(currentTrack){const audioTracks=this.tracksInGroup;for(let i=0;i=buffered[i].start&&fragStart<=buffered[i].end){timeRange=buffered[i];break;}}const fragEnd=frag.start+frag.duration;if(timeRange){timeRange.end=fragEnd;}else {timeRange={start:fragStart,end:fragEnd};buffered.push(timeRange);}this.fragmentTracker.fragBuffered(frag);this.fragBufferedComplete(frag,null);}onBufferFlushing(event,data){const{startOffset,endOffset}=data;if(startOffset===0&&endOffset!==Number.POSITIVE_INFINITY){const endOffsetSubtitles=endOffset-1;if(endOffsetSubtitles<=0){return;}data.endOffsetSubtitles=Math.max(0,endOffsetSubtitles);this.tracksBuffered.forEach(buffered=>{for(let i=0;inew Level(mediaPlaylist));return;}this.tracksBuffered=[];this.levels=subtitleTracks.map(mediaPlaylist=>{const level=new Level(mediaPlaylist);this.tracksBuffered[level.id]=[];return level;});this.fragmentTracker.removeFragmentsInRange(0,Number.POSITIVE_INFINITY,PlaylistLevelType.SUBTITLE);this.fragPrevious=null;this.mediaBuffer=null;}onSubtitleTrackSwitch(event,data){var _this$levels;this.currentTrackId=data.id;if(!((_this$levels=this.levels)!=null&&_this$levels.length)||this.currentTrackId===-1){this.clearInterval();return;}// Check if track has the necessary details to load fragments
const currentTrack=this.levels[this.currentTrackId];if(currentTrack!=null&¤tTrack.details){this.mediaBuffer=this.mediaBufferTimeRanges;}else {this.mediaBuffer=null;}if(currentTrack){this.setInterval(TICK_INTERVAL$1);}}// Got a new set of subtitle fragments.
onSubtitleTrackLoaded(event,data){var _track$details;const{currentTrackId,levels}=this;const{details:newDetails,id:trackId}=data;if(!levels){this.warn(`Subtitle tracks were reset while loading level ${trackId}`);return;}const track=levels[trackId];if(trackId>=levels.length||!track){return;}this.log(`Subtitle track ${trackId} loaded [${newDetails.startSN},${newDetails.endSN}]${newDetails.lastPartSn?`[part-${newDetails.lastPartSn}-${newDetails.lastPartIndex}]`:''},duration:${newDetails.totalduration}`);this.mediaBuffer=this.mediaBufferTimeRanges;let sliding=0;if(newDetails.live||(_track$details=track.details)!=null&&_track$details.live){const mainDetails=this.mainDetails;if(newDetails.deltaUpdateFailed||!mainDetails){return;}const mainSlidingStartFragment=mainDetails.fragments[0];if(!track.details){if(newDetails.hasProgramDateTime&&mainDetails.hasProgramDateTime){alignMediaPlaylistByPDT(newDetails,mainDetails);sliding=newDetails.fragments[0].start;}else if(mainSlidingStartFragment){// line up live playlist with main so that fragments in range are loaded
sliding=mainSlidingStartFragment.start;addSliding(newDetails,sliding);}}else {var _this$levelLastLoaded;sliding=this.alignPlaylists(newDetails,track.details,(_this$levelLastLoaded=this.levelLastLoaded)==null?void 0:_this$levelLastLoaded.details);if(sliding===0&&mainSlidingStartFragment){// realign with main when there is no overlap with last refresh
sliding=mainSlidingStartFragment.start;addSliding(newDetails,sliding);}}}track.details=newDetails;this.levelLastLoaded=track;if(trackId!==currentTrackId){return;}if(!this.startFragRequested&&(this.mainDetails||!newDetails.live)){this.setStartPosition(this.mainDetails||newDetails,sliding);}// trigger handler right now
this.tick();// If playlist is misaligned because of bad PDT or drift, delete details to resync with main on reload
if(newDetails.live&&!this.fragCurrent&&this.media&&this.state===State.IDLE){const foundFrag=findFragmentByPTS(null,newDetails.fragments,this.media.currentTime,0);if(!foundFrag){this.warn('Subtitle playlist not aligned with playback');track.details=undefined;}}}_handleFragmentLoadComplete(fragLoadedData){const{frag,payload}=fragLoadedData;const decryptData=frag.decryptdata;const hls=this.hls;if(this.fragContextChanged(frag)){return;}// check to see if the payload needs to be decrypted
if(payload&&payload.byteLength>0&&decryptData!=null&&decryptData.key&&decryptData.iv&&decryptData.method==='AES-128'){const startTime=performance.now();// decrypt the subtitles
this.decrypter.decrypt(new Uint8Array(payload),decryptData.key.buffer,decryptData.iv.buffer).catch(err=>{hls.trigger(Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.FRAG_DECRYPT_ERROR,fatal:false,error:err,reason:err.message,frag});throw err;}).then(decryptedData=>{const endTime=performance.now();hls.trigger(Events.FRAG_DECRYPTED,{frag,payload:decryptedData,stats:{tstart:startTime,tdecrypt:endTime}});}).catch(err=>{this.warn(`${err.name}: ${err.message}`);this.state=State.IDLE;});}}doTick(){if(!this.media){this.state=State.IDLE;return;}if(this.state===State.IDLE){const{currentTrackId,levels}=this;const track=levels==null?void 0:levels[currentTrackId];if(!track||!levels.length||!track.details){return;}const{config}=this;const currentTime=this.getLoadPosition();const bufferedInfo=BufferHelper.bufferedInfo(this.tracksBuffered[this.currentTrackId]||[],currentTime,config.maxBufferHole);const{end:targetBufferTime,len:bufferLen}=bufferedInfo;const mainBufferInfo=this.getFwdBufferInfo(this.media,PlaylistLevelType.MAIN);const trackDetails=track.details;const maxBufLen=this.getMaxBufferLength(mainBufferInfo==null?void 0:mainBufferInfo.len)+trackDetails.levelTargetDuration;if(bufferLen>maxBufLen){return;}const fragments=trackDetails.fragments;const fragLen=fragments.length;const end=trackDetails.edge;let foundFrag=null;const fragPrevious=this.fragPrevious;if(targetBufferTimeend-tolerance?0:tolerance;foundFrag=findFragmentByPTS(fragPrevious,fragments,Math.max(fragments[0].start,targetBufferTime),lookupTolerance);if(!foundFrag&&fragPrevious&&fragPrevious.start{index=index>>>0;if(index>length-1){throw new DOMException(`Failed to execute '${name}' on 'TimeRanges': The index provided (${index}) is greater than the maximum bound (${length})`);}return timeranges[index][name];};this.buffered={get length(){return timeranges.length;},end(index){return getRange('end',index,timeranges.length);},start(index){return getRange('start',index,timeranges.length);}};}}class 
SubtitleTrackController extends BasePlaylistController{constructor(hls){super(hls,'[subtitle-track-controller]');this.media=null;this.tracks=[];this.groupIds=null;this.tracksInGroup=[];this.trackId=-1;this.currentTrack=null;this.selectDefaultTrack=true;this.queuedDefaultTrack=-1;this.asyncPollTrackChange=()=>this.pollTrackChange(0);this.useTextTrackPolling=false;this.subtitlePollingInterval=-1;this._subtitleDisplay=true;this.onTextTracksChanged=()=>{if(!this.useTextTrackPolling){self.clearInterval(this.subtitlePollingInterval);}// Media is undefined when switching streams via loadSource()
if(!this.media||!this.hls.config.renderTextTracksNatively){return;}let textTrack=null;const tracks=filterSubtitleTracks(this.media.textTracks);for(let i=0;i-1){this.toggleTrackModes();}}registerListeners(){const{hls}=this;hls.on(Events.MEDIA_ATTACHED,this.onMediaAttached,this);hls.on(Events.MEDIA_DETACHING,this.onMediaDetaching,this);hls.on(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.on(Events.MANIFEST_PARSED,this.onManifestParsed,this);hls.on(Events.LEVEL_LOADING,this.onLevelLoading,this);hls.on(Events.LEVEL_SWITCHING,this.onLevelSwitching,this);hls.on(Events.SUBTITLE_TRACK_LOADED,this.onSubtitleTrackLoaded,this);hls.on(Events.ERROR,this.onError,this);}unregisterListeners(){const{hls}=this;hls.off(Events.MEDIA_ATTACHED,this.onMediaAttached,this);hls.off(Events.MEDIA_DETACHING,this.onMediaDetaching,this);hls.off(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.off(Events.MANIFEST_PARSED,this.onManifestParsed,this);hls.off(Events.LEVEL_LOADING,this.onLevelLoading,this);hls.off(Events.LEVEL_SWITCHING,this.onLevelSwitching,this);hls.off(Events.SUBTITLE_TRACK_LOADED,this.onSubtitleTrackLoaded,this);hls.off(Events.ERROR,this.onError,this);}// Listen for subtitle track change, then extract the current track ID.
onMediaAttached(event,data){this.media=data.media;if(!this.media){return;}if(this.queuedDefaultTrack>-1){this.subtitleTrack=this.queuedDefaultTrack;this.queuedDefaultTrack=-1;}this.useTextTrackPolling=!(this.media.textTracks&&'onchange'in this.media.textTracks);if(this.useTextTrackPolling){this.pollTrackChange(500);}else {this.media.textTracks.addEventListener('change',this.asyncPollTrackChange);}}pollTrackChange(timeout){self.clearInterval(this.subtitlePollingInterval);this.subtitlePollingInterval=self.setInterval(this.onTextTracksChanged,timeout);}onMediaDetaching(){if(!this.media){return;}self.clearInterval(this.subtitlePollingInterval);if(!this.useTextTrackPolling){this.media.textTracks.removeEventListener('change',this.asyncPollTrackChange);}if(this.trackId>-1){this.queuedDefaultTrack=this.trackId;}const textTracks=filterSubtitleTracks(this.media.textTracks);// Clear loaded cues on media detachment from tracks
textTracks.forEach(track=>{clearCurrentCues(track);});// Disable all subtitle tracks before detachment so when reattached only tracks in that content are enabled.
this.subtitleTrack=-1;this.media=null;}onManifestLoading(){this.tracks=[];this.groupIds=null;this.tracksInGroup=[];this.trackId=-1;this.currentTrack=null;this.selectDefaultTrack=true;}// Fired whenever a new manifest is loaded.
onManifestParsed(event,data){this.tracks=data.subtitleTracks;}onSubtitleTrackLoaded(event,data){const{id,groupId,details}=data;const trackInActiveGroup=this.tracksInGroup[id];if(!trackInActiveGroup||trackInActiveGroup.groupId!==groupId){this.warn(`Subtitle track with id:${id} and group:${groupId} not found in active group ${trackInActiveGroup==null?void 0:trackInActiveGroup.groupId}`);return;}const curDetails=trackInActiveGroup.details;trackInActiveGroup.details=data.details;this.log(`Subtitle track ${id} "${trackInActiveGroup.name}" lang:${trackInActiveGroup.lang} group:${groupId} loaded [${details.startSN}-${details.endSN}]`);if(id===this.trackId){this.playlistLoaded(id,data,curDetails);}}onLevelLoading(event,data){this.switchLevel(data.level);}onLevelSwitching(event,data){this.switchLevel(data.level);}switchLevel(levelIndex){const levelInfo=this.hls.levels[levelIndex];if(!levelInfo){return;}const subtitleGroups=levelInfo.subtitleGroups||null;const currentGroups=this.groupIds;let currentTrack=this.currentTrack;if(!subtitleGroups||(currentGroups==null?void 0:currentGroups.length)!==(subtitleGroups==null?void 0:subtitleGroups.length)||subtitleGroups!=null&&subtitleGroups.some(groupId=>(currentGroups==null?void 0:currentGroups.indexOf(groupId))===-1)){this.groupIds=subtitleGroups;this.trackId=-1;this.currentTrack=null;const subtitleTracks=this.tracks.filter(track=>!subtitleGroups||subtitleGroups.indexOf(track.groupId)!==-1);if(subtitleTracks.length){// Disable selectDefaultTrack if there are no default tracks
if(this.selectDefaultTrack&&!subtitleTracks.some(track=>track.default)){this.selectDefaultTrack=false;}// track.id should match hls.audioTracks index
subtitleTracks.forEach((track,i)=>{track.id=i;});}else if(!currentTrack&&!this.tracksInGroup.length){// Do not dispatch SUBTITLE_TRACKS_UPDATED when there were and are no tracks
return;}this.tracksInGroup=subtitleTracks;// Find preferred track
const subtitlePreference=this.hls.config.subtitlePreference;if(!currentTrack&&subtitlePreference){this.selectDefaultTrack=false;const groupIndex=findMatchingOption(subtitlePreference,subtitleTracks);if(groupIndex>-1){currentTrack=subtitleTracks[groupIndex];}else {const allIndex=findMatchingOption(subtitlePreference,this.tracks);currentTrack=this.tracks[allIndex];}}// Select initial track
let trackId=this.findTrackId(currentTrack);if(trackId===-1&¤tTrack){trackId=this.findTrackId(null);}// Dispatch events and load track if needed
const subtitleTracksUpdated={subtitleTracks};this.log(`Updating subtitle tracks, ${subtitleTracks.length} track(s) found in "${subtitleGroups==null?void 0:subtitleGroups.join(',')}" group-id`);this.hls.trigger(Events.SUBTITLE_TRACKS_UPDATED,subtitleTracksUpdated);if(trackId!==-1&&this.trackId===-1){this.setSubtitleTrack(trackId);}}else if(this.shouldReloadPlaylist(currentTrack)){// Retry playlist loading if no playlist is or has been loaded yet
this.setSubtitleTrack(this.trackId);}}findTrackId(currentTrack){const tracks=this.tracksInGroup;const selectDefault=this.selectDefaultTrack;for(let i=0;i-1){const track=this.tracksInGroup[groupIndex];this.setSubtitleTrack(groupIndex);return track;}else if(currentTrack){// If this is not the initial selection return null
// option should have matched one in active group
return null;}else {// Find the option in all tracks for initial selection
const allIndex=findMatchingOption(subtitleOption,allSubtitleTracks);if(allIndex>-1){return allSubtitleTracks[allIndex];}}}}return null;}loadPlaylist(hlsUrlParameters){super.loadPlaylist();const currentTrack=this.currentTrack;if(this.shouldLoadPlaylist(currentTrack)&¤tTrack){const id=currentTrack.id;const groupId=currentTrack.groupId;let url=currentTrack.url;if(hlsUrlParameters){try{url=hlsUrlParameters.addDirectives(url);}catch(error){this.warn(`Could not construct new URL with HLS Delivery Directives: ${error}`);}}this.log(`Loading subtitle playlist for id ${id}`);this.hls.trigger(Events.SUBTITLE_TRACK_LOADING,{url,id,groupId,deliveryDirectives:hlsUrlParameters||null});}}/**
* Disables the old subtitleTrack and sets current mode on the next subtitleTrack.
* This operates on the DOM textTracks.
* A value of -1 will disable all subtitle tracks.
*/toggleTrackModes(){const{media}=this;if(!media){return;}const textTracks=filterSubtitleTracks(media.textTracks);const currentTrack=this.currentTrack;let nextTrack;if(currentTrack){nextTrack=textTracks.filter(textTrack=>subtitleTrackMatchesTextTrack(currentTrack,textTrack))[0];if(!nextTrack){this.warn(`Unable to find subtitle TextTrack with name "${currentTrack.name}" and language "${currentTrack.lang}"`);}}[].slice.call(textTracks).forEach(track=>{if(track.mode!=='disabled'&&track!==nextTrack){track.mode='disabled';}});if(nextTrack){const mode=this.subtitleDisplay?'showing':'hidden';if(nextTrack.mode!==mode){nextTrack.mode=mode;}}}/**
* This method is responsible for validating the subtitle index and periodically reloading if live.
* Dispatches the SUBTITLE_TRACK_SWITCH event, which instructs the subtitle-stream-controller to load the selected track.
*/setSubtitleTrack(newId){const tracks=this.tracksInGroup;// setting this.subtitleTrack will trigger internal logic
// if media has not been attached yet, it will fail
// we keep a reference to the default track id
// and we'll set subtitleTrack when onMediaAttached is triggered
if(!this.media){this.queuedDefaultTrack=newId;return;}// exit if track id as already set or invalid
if(newId<-1||newId>=tracks.length||!isFiniteNumber(newId)){this.warn(`Invalid subtitle track id: ${newId}`);return;}// stopping live reloading timer if any
this.clearTimer();this.selectDefaultTrack=false;const lastTrack=this.currentTrack;const track=tracks[newId]||null;this.trackId=newId;this.currentTrack=track;this.toggleTrackModes();if(!track){// switch to -1
this.hls.trigger(Events.SUBTITLE_TRACK_SWITCH,{id:newId});return;}const trackLoaded=!!track.details&&!track.details.live;if(newId===this.trackId&&track===lastTrack&&trackLoaded){return;}this.log(`Switching to subtitle-track ${newId}`+(track?` "${track.name}" lang:${track.lang} group:${track.groupId}`:''));const{id,groupId='',name,type,url}=track;this.hls.trigger(Events.SUBTITLE_TRACK_SWITCH,{id,groupId,name,type,url});const hlsUrlParameters=this.switchParams(track.url,lastTrack==null?void 0:lastTrack.details,track.details);this.loadPlaylist(hlsUrlParameters);}}class BufferOperationQueue{constructor(sourceBufferReference){this.buffers=void 0;this.queues={video:[],audio:[],audiovideo:[]};this.buffers=sourceBufferReference;}append(operation,type,pending){const queue=this.queues[type];queue.push(operation);if(queue.length===1&&!pending){this.executeNext(type);}}insertAbort(operation,type){const queue=this.queues[type];queue.unshift(operation);this.executeNext(type);}appendBlocker(type){let execute;const promise=new Promise(resolve=>{execute=resolve;});const operation={execute,onStart:()=>{},onComplete:()=>{},onError:()=>{}};this.append(operation,type);return promise;}executeNext(type){const queue=this.queues[type];if(queue.length){const operation=queue[0];try{// Operations are expected to result in an 'updateend' event being fired. If not, the queue will lock. Operations
// which do not end with this event must call _onSBUpdateEnd manually
operation.execute();}catch(error){logger.warn(`[buffer-operation-queue]: Exception executing "${type}" SourceBuffer operation: ${error}`);operation.onError(error);// Only shift the current operation off, otherwise the updateend handler will do this for us
const sb=this.buffers[type];if(!(sb!=null&&sb.updating)){this.shiftAndExecuteNext(type);}}}}shiftAndExecuteNext(type){this.queues[type].shift();this.executeNext(type);}current(type){return this.queues[type][0];}}const VIDEO_CODEC_PROFILE_REPLACE=/(avc[1234]|hvc1|hev1|dvh[1e]|vp09|av01)(?:\.[^.,]+)+/;class BufferController{constructor(hls){// The level details used to determine duration, target-duration and live
this.details=null;// cache the self generated object url to detect hijack of video tag
this._objectUrl=null;// A queue of buffer operations which require the SourceBuffer to not be updating upon execution
this.operationQueue=void 0;// References to event listeners for each SourceBuffer, so that they can be referenced for event removal
this.listeners=void 0;this.hls=void 0;// The number of BUFFER_CODEC events received before any sourceBuffers are created
this.bufferCodecEventsExpected=0;// The total number of BUFFER_CODEC events received
this._bufferCodecEventsTotal=0;// A reference to the attached media element
this.media=null;// A reference to the active media source
this.mediaSource=null;// Last MP3 audio chunk appended
this.lastMpegAudioChunk=null;this.appendSource=void 0;// counters
this.appendErrors={audio:0,video:0,audiovideo:0};this.tracks={};this.pendingTracks={};this.sourceBuffer=void 0;this.log=void 0;this.warn=void 0;this.error=void 0;this._onEndStreaming=event=>{if(!this.hls){return;}this.hls.pauseBuffering();};this._onStartStreaming=event=>{if(!this.hls){return;}this.hls.resumeBuffering();};// Keep as arrow functions so that we can directly reference these functions directly as event listeners
this._onMediaSourceOpen=()=>{const{media,mediaSource}=this;this.log('Media source opened');if(media){media.removeEventListener('emptied',this._onMediaEmptied);this.updateMediaElementDuration();this.hls.trigger(Events.MEDIA_ATTACHED,{media,mediaSource:mediaSource});}if(mediaSource){// once received, don't listen anymore to sourceopen event
mediaSource.removeEventListener('sourceopen',this._onMediaSourceOpen);}this.checkPendingTracks();};this._onMediaSourceClose=()=>{this.log('Media source closed');};this._onMediaSourceEnded=()=>{this.log('Media source ended');};this._onMediaEmptied=()=>{const{mediaSrc,_objectUrl}=this;if(mediaSrc!==_objectUrl){logger.error(`Media element src was set while attaching MediaSource (${_objectUrl} > ${mediaSrc})`);}};this.hls=hls;const logPrefix='[buffer-controller]';this.appendSource=isManagedMediaSource(getMediaSource(hls.config.preferManagedMediaSource));this.log=logger.log.bind(logger,logPrefix);this.warn=logger.warn.bind(logger,logPrefix);this.error=logger.error.bind(logger,logPrefix);this._initSourceBuffer();this.registerListeners();}hasSourceTypes(){return this.getSourceBufferTypes().length>0||Object.keys(this.pendingTracks).length>0;}destroy(){this.unregisterListeners();this.details=null;this.lastMpegAudioChunk=null;// @ts-ignore
this.hls=null;}registerListeners(){const{hls}=this;hls.on(Events.MEDIA_ATTACHING,this.onMediaAttaching,this);hls.on(Events.MEDIA_DETACHING,this.onMediaDetaching,this);hls.on(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.on(Events.MANIFEST_PARSED,this.onManifestParsed,this);hls.on(Events.BUFFER_RESET,this.onBufferReset,this);hls.on(Events.BUFFER_APPENDING,this.onBufferAppending,this);hls.on(Events.BUFFER_CODECS,this.onBufferCodecs,this);hls.on(Events.BUFFER_EOS,this.onBufferEos,this);hls.on(Events.BUFFER_FLUSHING,this.onBufferFlushing,this);hls.on(Events.LEVEL_UPDATED,this.onLevelUpdated,this);hls.on(Events.FRAG_PARSED,this.onFragParsed,this);hls.on(Events.FRAG_CHANGED,this.onFragChanged,this);}unregisterListeners(){const{hls}=this;hls.off(Events.MEDIA_ATTACHING,this.onMediaAttaching,this);hls.off(Events.MEDIA_DETACHING,this.onMediaDetaching,this);hls.off(Events.MANIFEST_LOADING,this.onManifestLoading,this);hls.off(Events.MANIFEST_PARSED,this.onManifestParsed,this);hls.off(Events.BUFFER_RESET,this.onBufferReset,this);hls.off(Events.BUFFER_APPENDING,this.onBufferAppending,this);hls.off(Events.BUFFER_CODECS,this.onBufferCodecs,this);hls.off(Events.BUFFER_EOS,this.onBufferEos,this);hls.off(Events.BUFFER_FLUSHING,this.onBufferFlushing,this);hls.off(Events.LEVEL_UPDATED,this.onLevelUpdated,this);hls.off(Events.FRAG_PARSED,this.onFragParsed,this);hls.off(Events.FRAG_CHANGED,this.onFragChanged,this);}_initSourceBuffer(){this.sourceBuffer={};this.operationQueue=new BufferOperationQueue(this.sourceBuffer);this.listeners={audio:[],video:[],audiovideo:[]};this.appendErrors={audio:0,video:0,audiovideo:0};this.lastMpegAudioChunk=null;}onManifestLoading(){this.bufferCodecEventsExpected=this._bufferCodecEventsTotal=0;this.details=null;}onManifestParsed(event,data){// in case of alt audio 2 BUFFER_CODECS events will be triggered, one per stream controller
// SourceBuffers will be created all at once, once the expected number of tracks has been reached.
// If alt audio is not used, only one BUFFER_CODEC event will be fired, from the main stream controller;
// it will contain the expected number of source buffers, so there is no need to compute it.
let codecEvents=2;if(data.audio&&!data.video||!data.altAudio||!true){codecEvents=1;}this.bufferCodecEventsExpected=this._bufferCodecEventsTotal=codecEvents;this.log(`${this.bufferCodecEventsExpected} bufferCodec event(s) expected`);}onMediaAttaching(event,data){const media=this.media=data.media;const MediaSource=getMediaSource(this.appendSource);if(media&&MediaSource){var _ms$constructor;const ms=this.mediaSource=new MediaSource();this.log(`created media source: ${(_ms$constructor=ms.constructor)==null?void 0:_ms$constructor.name}`);// MediaSource listeners are arrow functions with a lexical scope, and do not need to be bound
ms.addEventListener('sourceopen',this._onMediaSourceOpen);ms.addEventListener('sourceended',this._onMediaSourceEnded);ms.addEventListener('sourceclose',this._onMediaSourceClose);if(this.appendSource){ms.addEventListener('startstreaming',this._onStartStreaming);ms.addEventListener('endstreaming',this._onEndStreaming);}// cache the locally generated object url
const objectUrl=this._objectUrl=self.URL.createObjectURL(ms);// link video and media Source
if(this.appendSource){try{media.removeAttribute('src');// ManagedMediaSource will not open without disableRemotePlayback set to false or source alternatives
const MMS=self.ManagedMediaSource;media.disableRemotePlayback=media.disableRemotePlayback||MMS&&ms instanceof MMS;removeSourceChildren(media);addSource(media,objectUrl);media.load();}catch(error){media.src=objectUrl;}}else {media.src=objectUrl;}media.addEventListener('emptied',this._onMediaEmptied);}}onMediaDetaching(){const{media,mediaSource,_objectUrl}=this;if(mediaSource){this.log('media source detaching');if(mediaSource.readyState==='open'){try{// endOfStream could trigger exception if any sourcebuffer is in updating state
// we don't really care about checking sourcebuffer state here,
// as we are anyway detaching the MediaSource
// let's just prevent this exception from propagating
mediaSource.endOfStream();}catch(err){this.warn(`onMediaDetaching: ${err.message} while calling endOfStream`);}}// Clean up the SourceBuffers by invoking onBufferReset
this.onBufferReset();mediaSource.removeEventListener('sourceopen',this._onMediaSourceOpen);mediaSource.removeEventListener('sourceended',this._onMediaSourceEnded);mediaSource.removeEventListener('sourceclose',this._onMediaSourceClose);if(this.appendSource){mediaSource.removeEventListener('startstreaming',this._onStartStreaming);mediaSource.removeEventListener('endstreaming',this._onEndStreaming);}// Detach properly the MediaSource from the HTMLMediaElement as
// suggested in https://github.com/w3c/media-source/issues/53.
if(media){media.removeEventListener('emptied',this._onMediaEmptied);if(_objectUrl){self.URL.revokeObjectURL(_objectUrl);}// clean up video tag src only if it's our own url. some external libraries might
// hijack the video tag and change its 'src' without destroying the Hls instance first
if(this.mediaSrc===_objectUrl){media.removeAttribute('src');if(this.appendSource){removeSourceChildren(media);}media.load();}else {this.warn('media|source.src was changed by a third party - skip cleanup');}}this.mediaSource=null;this.media=null;this._objectUrl=null;this.bufferCodecEventsExpected=this._bufferCodecEventsTotal;this.pendingTracks={};this.tracks={};}this.hls.trigger(Events.MEDIA_DETACHED,undefined);}onBufferReset(){this.getSourceBufferTypes().forEach(type=>{this.resetBuffer(type);});this._initSourceBuffer();}resetBuffer(type){const sb=this.sourceBuffer[type];try{if(sb){var _this$mediaSource;this.removeBufferListeners(type);// Synchronously remove the SB from the map before the next call in order to prevent an async function from
// accessing it
this.sourceBuffer[type]=undefined;if((_this$mediaSource=this.mediaSource)!=null&&_this$mediaSource.sourceBuffers.length){this.mediaSource.removeSourceBuffer(sb);}}}catch(err){this.warn(`onBufferReset ${type}`,err);}}onBufferCodecs(event,data){const sourceBufferCount=this.getSourceBufferTypes().length;const trackNames=Object.keys(data);trackNames.forEach(trackName=>{if(sourceBufferCount){// check if SourceBuffer codec needs to change
const track=this.tracks[trackName];if(track&&typeof track.buffer.changeType==='function'){var _trackCodec;const{id,codec,levelCodec,container,metadata}=data[trackName];const currentCodecFull=pickMostCompleteCodecName(track.codec,track.levelCodec);const currentCodec=currentCodecFull==null?void 0:currentCodecFull.replace(VIDEO_CODEC_PROFILE_REPLACE,'$1');let trackCodec=pickMostCompleteCodecName(codec,levelCodec);const nextCodec=(_trackCodec=trackCodec)==null?void 0:_trackCodec.replace(VIDEO_CODEC_PROFILE_REPLACE,'$1');if(trackCodec&¤tCodec!==nextCodec){if(trackName.slice(0,5)==='audio'){trackCodec=getCodecCompatibleName(trackCodec,this.appendSource);}const mimeType=`${container};codecs=${trackCodec}`;this.appendChangeType(trackName,mimeType);this.log(`switching codec ${currentCodecFull} to ${trackCodec}`);this.tracks[trackName]={buffer:track.buffer,codec,container,levelCodec,metadata,id};}}}else {// if source buffer(s) not created yet, appended buffer tracks in this.pendingTracks
this.pendingTracks[trackName]=data[trackName];}});// if sourcebuffers already created, do nothing ...
if(sourceBufferCount){return;}const bufferCodecEventsExpected=Math.max(this.bufferCodecEventsExpected-1,0);if(this.bufferCodecEventsExpected!==bufferCodecEventsExpected){this.log(`${bufferCodecEventsExpected} bufferCodec event(s) expected ${trackNames.join(',')}`);this.bufferCodecEventsExpected=bufferCodecEventsExpected;}if(this.mediaSource&&this.mediaSource.readyState==='open'){this.checkPendingTracks();}}appendChangeType(type,mimeType){const{operationQueue}=this;const operation={execute:()=>{const sb=this.sourceBuffer[type];if(sb){this.log(`changing ${type} sourceBuffer type to ${mimeType}`);sb.changeType(mimeType);}operationQueue.shiftAndExecuteNext(type);},onStart:()=>{},onComplete:()=>{},onError:error=>{this.warn(`Failed to change ${type} SourceBuffer type`,error);}};operationQueue.append(operation,type,!!this.pendingTracks[type]);}onBufferAppending(event,eventData){const{hls,operationQueue,tracks}=this;const{data,type,frag,part,chunkMeta}=eventData;const chunkStats=chunkMeta.buffering[type];const bufferAppendingStart=self.performance.now();chunkStats.start=bufferAppendingStart;const fragBuffering=frag.stats.buffering;const partBuffering=part?part.stats.buffering:null;if(fragBuffering.start===0){fragBuffering.start=bufferAppendingStart;}if(partBuffering&&partBuffering.start===0){partBuffering.start=bufferAppendingStart;}// TODO: Only update timestampOffset when audio/mpeg fragment or part is not contiguous with previously appended
// Adjusting `SourceBuffer.timestampOffset` (desired point in the timeline where the next frames should be appended)
// in Chrome browser when we detect MPEG audio container and time delta between level PTS and `SourceBuffer.timestampOffset`
// is greater than 100ms (this is enough to handle seek for VOD or level change for LIVE videos).
// More info here: https://github.com/video-dev/hls.js/issues/332#issuecomment-257986486
const audioTrack=tracks.audio;let checkTimestampOffset=false;if(type==='audio'&&(audioTrack==null?void 0:audioTrack.container)==='audio/mpeg'){checkTimestampOffset=!this.lastMpegAudioChunk||chunkMeta.id===1||this.lastMpegAudioChunk.sn!==chunkMeta.sn;this.lastMpegAudioChunk=chunkMeta;}const fragStart=frag.start;const operation={execute:()=>{chunkStats.executeStart=self.performance.now();if(checkTimestampOffset){const sb=this.sourceBuffer[type];if(sb){const delta=fragStart-sb.timestampOffset;if(Math.abs(delta)>=0.1){this.log(`Updating audio SourceBuffer timestampOffset to ${fragStart} (delta: ${delta}) sn: ${frag.sn})`);sb.timestampOffset=fragStart;}}}this.appendExecutor(data,type);},onStart:()=>{// logger.debug(`[buffer-controller]: ${type} SourceBuffer updatestart`);
},onComplete:()=>{// logger.debug(`[buffer-controller]: ${type} SourceBuffer updateend`);
const end=self.performance.now();chunkStats.executeEnd=chunkStats.end=end;if(fragBuffering.first===0){fragBuffering.first=end;}if(partBuffering&&partBuffering.first===0){partBuffering.first=end;}const{sourceBuffer}=this;const timeRanges={};for(const type in sourceBuffer){timeRanges[type]=BufferHelper.getBuffered(sourceBuffer[type]);}this.appendErrors[type]=0;if(type==='audio'||type==='video'){this.appendErrors.audiovideo=0;}else {this.appendErrors.audio=0;this.appendErrors.video=0;}this.hls.trigger(Events.BUFFER_APPENDED,{type,frag,part,chunkMeta,parent:frag.type,timeRanges});},onError:error=>{// in case any error occured while appending, put back segment in segments table
const event={type:ErrorTypes.MEDIA_ERROR,parent:frag.type,details:ErrorDetails.BUFFER_APPEND_ERROR,sourceBufferName:type,frag,part,chunkMeta,error,err:error,fatal:false};if(error.code===DOMException.QUOTA_EXCEEDED_ERR){// QuotaExceededError: http://www.w3.org/TR/html5/infrastructure.html#quotaexceedederror
// let's stop appending any segments, and report BUFFER_FULL_ERROR error
event.details=ErrorDetails.BUFFER_FULL_ERROR;}else {const appendErrorCount=++this.appendErrors[type];event.details=ErrorDetails.BUFFER_APPEND_ERROR;/* with UHD content, we could get loop of quota exceeded error until
browser is able to evict some data from sourcebuffer. Retrying can help recover.
*/this.warn(`Failed ${appendErrorCount}/${hls.config.appendErrorMaxRetry} times to append segment in "${type}" sourceBuffer`);if(appendErrorCount>=hls.config.appendErrorMaxRetry){event.fatal=true;}}hls.trigger(Events.ERROR,event);}};operationQueue.append(operation,type,!!this.pendingTracks[type]);}onBufferFlushing(event,data){const{operationQueue}=this;const flushOperation=type=>({execute:this.removeExecutor.bind(this,type,data.startOffset,data.endOffset),onStart:()=>{// logger.debug(`[buffer-controller]: Started flushing ${data.startOffset} -> ${data.endOffset} for ${type} Source Buffer`);
},onComplete:()=>{// logger.debug(`[buffer-controller]: Finished flushing ${data.startOffset} -> ${data.endOffset} for ${type} Source Buffer`);
// NOTE(review): minified hls.js BufferController fragment — the class header is outside
// this chunk. The first line finishes onBufferFlushing (BUFFER_FLUSHED trigger, then one
// flush operation enqueued per targeted SourceBuffer type). onFragParsed records which
// elementary streams were appended ('audiovideo' | 'audio'/'video') and fires
// FRAG_BUFFERED only after those operation queues unblock; onBufferEos marks matching
// SourceBuffers ended and queues mediaSource.endOfStream() behind pending operations.
this.hls.trigger(Events.BUFFER_FLUSHED,{type});},onError:error=>{this.warn(`Failed to remove from ${type} SourceBuffer`,error);}});if(data.type){operationQueue.append(flushOperation(data.type),data.type);}else {this.getSourceBufferTypes().forEach(type=>{operationQueue.append(flushOperation(type),type);});}}onFragParsed(event,data){const{frag,part}=data;const buffersAppendedTo=[];const elementaryStreams=part?part.elementaryStreams:frag.elementaryStreams;if(elementaryStreams[ElementaryStreamTypes.AUDIOVIDEO]){buffersAppendedTo.push('audiovideo');}else {if(elementaryStreams[ElementaryStreamTypes.AUDIO]){buffersAppendedTo.push('audio');}if(elementaryStreams[ElementaryStreamTypes.VIDEO]){buffersAppendedTo.push('video');}}const onUnblocked=()=>{const now=self.performance.now();frag.stats.buffering.end=now;if(part){part.stats.buffering.end=now;}const stats=part?part.stats:frag.stats;this.hls.trigger(Events.FRAG_BUFFERED,{frag,part,stats,id:frag.type});};if(buffersAppendedTo.length===0){this.warn(`Fragments must have at least one ElementaryStreamType set. type: ${frag.type} level: ${frag.level} sn: ${frag.sn}`);}this.blockBuffers(onUnblocked,buffersAppendedTo);}onFragChanged(event,data){this.trimBuffers();}// on BUFFER_EOS mark matching sourcebuffer(s) as ended and trigger checkEos()
// an undefined data.type will mark all buffers as EOS.
onBufferEos(event,data){const ended=this.getSourceBufferTypes().reduce((acc,type)=>{const sb=this.sourceBuffer[type];if(sb&&(!data.type||data.type===type)){sb.ending=true;if(!sb.ended){sb.ended=true;this.log(`${type} sourceBuffer now EOS`);}}return acc&&!!(!sb||sb.ended);},true);if(ended){this.log(`Queueing mediaSource.endOfStream()`);this.blockBuffers(()=>{this.getSourceBufferTypes().forEach(type=>{const sb=this.sourceBuffer[type];if(sb){sb.ending=false;}});const{mediaSource}=this;if(!mediaSource||mediaSource.readyState!=='open'){if(mediaSource){this.log(`Could not call mediaSource.endOfStream(). mediaSource.readyState: ${mediaSource.readyState}`);}return;}this.log(`Calling mediaSource.endOfStream()`);// Allow this to throw and be caught by the enqueueing function
// onLevelUpdated defers duration updates behind pending buffer operations; trimBuffers
// derives back/front keep-windows (rounded to targetDuration multiples) from
// backBufferLength / frontBufferFlushThreshold and delegates to the flush helpers below.
mediaSource.endOfStream();});}}onLevelUpdated(event,{details}){if(!details.fragments.length){return;}this.details=details;if(this.getSourceBufferTypes().length){this.blockBuffers(this.updateMediaElementDuration.bind(this));}else {this.updateMediaElementDuration();}}trimBuffers(){const{hls,details,media}=this;if(!media||details===null){return;}const sourceBufferTypes=this.getSourceBufferTypes();if(!sourceBufferTypes.length){return;}const config=hls.config;const currentTime=media.currentTime;const targetDuration=details.levelTargetDuration;// Support for deprecated liveBackBufferLength
const backBufferLength=details.live&&config.liveBackBufferLength!==null?config.liveBackBufferLength:config.backBufferLength;if(isFiniteNumber(backBufferLength)&&backBufferLength>0){const maxBackBufferLength=Math.max(backBufferLength,targetDuration);const targetBackBufferPosition=Math.floor(currentTime/targetDuration)*targetDuration-maxBackBufferLength;this.flushBackBuffer(currentTime,targetDuration,targetBackBufferPosition);}if(isFiniteNumber(config.frontBufferFlushThreshold)&&config.frontBufferFlushThreshold>0){const frontBufferLength=Math.max(config.maxBufferLength,config.frontBufferFlushThreshold);const maxFrontBufferLength=Math.max(frontBufferLength,targetDuration);const targetFrontBufferPosition=Math.floor(currentTime/targetDuration)*targetDuration+maxFrontBufferLength;this.flushFrontBuffer(currentTime,targetDuration,targetFrontBufferPosition);}}flushBackBuffer(currentTime,targetDuration,targetBackBufferPosition){const{details,sourceBuffer}=this;const sourceBufferTypes=this.getSourceBufferTypes();sourceBufferTypes.forEach(type=>{const sb=sourceBuffer[type];if(sb){const buffered=BufferHelper.getBuffered(sb);// when target buffer start exceeds actual buffer start
if(buffered.length>0&&targetBackBufferPosition>buffered.start(0)){this.hls.trigger(Events.BACK_BUFFER_REACHED,{bufferEnd:targetBackBufferPosition});// Support for deprecated event:
if(details!=null&&details.live){this.hls.trigger(Events.LIVE_BACK_BUFFER_REACHED,{bufferEnd:targetBackBufferPosition});}else if(sb.ended&&buffered.end(buffered.length-1)-currentTime{const sb=sourceBuffer[type];if(sb){const buffered=BufferHelper.getBuffered(sb);const numBufferedRanges=buffered.length;// The buffer is either empty or contiguous
if(numBufferedRanges<2){return;}const bufferStart=buffered.start(numBufferedRanges-1);const bufferEnd=buffered.end(numBufferedRanges-1);// No flush if we can tolerate the current buffer length or the current buffer range we would flush is contiguous with current position
if(targetFrontBufferPosition>bufferStart||currentTime>=bufferStart&¤tTime<=bufferEnd){return;}else if(sb.ended&¤tTime-bufferEnd<2*targetDuration){this.log(`Cannot flush ${type} front buffer while SourceBuffer is in ended state`);return;}this.hls.trigger(Events.BUFFER_FLUSHING,{startOffset:bufferStart,endOffset:Infinity,type});}});}/**
* Update Media Source duration to current level duration or override to Infinity if configuration parameter
* 'liveDurationInfinity` is set to `true`
* More details: https://github.com/video-dev/hls.js/issues/355
* NOTE(review): in this minified bundle the lines below also contain updateSeekableRange
* (calls mediaSource.setLiveSeekableRange for live streams when supported) and the head
* of checkPendingTracks — units share physical lines.
*/updateMediaElementDuration(){if(!this.details||!this.media||!this.mediaSource||this.mediaSource.readyState!=='open'){return;}const{details,hls,media,mediaSource}=this;const levelDuration=details.fragments[0].start+details.totalduration;const mediaDuration=media.duration;const msDuration=isFiniteNumber(mediaSource.duration)?mediaSource.duration:0;if(details.live&&hls.config.liveDurationInfinity){// Override duration to Infinity
mediaSource.duration=Infinity;this.updateSeekableRange(details);}else if(levelDuration>msDuration&&levelDuration>mediaDuration||!isFiniteNumber(mediaDuration)){// levelDuration was the last value we set.
// not using mediaSource.duration as the browser may tweak this value
// only update Media Source duration if its value increase, this is to avoid
// flushing already buffered portion when switching between quality level
this.log(`Updating Media Source duration to ${levelDuration.toFixed(3)}`);mediaSource.duration=levelDuration;}}updateSeekableRange(levelDetails){const mediaSource=this.mediaSource;const fragments=levelDetails.fragments;const len=fragments.length;if(len&&levelDetails.live&&mediaSource!=null&&mediaSource.setLiveSeekableRange){const start=Math.max(0,fragments[0].start);const end=Math.max(start,start+levelDetails.totalduration);this.log(`Media Source duration is set to ${mediaSource.duration}. Setting seekable range to ${start}-${end}.`);mediaSource.setLiveSeekableRange(start,end);}}checkPendingTracks(){const{bufferCodecEventsExpected,operationQueue,pendingTracks}=this;// Check if we've received all of the expected bufferCodec events. When none remain, create all the sourceBuffers at once.
// This is important because the MSE spec allows implementations to throw QuotaExceededErrors if creating new sourceBuffers after
// data has been appended to existing ones.
// 2 tracks is the max (one for audio, one for video). If we've reach this max go ahead and create the buffers.
const pendingTracksCount=Object.keys(pendingTracks).length;if(pendingTracksCount&&(!bufferCodecEventsExpected||pendingTracksCount===2||'audiovideo'in pendingTracks)){// ok, let's create them now !
this.createSourceBuffers(pendingTracks);this.pendingTracks={};// append any pending segments now !
// createSourceBuffers: builds one SourceBuffer per pending track; levelCodec wins over
// track.codec unless it holds a comma-separated list; audio codecs are normalized via
// getCodecCompatibleName. addSourceBuffer failures emit a non-fatal
// BUFFER_ADD_CODEC_ERROR rather than throwing.
const buffers=this.getSourceBufferTypes();if(buffers.length){this.hls.trigger(Events.BUFFER_CREATED,{tracks:this.tracks});buffers.forEach(type=>{operationQueue.executeNext(type);});}else {const error=new Error('could not create source buffer for media codec(s)');this.hls.trigger(Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.BUFFER_INCOMPATIBLE_CODECS_ERROR,fatal:true,error,reason:error.message});}}}createSourceBuffers(tracks){const{sourceBuffer,mediaSource}=this;if(!mediaSource){throw Error('createSourceBuffers called when mediaSource was null');}for(const trackName in tracks){if(!sourceBuffer[trackName]){var _track$levelCodec;const track=tracks[trackName];if(!track){throw Error(`source buffer exists for track ${trackName}, however track does not`);}// use levelCodec as first priority unless it contains multiple comma-separated codec values
let codec=((_track$levelCodec=track.levelCodec)==null?void 0:_track$levelCodec.indexOf(','))===-1?track.levelCodec:track.codec;if(codec){if(trackName.slice(0,5)==='audio'){codec=getCodecCompatibleName(codec,this.appendSource);}}const mimeType=`${track.container};codecs=${codec}`;this.log(`creating sourceBuffer(${mimeType})`);try{const sb=sourceBuffer[trackName]=mediaSource.addSourceBuffer(mimeType);const sbName=trackName;this.addBufferListener(sbName,'updatestart',this._onSBUpdateStart);this.addBufferListener(sbName,'updateend',this._onSBUpdateEnd);this.addBufferListener(sbName,'error',this._onSBUpdateError);// ManagedSourceBuffer bufferedchange event
if(this.appendSource){this.addBufferListener(sbName,'bufferedchange',(type,event)=>{// If media was ejected check for a change. Added ranges are redundant with changes on 'updateend' event.
const removedRanges=event.removedRanges;if(removedRanges!=null&&removedRanges.length){this.hls.trigger(Events.BUFFER_FLUSHED,{type:trackName});}});}this.tracks[trackName]={buffer:sb,codec:codec,container:track.container,levelCodec:track.levelCodec,metadata:track.metadata,id:track.id};}catch(err){this.error(`error while trying to add sourceBuffer: ${err.message}`);this.hls.trigger(Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.BUFFER_ADD_CODEC_ERROR,fatal:false,error:err,sourceBufferName:trackName,mimeType:mimeType});}}}}get mediaSrc(){var _this$media;const media=((_this$media=this.media)==null?void 0:_this$media.firstChild)||this.media;return media==null?void 0:media.src;}_onSBUpdateStart(type){const{operationQueue}=this;const operation=operationQueue.current(type);operation.onStart();}_onSBUpdateEnd(type){var _this$mediaSource2;if(((_this$mediaSource2=this.mediaSource)==null?void 0:_this$mediaSource2.readyState)==='closed'){this.resetBuffer(type);return;}const{operationQueue}=this;const operation=operationQueue.current(type);operation.onComplete();operationQueue.shiftAndExecuteNext(type);}_onSBUpdateError(type,event){var _this$mediaSource3;const error=new Error(`${type} SourceBuffer error. MediaSource readyState: ${(_this$mediaSource3=this.mediaSource)==null?void 0:_this$mediaSource3.readyState}`);this.error(`${error}`,event);// according to http://www.w3.org/TR/media-source/#sourcebuffer-append-error
// SourceBuffer errors are not necessarily fatal; if so, the HTMLMediaElement will fire an error event
this.hls.trigger(Events.ERROR,{type:ErrorTypes.MEDIA_ERROR,details:ErrorDetails.BUFFER_APPENDING_ERROR,sourceBufferName:type,error,fatal:false});// updateend is always fired after error, so we'll allow that to shift the current operation off of the queue
const operation=this.operationQueue.current(type);if(operation){operation.onError(error);}}// This method must result in an updateend event; if remove is not called, _onSBUpdateEnd must be called manually
removeExecutor(type,startOffset,endOffset){const{media,mediaSource,operationQueue,sourceBuffer}=this;const sb=sourceBuffer[type];if(!media||!mediaSource||!sb){this.warn(`Attempting to remove from the ${type} SourceBuffer, but it does not exist`);operationQueue.shiftAndExecuteNext(type);return;}const mediaDuration=isFiniteNumber(media.duration)?media.duration:Infinity;const msDuration=isFiniteNumber(mediaSource.duration)?mediaSource.duration:Infinity;const removeStart=Math.max(0,startOffset);const removeEnd=Math.min(endOffset,mediaDuration,msDuration);if(removeEnd>removeStart&&(!sb.ending||sb.ended)){sb.ended=false;this.log(`Removing [${removeStart},${removeEnd}] from the ${type} SourceBuffer`);sb.remove(removeStart,removeEnd);}else {// Cycle the queue
operationQueue.shiftAndExecuteNext(type);}}// This method must result in an updateend event; if append is not called, _onSBUpdateEnd must be called manually
appendExecutor(data,type){const sb=this.sourceBuffer[type];if(!sb){if(!this.pendingTracks[type]){throw new Error(`Attempting to append to the ${type} SourceBuffer, but it does not exist`);}return;}sb.ended=false;sb.appendBuffer(data);}// Enqueues an operation to each SourceBuffer queue which, upon execution, resolves a promise. When all promises
// resolve, the onUnblocked function is executed. Functions calling this method do not need to unblock the queue
// upon completion, since we already do it here
blockBuffers(onUnblocked,buffers=this.getSourceBufferTypes()){if(!buffers.length){this.log('Blocking operation requested, but no SourceBuffers exist');Promise.resolve().then(onUnblocked);return;}const{operationQueue}=this;// logger.debug(`[buffer-controller]: Blocking ${buffers} SourceBuffer`);
const blockingOperations=buffers.map(type=>operationQueue.appendBlocker(type));Promise.all(blockingOperations).then(()=>{// logger.debug(`[buffer-controller]: Blocking operation resolved; unblocking ${buffers} SourceBuffer`);
onUnblocked();buffers.forEach(type=>{const sb=this.sourceBuffer[type];// Only cycle the queue if the SB is not updating. There's a bug in Chrome which sets the SB updating flag to
// true when changing the MediaSource duration (https://bugs.chromium.org/p/chromium/issues/detail?id=959359&can=2&q=mediasource%20duration)
// While this is a workaround, it's probably useful to have around
// The last line also closes the BufferController class and defines two <source>-element
// helpers used by the attach logic: removeSourceChildren / addSource (video/mp4 source).
if(!(sb!=null&&sb.updating)){operationQueue.shiftAndExecuteNext(type);}});});}getSourceBufferTypes(){return Object.keys(this.sourceBuffer);}addBufferListener(type,event,fn){const buffer=this.sourceBuffer[type];if(!buffer){return;}const listener=fn.bind(this,type);this.listeners[type].push({event,listener});buffer.addEventListener(event,listener);}removeBufferListeners(type){const buffer=this.sourceBuffer[type];if(!buffer){return;}this.listeners[type].forEach(l=>{buffer.removeEventListener(l.event,l.listener);});}}function removeSourceChildren(node){const sourceChildren=node.querySelectorAll('source');[].slice.call(sourceChildren).forEach(source=>{node.removeChild(source);});}function addSource(media,url){const source=self.document.createElement('source');source.type='video/mp4';source.src=url;media.appendChild(source);}/**
*
* This code was ported from the dash.js project at:
* https://github.com/Dash-Industry-Forum/dash.js/blob/development/externals/cea608-parser.js
* https://github.com/Dash-Industry-Forum/dash.js/commit/8269b26a761e0853bb21d78780ed945144ecdd4d#diff-71bc295a2d6b6b7093a1d3290d53a4b2
*
* The original copyright appears below:
*
* The copyright in this software is being made available under the BSD License,
* included below. This software may be subject to other third party and contributor
* rights, including patent rights, and no such rights are granted under this license.
*
* Copyright (c) 2015-2016, DASH Industry Forum.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
* 2. Neither the name of Dash Industry Forum nor the names of its
* contributors may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/ /**
* Exceptions from regular ASCII. CodePoints are mapped to UTF-16 codes
*/
// Lookup table: CEA-608 byte values (including the two-byte extended sets) -> UTF-16
// code points. Bytes not present fall through to String.fromCharCode(byte) in
// getCharForByte below.
const specialCea608CharsCodes={0x2a:0xe1,// lowercase a, acute accent
0x5c:0xe9,// lowercase e, acute accent
0x5e:0xed,// lowercase i, acute accent
0x5f:0xf3,// lowercase o, acute accent
0x60:0xfa,// lowercase u, acute accent
0x7b:0xe7,// lowercase c with cedilla
0x7c:0xf7,// division symbol
0x7d:0xd1,// uppercase N tilde
0x7e:0xf1,// lowercase n tilde
0x7f:0x2588,// Full block
// THIS BLOCK INCLUDES THE 16 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS
// THAT COME FROM HI BYTE=0x11 AND LOW BETWEEN 0x30 AND 0x3F
// THIS MEANS THAT \x50 MUST BE ADDED TO THE VALUES
0x80:0xae,// Registered symbol (R)
0x81:0xb0,// degree sign
0x82:0xbd,// 1/2 symbol
0x83:0xbf,// Inverted (open) question mark
0x84:0x2122,// Trademark symbol (TM)
0x85:0xa2,// Cents symbol
0x86:0xa3,// Pounds sterling
0x87:0x266a,// Music 8'th note
0x88:0xe0,// lowercase a, grave accent
0x89:0x20,// transparent space (regular)
0x8a:0xe8,// lowercase e, grave accent
0x8b:0xe2,// lowercase a, circumflex accent
0x8c:0xea,// lowercase e, circumflex accent
0x8d:0xee,// lowercase i, circumflex accent
0x8e:0xf4,// lowercase o, circumflex accent
0x8f:0xfb,// lowercase u, circumflex accent
// THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS
// THAT COME FROM HI BYTE=0x12 AND LOW BETWEEN 0x20 AND 0x3F
0x90:0xc1,// capital letter A with acute
0x91:0xc9,// capital letter E with acute
0x92:0xd3,// capital letter O with acute
0x93:0xda,// capital letter U with acute
0x94:0xdc,// capital letter U with diaeresis
0x95:0xfc,// lowercase letter U with diaeresis
0x96:0x2018,// opening single quote
0x97:0xa1,// inverted exclamation mark
0x98:0x2a,// asterisk
0x99:0x2019,// closing single quote
0x9a:0x2501,// box drawings heavy horizontal
0x9b:0xa9,// copyright sign
0x9c:0x2120,// Service mark
0x9d:0x2022,// (round) bullet
0x9e:0x201c,// Left double quotation mark
0x9f:0x201d,// Right double quotation mark
0xa0:0xc0,// uppercase A, grave accent
0xa1:0xc2,// uppercase A, circumflex
0xa2:0xc7,// uppercase C with cedilla
0xa3:0xc8,// uppercase E, grave accent
0xa4:0xca,// uppercase E, circumflex
0xa5:0xcb,// capital letter E with diaeresis
0xa6:0xeb,// lowercase letter e with diaeresis
0xa7:0xce,// uppercase I, circumflex
0xa8:0xcf,// uppercase I, with diaeresis
0xa9:0xef,// lowercase i, with diaeresis
0xaa:0xd4,// uppercase O, circumflex
0xab:0xd9,// uppercase U, grave accent
0xac:0xf9,// lowercase u, grave accent
0xad:0xdb,// uppercase U, circumflex
0xae:0xab,// left-pointing double angle quotation mark
0xaf:0xbb,// right-pointing double angle quotation mark
// THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS
// THAT COME FROM HI BYTE=0x13 AND LOW BETWEEN 0x20 AND 0x3F
0xb0:0xc3,// Uppercase A, tilde
0xb1:0xe3,// Lowercase a, tilde
0xb2:0xcd,// Uppercase I, acute accent
0xb3:0xcc,// Uppercase I, grave accent
0xb4:0xec,// Lowercase i, grave accent
0xb5:0xd2,// Uppercase O, grave accent
0xb6:0xf2,// Lowercase o, grave accent
0xb7:0xd5,// Uppercase O, tilde
0xb8:0xf5,// Lowercase o, tilde
0xb9:0x7b,// Open curly brace
0xba:0x7d,// Closing curly brace
0xbb:0x5c,// Backslash
0xbc:0x5e,// Caret
0xbd:0x5f,// Underscore
0xbe:0x7c,// Pipe (vertical line)
0xbf:0x223c,// Tilde operator
0xc0:0xc4,// Uppercase A, umlaut
0xc1:0xe4,// Lowercase A, umlaut
0xc2:0xd6,// Uppercase O, umlaut
0xc3:0xf6,// Lowercase o, umlaut
0xc4:0xdf,// Esszett (sharp S)
0xc5:0xa5,// Yen symbol
0xc6:0xa4,// Generic currency sign
0xc7:0x2503,// Box drawings heavy vertical
0xc8:0xc5,// Uppercase A, ring
0xc9:0xe5,// Lowercase A, ring
0xca:0xd8,// Uppercase O, stroke
0xcb:0xf8,// Lowercase o, stroke
0xcc:0x250f,// Box drawings heavy down and right
0xcd:0x2513,// Box drawings heavy down and left
0xce:0x2517,// Box drawings heavy up and right
0xcf:0x251b// Box drawings heavy up and left
};/**
* Utils
* NOTE(review): this region is corrupted — an HTML-entity/tag-stripping pass deleted
* every span between a "<" comparison and the next ">". Visibly missing: the loop
* condition/body of numArrayToHexArray, the entire PenState / StyledUnicodeChar / Row
* class bodies up to Row.setCursor, and interior spans of moveCursor, insertChar,
* clearFromPos, setPAC, setBkgData and getDisplayText. Left byte-identical below;
* restore from the upstream hls.js cea-608-parser (ported from dash.js) rather than
* guessing at the missing text.
*/const getCharForByte=byte=>String.fromCharCode(specialCea608CharsCodes[byte]||byte);const NR_ROWS=15;const NR_COLS=100;// Tables to look up row from PAC data
const rowsLowCh1={0x11:1,0x12:3,0x15:5,0x16:7,0x17:9,0x10:11,0x13:12,0x14:14};const rowsHighCh1={0x11:2,0x12:4,0x15:6,0x16:8,0x17:10,0x13:13,0x14:15};const rowsLowCh2={0x19:1,0x1a:3,0x1d:5,0x1e:7,0x1f:9,0x18:11,0x1b:12,0x1c:14};const rowsHighCh2={0x19:2,0x1a:4,0x1d:6,0x1e:8,0x1f:10,0x1b:13,0x1c:15};const backgroundColors=['white','green','blue','cyan','red','yellow','magenta','black','transparent'];class CaptionsLogger{constructor(){this.time=null;this.verboseLevel=0;}log(severity,msg){if(this.verboseLevel>=severity){const m=typeof msg==='function'?msg():msg;logger.log(`${this.time} [${severity}] ${m}`);}}}const numArrayToHexArray=function numArrayToHexArray(numArray){const hexArray=[];for(let j=0;jNR_COLS){this.logger.log(3,'Too large cursor position '+this.pos);this.pos=NR_COLS;}}/**
* Move the cursor relative to current position.
*/moveCursor(relPos){const newPos=this.pos+relPos;if(relPos>1){for(let i=this.pos+1;i=0x90){// Extended char
this.backSpace();}const char=getCharForByte(byte);if(this.pos>=NR_COLS){this.logger.log(0,()=>'Cannot insert '+byte.toString(16)+' ('+char+') at position '+this.pos+'. Skipping it!');return;}this.chars[this.pos].setChar(char,this.currPenState);this.moveCursor(1);}clearFromPos(startPos){let i;for(i=startPos;i'pacData = '+JSON.stringify(pacData));let newRow=pacData.row-1;if(this.nrRollUpRows&&newRow'bkgData = '+JSON.stringify(bkgData));this.backSpace();this.setPen(bkgData);this.insertChar(0x20);// Space
}setRollUpRows(nrRows){this.nrRollUpRows=nrRows;}rollUp(){if(this.nrRollUpRows===null){this.logger.log(3,'roll_up but nrRollUpRows not set yet');return;// Not properly setup
}this.logger.log(1,()=>this.getDisplayText());const topRowIndex=this.currRow+1-this.nrRollUpRows;const topRow=this.rows.splice(topRowIndex,1)[0];topRow.clear();this.rows.splice(this.currRow,0,topRow);this.logger.log(2,'Rolling up');// this.logger.log(VerboseLevel.TEXT, this.get_display_text())
}/**
* Get all non-empty rows with as unicode text.
*/getDisplayText(asOneRow){asOneRow=asOneRow||false;const displayText=[];let text='';let rowNr=-1;for(let i=0;i0){if(asOneRow){text='['+displayText.join(' | ')+']';}else {text=displayText.join('\n');}}return text;}getTextAndFormat(){return this.rows;}}// var modes = ['MODE_ROLL-UP', 'MODE_POP-ON', 'MODE_PAINT-ON', 'MODE_TEXT'];
// One CEA-608 caption channel: owns displayed / non-displayed / last-output
// CaptionScreens, dispatches the cc* control-code handlers, and emits cues through its
// outputFilter when the displayed memory changes.
// NOTE(review): insertChars below is corrupted — the tag-stripping pass deleted the
// span between its "for(let i=0;i<..." loop condition and the "...=>screen+..." logger
// callback. Left byte-identical; restore from upstream hls.js cea-608-parser.
class Cea608Channel{constructor(channelNumber,outputFilter,logger){this.chNr=void 0;this.outputFilter=void 0;this.mode=void 0;this.verbose=void 0;this.displayedMemory=void 0;this.nonDisplayedMemory=void 0;this.lastOutputScreen=void 0;this.currRollUpRow=void 0;this.writeScreen=void 0;this.cueStartTime=void 0;this.logger=void 0;this.chNr=channelNumber;this.outputFilter=outputFilter;this.mode=null;this.verbose=0;this.displayedMemory=new CaptionScreen(logger);this.nonDisplayedMemory=new CaptionScreen(logger);this.lastOutputScreen=new CaptionScreen(logger);this.currRollUpRow=this.displayedMemory.rows[NR_ROWS-1];this.writeScreen=this.displayedMemory;this.mode=null;this.cueStartTime=null;// Keeps track of where a cue started.
this.logger=logger;}reset(){this.mode=null;this.displayedMemory.reset();this.nonDisplayedMemory.reset();this.lastOutputScreen.reset();this.outputFilter.reset();this.currRollUpRow=this.displayedMemory.rows[NR_ROWS-1];this.writeScreen=this.displayedMemory;this.mode=null;this.cueStartTime=null;}getHandler(){return this.outputFilter;}setHandler(newHandler){this.outputFilter=newHandler;}setPAC(pacData){this.writeScreen.setPAC(pacData);}setBkgData(bkgData){this.writeScreen.setBkgData(bkgData);}setMode(newMode){if(newMode===this.mode){return;}this.mode=newMode;this.logger.log(2,()=>'MODE='+newMode);if(this.mode==='MODE_POP-ON'){this.writeScreen=this.nonDisplayedMemory;}else {this.writeScreen=this.displayedMemory;this.writeScreen.reset();}if(this.mode!=='MODE_ROLL-UP'){this.displayedMemory.nrRollUpRows=null;this.nonDisplayedMemory.nrRollUpRows=null;}this.mode=newMode;}insertChars(chars){for(let i=0;iscreen+': '+this.writeScreen.getDisplayText(true));if(this.mode==='MODE_PAINT-ON'||this.mode==='MODE_ROLL-UP'){this.logger.log(1,()=>'DISPLAYED: '+this.displayedMemory.getDisplayText(true));this.outputDataUpdate();}}ccRCL(){// Resume Caption Loading (switch mode to Pop On)
this.logger.log(2,'RCL - Resume Caption Loading');this.setMode('MODE_POP-ON');}ccBS(){// BackSpace
this.logger.log(2,'BS - BackSpace');if(this.mode==='MODE_TEXT'){return;}this.writeScreen.backSpace();if(this.writeScreen===this.displayedMemory){this.outputDataUpdate();}}ccAOF(){// Reserved (formerly Alarm Off)
}ccAON(){// Reserved (formerly Alarm On)
}ccDER(){// Delete to End of Row
this.logger.log(2,'DER- Delete to End of Row');this.writeScreen.clearToEndOfRow();this.outputDataUpdate();}ccRU(nrRows){// Roll-Up Captions-2,3,or 4 Rows
this.logger.log(2,'RU('+nrRows+') - Roll Up');this.writeScreen=this.displayedMemory;this.setMode('MODE_ROLL-UP');this.writeScreen.setRollUpRows(nrRows);}ccFON(){// Flash On
this.logger.log(2,'FON - Flash On');this.writeScreen.setPen({flash:true});}ccRDC(){// Resume Direct Captioning (switch mode to PaintOn)
this.logger.log(2,'RDC - Resume Direct Captioning');this.setMode('MODE_PAINT-ON');}ccTR(){// Text Restart in text mode (not supported, however)
this.logger.log(2,'TR');this.setMode('MODE_TEXT');}ccRTD(){// Resume Text Display in Text mode (not supported, however)
this.logger.log(2,'RTD');this.setMode('MODE_TEXT');}ccEDM(){// Erase Displayed Memory
this.logger.log(2,'EDM - Erase Displayed Memory');this.displayedMemory.reset();this.outputDataUpdate(true);}ccCR(){// Carriage Return
this.logger.log(2,'CR - Carriage Return');this.writeScreen.rollUp();this.outputDataUpdate(true);}ccENM(){// Erase Non-Displayed Memory
this.logger.log(2,'ENM - Erase Non-displayed Memory');this.nonDisplayedMemory.reset();}ccEOC(){// End of Caption (Flip Memories)
this.logger.log(2,'EOC - End Of Caption');if(this.mode==='MODE_POP-ON'){const tmp=this.displayedMemory;this.displayedMemory=this.nonDisplayedMemory;this.nonDisplayedMemory=tmp;this.writeScreen=this.nonDisplayedMemory;this.logger.log(1,()=>'DISP: '+this.displayedMemory.getDisplayText());}this.outputDataUpdate(true);}ccTO(nrCols){// Tab Offset 1,2, or 3 columns
this.logger.log(2,'TO('+nrCols+') - Tab Offset');this.writeScreen.moveCursor(nrCols);}ccMIDROW(secondByte){// Parse MIDROW command
const styles={flash:false};styles.underline=secondByte%2===1;styles.italics=secondByte>=0x2e;if(!styles.italics){const colorIndex=Math.floor(secondByte/2)-0x10;const colors=['white','green','blue','cyan','red','yellow','magenta'];styles.foreground=colors[colorIndex];}else {styles.foreground='white';}this.logger.log(2,'MIDROW: '+JSON.stringify(styles));this.writeScreen.setPen(styles);}outputDataUpdate(dispatch=false){const time=this.logger.time;if(time===null){return;}if(this.outputFilter){if(this.cueStartTime===null&&!this.displayedMemory.isEmpty()){// Start of a new cue
this.cueStartTime=time;}else {if(!this.displayedMemory.equals(this.lastOutputScreen)){this.outputFilter.newCue(this.cueStartTime,time,this.lastOutputScreen);if(dispatch&&this.outputFilter.dispatchCue){this.outputFilter.dispatchCue();}this.cueStartTime=this.displayedMemory.isEmpty()?null:time;}}this.lastOutputScreen.copy(this.displayedMemory);}}cueSplitAtTime(t){if(this.outputFilter){if(!this.displayedMemory.isEmpty()){if(this.outputFilter.newCue){this.outputFilter.newCue(this.cueStartTime,t,this.displayedMemory);}this.cueStartTime=t;}}}}// Will be 1 or 2 when parsing captions
// Top-level CEA-608 field parser: consumes byte pairs, de-duplicates repeated control
// codes via cmdHistory, and routes each pair through parseCmd -> parseMidrow ->
// parsePAC -> parseBackgroundAttributes -> parseChars on the matching channel.
// NOTE(review): addData (loop header through its hex-logging callback) and the final
// reset() (whose body runs into an unrelated output-filter newCue fragment) are
// corrupted by the same tag-stripping pass; left byte-identical — restore from
// upstream hls.js cea-608-parser.
class Cea608Parser{constructor(field,out1,out2){this.channels=void 0;this.currentChannel=0;this.cmdHistory=createCmdHistory();this.logger=void 0;const logger=this.logger=new CaptionsLogger();this.channels=[null,new Cea608Channel(field,out1,logger),new Cea608Channel(field+1,out2,logger)];}getHandler(channel){return this.channels[channel].getHandler();}setHandler(channel,newHandler){this.channels[channel].setHandler(newHandler);}/**
* Add data for time t in forms of list of bytes (unsigned ints). The bytes are treated as pairs.
*/addData(time,byteList){this.logger.time=time;for(let i=0;i'['+numArrayToHexArray([byteList[i],byteList[i+1]])+'] -> ('+numArrayToHexArray([a,b])+')');}const cmdHistory=this.cmdHistory;const isControlCode=a>=0x10&&a<=0x1f;if(isControlCode){// Skip redundant control codes
if(hasCmdRepeated(a,b,cmdHistory)){setLastCmd(null,null,cmdHistory);this.logger.log(3,()=>'Repeated command ('+numArrayToHexArray([a,b])+') is dropped');continue;}setLastCmd(a,b,this.cmdHistory);cmdFound=this.parseCmd(a,b);if(!cmdFound){cmdFound=this.parseMidrow(a,b);}if(!cmdFound){cmdFound=this.parsePAC(a,b);}if(!cmdFound){cmdFound=this.parseBackgroundAttributes(a,b);}}else {setLastCmd(null,null,cmdHistory);}if(!cmdFound){charsFound=this.parseChars(a,b);if(charsFound){const currChNr=this.currentChannel;if(currChNr&&currChNr>0){const channel=this.channels[currChNr];channel.insertChars(charsFound);}else {this.logger.log(2,'No channel found yet. TEXT-MODE?');}}}if(!cmdFound&&!charsFound){this.logger.log(2,()=>"Couldn't parse cleaned data "+numArrayToHexArray([a,b])+' orig: '+numArrayToHexArray([byteList[i],byteList[i+1]]));}}}/**
* Parse Command.
* @returns True if a command was found
*/parseCmd(a,b){const cond1=(a===0x14||a===0x1c||a===0x15||a===0x1d)&&b>=0x20&&b<=0x2f;const cond2=(a===0x17||a===0x1f)&&b>=0x21&&b<=0x23;if(!(cond1||cond2)){return false;}const chNr=a===0x14||a===0x15||a===0x17?1:2;const channel=this.channels[chNr];if(a===0x14||a===0x15||a===0x1c||a===0x1d){if(b===0x20){channel.ccRCL();}else if(b===0x21){channel.ccBS();}else if(b===0x22){channel.ccAOF();}else if(b===0x23){channel.ccAON();}else if(b===0x24){channel.ccDER();}else if(b===0x25){channel.ccRU(2);}else if(b===0x26){channel.ccRU(3);}else if(b===0x27){channel.ccRU(4);}else if(b===0x28){channel.ccFON();}else if(b===0x29){channel.ccRDC();}else if(b===0x2a){channel.ccTR();}else if(b===0x2b){channel.ccRTD();}else if(b===0x2c){channel.ccEDM();}else if(b===0x2d){channel.ccCR();}else if(b===0x2e){channel.ccENM();}else if(b===0x2f){channel.ccEOC();}}else {// a == 0x17 || a == 0x1F
channel.ccTO(b-0x20);}this.currentChannel=chNr;return true;}/**
* Parse midrow styling command
*/parseMidrow(a,b){let chNr=0;if((a===0x11||a===0x19)&&b>=0x20&&b<=0x2f){if(a===0x11){chNr=1;}else {chNr=2;}if(chNr!==this.currentChannel){this.logger.log(0,'Mismatch channel in midrow parsing');return false;}const channel=this.channels[chNr];if(!channel){return false;}channel.ccMIDROW(b);this.logger.log(3,()=>'MIDROW ('+numArrayToHexArray([a,b])+')');return true;}return false;}/**
* Parse Preamble Access Codes (Table 53).
* @returns {Boolean} Tells if PAC found
*/parsePAC(a,b){let row;const case1=(a>=0x11&&a<=0x17||a>=0x19&&a<=0x1f)&&b>=0x40&&b<=0x7f;const case2=(a===0x10||a===0x18)&&b>=0x40&&b<=0x5f;if(!(case1||case2)){return false;}const chNr=a<=0x17?1:2;if(b>=0x40&&b<=0x5f){row=chNr===1?rowsLowCh1[a]:rowsLowCh2[a];}else {// 0x60 <= b <= 0x7F
row=chNr===1?rowsHighCh1[a]:rowsHighCh2[a];}const channel=this.channels[chNr];if(!channel){return false;}channel.setPAC(this.interpretPAC(row,b));this.currentChannel=chNr;return true;}/**
* Interpret the second byte of the pac, and return the information.
* @returns pacData with style parameters
*/interpretPAC(row,byte){let pacIndex;const pacData={color:null,italics:false,indent:null,underline:false,row:row};if(byte>0x5f){pacIndex=byte-0x60;}else {pacIndex=byte-0x40;}pacData.underline=(pacIndex&1)===1;if(pacIndex<=0xd){pacData.color=['white','green','blue','cyan','red','yellow','magenta','white'][Math.floor(pacIndex/2)];}else if(pacIndex<=0xf){pacData.italics=true;pacData.color='white';}else {pacData.indent=Math.floor((pacIndex-0x10)/2)*4;}return pacData;// Note that row has zero offset. The spec uses 1.
}/**
* Parse characters.
* @returns An array with 1 to 2 codes corresponding to chars, if found. null otherwise.
*/parseChars(a,b){let channelNr;let charCodes=null;let charCode1=null;if(a>=0x19){channelNr=2;charCode1=a-8;}else {channelNr=1;charCode1=a;}if(charCode1>=0x11&&charCode1<=0x13){// Special character
let oneCode;if(charCode1===0x11){oneCode=b+0x50;}else if(charCode1===0x12){oneCode=b+0x70;}else {oneCode=b+0x90;}this.logger.log(2,()=>"Special char '"+getCharForByte(oneCode)+"' in channel "+channelNr);charCodes=[oneCode];}else if(a>=0x20&&a<=0x7f){charCodes=b===0?[a]:[a,b];}if(charCodes){this.logger.log(3,()=>'Char codes = '+numArrayToHexArray(charCodes).join(','));}return charCodes;}/**
* Parse extended background attributes as well as new foreground color black.
* @returns True if background attributes are found
*/parseBackgroundAttributes(a,b){const case1=(a===0x10||a===0x18)&&b>=0x20&&b<=0x2f;const case2=(a===0x17||a===0x1f)&&b>=0x2d&&b<=0x2f;if(!(case1||case2)){return false;}let index;const bkgData={};if(a===0x10||a===0x18){index=Math.floor((b-0x20)/2);bkgData.background=backgroundColors[index];if(b%2===1){bkgData.background=bkgData.background+'_semi';}}else if(b===0x2d){bkgData.background='transparent';}else {bkgData.foreground='black';if(b===0x2f){bkgData.underline=true;}}const chNr=a<=0x17?1:2;const channel=this.channels[chNr];channel.setBkgData(bkgData);return true;}/**
* Reset state of parser and its channels.
*/reset(){for(let i=0;istartTime){this.startTime=startTime;}this.endTime=endTime;this.screen=screen;this.timelineController.createCaptionsTrack(this.trackName);}reset(){this.cueRanges=[];this.startTime=null;}}/**
* Copyright 2013 vtt.js Contributors
*
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an 'AS IS' BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/var VTTCue=function(){if(optionalSelf!=null&&optionalSelf.VTTCue){return self.VTTCue;}const AllowedDirections=['','lr','rl'];const AllowedAlignments=['start','middle','end','left','right'];function isAllowedValue(allowed,value){if(typeof value!=='string'){return false;}// necessary for assuring the generic conforms to the Array interface
if(!Array.isArray(allowed)){return false;}// reset the type so that the next narrowing works well
const lcValue=value.toLowerCase();// use the allow list to narrow the type to a specific subset of strings
if(~allowed.indexOf(lcValue)){return lcValue;}return false;}function findDirectionSetting(value){return isAllowedValue(AllowedDirections,value);}function findAlignSetting(value){return isAllowedValue(AllowedAlignments,value);}function extend(obj,...rest){let i=1;for(;i100){throw new Error('Position must be between 0 and 100.');}_position=value;this.hasBeenReset=true;}}));Object.defineProperty(cue,'positionAlign',extend({},baseObj,{get:function(){return _positionAlign;},set:function(value){const setting=findAlignSetting(value);if(!setting){throw new SyntaxError('An invalid or illegal string was specified.');}_positionAlign=setting;this.hasBeenReset=true;}}));Object.defineProperty(cue,'size',extend({},baseObj,{get:function(){return _size;},set:function(value){if(value<0||value>100){throw new Error('Size must be between 0 and 100.');}_size=value;this.hasBeenReset=true;}}));Object.defineProperty(cue,'align',extend({},baseObj,{get:function(){return _align;},set:function(value){const setting=findAlignSetting(value);if(!setting){throw new SyntaxError('An invalid or illegal string was specified.');}_align=setting;this.hasBeenReset=true;}}));/**
* Other