Internal change
PiperOrigin-RevId: 338056838
Change-Id: I533a514d8a470b786b9c990cee19794485cb4e5d
diff --git a/BasicUsageEnvironment/BasicHashTable.cpp b/BasicUsageEnvironment/BasicHashTable.cpp
new file mode 100644
index 0000000..fb5617a
--- /dev/null
+++ b/BasicUsageEnvironment/BasicHashTable.cpp
@@ -0,0 +1,277 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Hash Table implementation
+// Implementation
+
+#include "BasicHashTable.hh"
+#include "strDup.hh"
+
+#if defined(__WIN32__) || defined(_WIN32)
+#else
+#include <stddef.h>
+#endif
+#include <string.h>
+#include <stdio.h>
+
+// When there are this many entries per bucket, on average, rebuild
+// the table to increase the number of buckets
+#define REBUILD_MULTIPLIER 3
+
+BasicHashTable::BasicHashTable(int keyType)
+ : fBuckets(fStaticBuckets), fNumBuckets(SMALL_HASH_TABLE_SIZE),
+ fNumEntries(0), fRebuildSize(SMALL_HASH_TABLE_SIZE*REBUILD_MULTIPLIER),
+ fDownShift(28), fMask(0x3), fKeyType(keyType) {
+ for (unsigned i = 0; i < SMALL_HASH_TABLE_SIZE; ++i) {
+ fStaticBuckets[i] = NULL;
+ }
+}
+
BasicHashTable::~BasicHashTable() {
  // Free all the entries in the table:
  for (unsigned i = 0; i < fNumBuckets; ++i) {
    TableEntry* entry;
    // "deleteEntry()" unlinks the entry from "fBuckets[i]", so re-reading
    // the bucket head here advances through (and eventually empties) the chain:
    while ((entry = fBuckets[i]) != NULL) {
      deleteEntry(i, entry);
    }
  }

  // Also free the bucket array, if it was dynamically allocated:
  if (fBuckets != fStaticBuckets) delete[] fBuckets;
}
+
+void* BasicHashTable::Add(char const* key, void* value) {
+ void* oldValue;
+ unsigned index;
+ TableEntry* entry = lookupKey(key, index);
+ if (entry != NULL) {
+ // There's already an item with this key
+ oldValue = entry->value;
+ } else {
+ // There's no existing entry; create a new one:
+ entry = insertNewEntry(index, key);
+ oldValue = NULL;
+ }
+ entry->value = value;
+
+ // If the table has become too large, rebuild it with more buckets:
+ if (fNumEntries >= fRebuildSize) rebuild();
+
+ return oldValue;
+}
+
+Boolean BasicHashTable::Remove(char const* key) {
+ unsigned index;
+ TableEntry* entry = lookupKey(key, index);
+ if (entry == NULL) return False; // no such entry
+
+ deleteEntry(index, entry);
+
+ return True;
+}
+
+void* BasicHashTable::Lookup(char const* key) const {
+ unsigned index;
+ TableEntry* entry = lookupKey(key, index);
+ if (entry == NULL) return NULL; // no such entry
+
+ return entry->value;
+}
+
// Returns the number of key->value entries currently in the table.
unsigned BasicHashTable::numEntries() const {
  return fNumEntries;
}
+
// Begin an iteration over "table": start at bucket 0, with no current entry.
// (The table must not be modified while the iteration is in progress.)
BasicHashTable::Iterator::Iterator(BasicHashTable const& table)
  : fTable(table), fNextIndex(0), fNextEntry(NULL) {
}
+
// Returns the next entry's value (and sets "key" to its key), or NULL
// once every bucket has been exhausted.
void* BasicHashTable::Iterator::next(char const*& key) {
  // If we're not currently inside a chain, advance to the next non-empty bucket:
  while (fNextEntry == NULL) {
    if (fNextIndex >= fTable.fNumBuckets) return NULL; // no more entries

    fNextEntry = fTable.fBuckets[fNextIndex++];
  }

  BasicHashTable::TableEntry* entry = fNextEntry;
  fNextEntry = entry->fNext; // remember our position for the next call

  key = entry->key;
  return entry->value;
}
+
+////////// Implementation of HashTable creation functions //////////
+
// Factory function: the abstract "HashTable" interface is implemented
// by "BasicHashTable".
HashTable* HashTable::create(int keyType) {
  return new BasicHashTable(keyType);
}
+
+HashTable::Iterator* HashTable::Iterator::create(HashTable const& hashTable) {
+ // "hashTable" is assumed to be a BasicHashTable
+ return new BasicHashTable::Iterator((BasicHashTable const&)hashTable);
+}
+
+////////// Implementation of internal member functions //////////
+
+BasicHashTable::TableEntry* BasicHashTable
+::lookupKey(char const* key, unsigned& index) const {
+ TableEntry* entry;
+ index = hashIndexFromKey(key);
+
+ for (entry = fBuckets[index]; entry != NULL; entry = entry->fNext) {
+ if (keyMatches(key, entry->key)) break;
+ }
+
+ return entry;
+}
+
+Boolean BasicHashTable
+::keyMatches(char const* key1, char const* key2) const {
+ // The way we check the keys for a match depends upon their type:
+ if (fKeyType == STRING_HASH_KEYS) {
+ return (strcmp(key1, key2) == 0);
+ } else if (fKeyType == ONE_WORD_HASH_KEYS) {
+ return (key1 == key2);
+ } else {
+ unsigned* k1 = (unsigned*)key1;
+ unsigned* k2 = (unsigned*)key2;
+
+ for (int i = 0; i < fKeyType; ++i) {
+ if (k1[i] != k2[i]) return False; // keys differ
+ }
+ return True;
+ }
+}
+
+BasicHashTable::TableEntry* BasicHashTable
+::insertNewEntry(unsigned index, char const* key) {
+ TableEntry* entry = new TableEntry();
+ entry->fNext = fBuckets[index];
+ fBuckets[index] = entry;
+
+ ++fNumEntries;
+ assignKey(entry, key);
+
+ return entry;
+}
+
+void BasicHashTable::assignKey(TableEntry* entry, char const* key) {
+ // The way we assign the key depends upon its type:
+ if (fKeyType == STRING_HASH_KEYS) {
+ entry->key = strDup(key);
+ } else if (fKeyType == ONE_WORD_HASH_KEYS) {
+ entry->key = key;
+ } else if (fKeyType > 0) {
+ unsigned* keyFrom = (unsigned*)key;
+ unsigned* keyTo = new unsigned[fKeyType];
+ for (int i = 0; i < fKeyType; ++i) keyTo[i] = keyFrom[i];
+
+ entry->key = (char const*)keyTo;
+ }
+}
+
// Unlink "entry" from bucket "index", then free its key and the entry itself.
void BasicHashTable::deleteEntry(unsigned index, TableEntry* entry) {
  // Walk the bucket's chain via a pointer-to-pointer, so the unlink works
  // uniformly whether "entry" is at the head or mid-chain:
  TableEntry** ep = &fBuckets[index];

  Boolean foundIt = False;
  while (*ep != NULL) {
    if (*ep == entry) {
      foundIt = True;
      *ep = entry->fNext;
      break;
    }
    ep = &((*ep)->fNext);
  }

  if (!foundIt) { // shouldn't happen
#ifdef DEBUG
    fprintf(stderr, "BasicHashTable[%p]::deleteEntry(%d,%p): internal error - not found (first entry %p", this, index, entry, fBuckets[index]);
    if (fBuckets[index] != NULL) fprintf(stderr, ", next entry %p", fBuckets[index]->fNext);
    fprintf(stderr, ")\n");
#endif
  }

  // Note: the entry is freed (and the count decremented) even in the
  // 'not found' case above:
  --fNumEntries;
  deleteKey(entry);
  delete entry;
}
+
+void BasicHashTable::deleteKey(TableEntry* entry) {
+ // The way we delete the key depends upon its type:
+ if (fKeyType == ONE_WORD_HASH_KEYS) {
+ entry->key = NULL;
+ } else {
+ delete[] (char*)entry->key;
+ entry->key = NULL;
+ }
+}
+
// Quadruple the number of buckets and rehash every existing entry into the
// new bucket array.  Called from "Add()" when the average chain length
// reaches REBUILD_MULTIPLIER.
void BasicHashTable::rebuild() {
  // Remember the existing table size:
  unsigned oldSize = fNumBuckets;
  TableEntry** oldBuckets = fBuckets;

  // Create the new sized table:
  fNumBuckets *= 4;
  fBuckets = new TableEntry*[fNumBuckets];
  for (unsigned i = 0; i < fNumBuckets; ++i) {
    fBuckets[i] = NULL;
  }
  fRebuildSize *= 4;
  // Keep the hashing parameters in step with the 4x larger table:
  fDownShift -= 2;
  fMask = (fMask<<2)|0x3;

  // Rehash the existing entries into the new table:
  for (TableEntry** oldChainPtr = oldBuckets; oldSize > 0;
       --oldSize, ++oldChainPtr) {
    for (TableEntry* hPtr = *oldChainPtr; hPtr != NULL;
	 hPtr = *oldChainPtr) {
      // Pop the entry off the old chain, then push it onto the front of
      // its (newly computed) bucket:
      *oldChainPtr = hPtr->fNext;

      unsigned index = hashIndexFromKey(hPtr->key);

      hPtr->fNext = fBuckets[index];
      fBuckets[index] = hPtr;
    }
  }

  // Free the old bucket array, if it was dynamically allocated:
  if (oldBuckets != fStaticBuckets) delete[] oldBuckets;
}
+
// Map "key" to a bucket index.  The hashing scheme depends on the key type:
unsigned BasicHashTable::hashIndexFromKey(char const* key) const {
  unsigned result = 0;

  if (fKeyType == STRING_HASH_KEYS) {
    // Multiplicative string hash (result = result*9 + c), masked down to
    // the current table size ("fMask" is kept in sync by "rebuild()"):
    while (1) {
      char c = *key++;
      if (c == 0) break;
      result += (result<<3) + (unsigned)c;
    }
    result &= fMask;
  } else if (fKeyType == ONE_WORD_HASH_KEYS) {
    // The pointer's bits themselves are the key:
    result = randomIndex((uintptr_t)key);
  } else {
    // The key is an array of "fKeyType" unsigned words; hash their sum:
    unsigned* k = (unsigned*)key;
    uintptr_t sum = 0;
    for (int i = 0; i < fKeyType; ++i) {
      sum += k[i];
    }
    result = randomIndex(sum);
  }

  return result;
}
diff --git a/BasicUsageEnvironment/BasicTaskScheduler.cpp b/BasicUsageEnvironment/BasicTaskScheduler.cpp
new file mode 100644
index 0000000..3634b8b
--- /dev/null
+++ b/BasicUsageEnvironment/BasicTaskScheduler.cpp
@@ -0,0 +1,256 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// Implementation
+
+
+#include "BasicUsageEnvironment.hh"
+#include "HandlerSet.hh"
+#include <stdio.h>
+#if defined(_QNX4)
+#include <sys/select.h>
+#include <unix.h>
+#endif
+
+////////// BasicTaskScheduler //////////
+
// Factory function.  A non-zero "maxSchedulerGranularity" (microseconds)
// makes the scheduler run an internal 'tick' task at that period, so that
// events are handled frequently (see "schedulerTickTask()").
BasicTaskScheduler* BasicTaskScheduler::createNew(unsigned maxSchedulerGranularity) {
  return new BasicTaskScheduler(maxSchedulerGranularity);
}
+
// Construct the scheduler with empty socket sets; optionally start the
// periodic 'tick' task that bounds how long event handling can be deferred.
BasicTaskScheduler::BasicTaskScheduler(unsigned maxSchedulerGranularity)
  : fMaxSchedulerGranularity(maxSchedulerGranularity), fMaxNumSockets(0)
#if defined(__WIN32__) || defined(_WIN32)
  , fDummySocketNum(-1)
#endif
{
  FD_ZERO(&fReadSet);
  FD_ZERO(&fWriteSet);
  FD_ZERO(&fExceptionSet);

  if (maxSchedulerGranularity > 0) schedulerTickTask(); // ensures that we handle events frequently
}
+
BasicTaskScheduler::~BasicTaskScheduler() {
#if defined(__WIN32__) || defined(_WIN32)
  // Close the dummy socket that may have been created to work around a
  // Windows "select()" quirk (see "SingleStep()"):
  if (fDummySocketNum >= 0) closeSocket(fDummySocketNum);
#endif
}
+
// Static trampoline: "clientData" is the scheduler itself.
void BasicTaskScheduler::schedulerTickTask(void* clientData) {
  ((BasicTaskScheduler*)clientData)->schedulerTickTask();
}
+
// Reschedule ourself, so that a delayed task always comes due within
// "fMaxSchedulerGranularity" microseconds; this keeps "SingleStep()"'s
// "select()" from blocking longer than that.
void BasicTaskScheduler::schedulerTickTask() {
  scheduleDelayedTask(fMaxSchedulerGranularity, schedulerTickTask, this);
}
+
+#ifndef MILLION
+#define MILLION 1000000
+#endif
+
// One iteration of the event loop:
//   1. "select()" on the registered sockets, with a timeout equal to the
//      time until the next delayed task (capped by "maxDelayTime" if > 0);
//   2. call the handler for at most ONE ready socket (resuming after the
//      last-handled socket, for fairness);
//   3. handle any pending event triggers;
//   4. run any delayed tasks that have come due.
// NOTE: the braces opened inside the #if/#else below are intentionally
// closed after the #endif - do not 're-balance' them.
void BasicTaskScheduler::SingleStep(unsigned maxDelayTime) {
  fd_set readSet = fReadSet; // make a copy for this select() call
  fd_set writeSet = fWriteSet; // ditto
  fd_set exceptionSet = fExceptionSet; // ditto

  DelayInterval const& timeToDelay = fDelayQueue.timeToNextAlarm();
  struct timeval tv_timeToDelay;
  tv_timeToDelay.tv_sec = timeToDelay.seconds();
  tv_timeToDelay.tv_usec = timeToDelay.useconds();
  // Very large "tv_sec" values cause select() to fail.
  // Don't make it any larger than 1 million seconds (11.5 days)
  const long MAX_TV_SEC = MILLION;
  if (tv_timeToDelay.tv_sec > MAX_TV_SEC) {
    tv_timeToDelay.tv_sec = MAX_TV_SEC;
  }
  // Also check our "maxDelayTime" parameter (if it's > 0):
  if (maxDelayTime > 0 &&
      (tv_timeToDelay.tv_sec > (long)maxDelayTime/MILLION ||
       (tv_timeToDelay.tv_sec == (long)maxDelayTime/MILLION &&
        tv_timeToDelay.tv_usec > (long)maxDelayTime%MILLION))) {
    tv_timeToDelay.tv_sec = maxDelayTime/MILLION;
    tv_timeToDelay.tv_usec = maxDelayTime%MILLION;
  }

  int selectResult = select(fMaxNumSockets, &readSet, &writeSet, &exceptionSet, &tv_timeToDelay);
  if (selectResult < 0) {
#if defined(__WIN32__) || defined(_WIN32)
    int err = WSAGetLastError();
    // For some unknown reason, select() in Windoze sometimes fails with WSAEINVAL if
    // it was called with no entries set in "readSet".  If this happens, ignore it:
    if (err == WSAEINVAL && readSet.fd_count == 0) {
      err = EINTR;
      // To stop this from happening again, create a dummy socket:
      if (fDummySocketNum >= 0) closeSocket(fDummySocketNum);
      fDummySocketNum = socket(AF_INET, SOCK_DGRAM, 0);
      FD_SET((unsigned)fDummySocketNum, &fReadSet);
    }
    if (err != EINTR) {
#else
    if (errno != EINTR && errno != EAGAIN) {
#endif
      // Unexpected error - treat this as fatal:
#if !defined(_WIN32_WCE)
      perror("BasicTaskScheduler::SingleStep(): select() fails");
      // Because this failure is often "Bad file descriptor" - which is caused by an invalid socket number (i.e., a socket number
      // that had already been closed) being used in "select()" - we print out the sockets that were being used in "select()",
      // to assist in debugging:
      fprintf(stderr, "socket numbers used in the select() call:");
      for (int i = 0; i < 10000; ++i) {
        if (FD_ISSET(i, &fReadSet) || FD_ISSET(i, &fWriteSet) || FD_ISSET(i, &fExceptionSet)) {
          fprintf(stderr, " %d(", i);
          if (FD_ISSET(i, &fReadSet)) fprintf(stderr, "r");
          if (FD_ISSET(i, &fWriteSet)) fprintf(stderr, "w");
          if (FD_ISSET(i, &fExceptionSet)) fprintf(stderr, "e");
          fprintf(stderr, ")");
        }
      }
      fprintf(stderr, "\n");
#endif
      internalError();
    }
  }

  // Call the handler function for one readable socket:
  HandlerIterator iter(*fHandlers);
  HandlerDescriptor* handler;
  // To ensure forward progress through the handlers, begin past the last
  // socket number that we handled:
  if (fLastHandledSocketNum >= 0) {
    while ((handler = iter.next()) != NULL) {
      if (handler->socketNum == fLastHandledSocketNum) break;
    }
    if (handler == NULL) {
      fLastHandledSocketNum = -1;
      iter.reset(); // start from the beginning instead
    }
  }
  while ((handler = iter.next()) != NULL) {
    int sock = handler->socketNum; // alias
    int resultConditionSet = 0;
    if (FD_ISSET(sock, &readSet) && FD_ISSET(sock, &fReadSet)/*sanity check*/) resultConditionSet |= SOCKET_READABLE;
    if (FD_ISSET(sock, &writeSet) && FD_ISSET(sock, &fWriteSet)/*sanity check*/) resultConditionSet |= SOCKET_WRITABLE;
    if (FD_ISSET(sock, &exceptionSet) && FD_ISSET(sock, &fExceptionSet)/*sanity check*/) resultConditionSet |= SOCKET_EXCEPTION;
    if ((resultConditionSet&handler->conditionSet) != 0 && handler->handlerProc != NULL) {
      fLastHandledSocketNum = sock;
      // Note: we set "fLastHandledSocketNum" before calling the handler,
      // in case the handler calls "doEventLoop()" reentrantly.
      (*handler->handlerProc)(handler->clientData, resultConditionSet);
      break;
    }
  }
  if (handler == NULL && fLastHandledSocketNum >= 0) {
    // We didn't call a handler, but we didn't get to check all of them,
    // so try again from the beginning:
    iter.reset();
    while ((handler = iter.next()) != NULL) {
      int sock = handler->socketNum; // alias
      int resultConditionSet = 0;
      if (FD_ISSET(sock, &readSet) && FD_ISSET(sock, &fReadSet)/*sanity check*/) resultConditionSet |= SOCKET_READABLE;
      if (FD_ISSET(sock, &writeSet) && FD_ISSET(sock, &fWriteSet)/*sanity check*/) resultConditionSet |= SOCKET_WRITABLE;
      if (FD_ISSET(sock, &exceptionSet) && FD_ISSET(sock, &fExceptionSet)/*sanity check*/) resultConditionSet |= SOCKET_EXCEPTION;
      if ((resultConditionSet&handler->conditionSet) != 0 && handler->handlerProc != NULL) {
        fLastHandledSocketNum = sock;
        // Note: we set "fLastHandledSocketNum" before calling the handler,
        // in case the handler calls "doEventLoop()" reentrantly.
        (*handler->handlerProc)(handler->clientData, resultConditionSet);
        break;
      }
    }
    if (handler == NULL) fLastHandledSocketNum = -1;//because we didn't call a handler
  }

  // Also handle any newly-triggered event (Note that we do this *after* calling a socket handler,
  // in case the triggered event handler modifies The set of readable sockets.)
  if (fTriggersAwaitingHandling != 0) {
    if (fTriggersAwaitingHandling == fLastUsedTriggerMask) {
      // Common-case optimization for a single event trigger:
      fTriggersAwaitingHandling &=~ fLastUsedTriggerMask;
      if (fTriggeredEventHandlers[fLastUsedTriggerNum] != NULL) {
        (*fTriggeredEventHandlers[fLastUsedTriggerNum])(fTriggeredEventClientDatas[fLastUsedTriggerNum]);
      }
    } else {
      // Look for an event trigger that needs handling (making sure that we make forward progress through all possible triggers):
      unsigned i = fLastUsedTriggerNum;
      EventTriggerId mask = fLastUsedTriggerMask;

      do {
        i = (i+1)%MAX_NUM_EVENT_TRIGGERS;
        mask >>= 1;
        if (mask == 0) mask = 0x80000000;

        if ((fTriggersAwaitingHandling&mask) != 0) {
          fTriggersAwaitingHandling &=~ mask;
          if (fTriggeredEventHandlers[i] != NULL) {
            (*fTriggeredEventHandlers[i])(fTriggeredEventClientDatas[i]);
          }

          fLastUsedTriggerMask = mask;
          fLastUsedTriggerNum = i;
          break;
        }
      } while (i != fLastUsedTriggerNum);
    }
  }

  // Also handle any delayed event that may have come due.
  fDelayQueue.handleAlarm();
}
+
+void BasicTaskScheduler
+ ::setBackgroundHandling(int socketNum, int conditionSet, BackgroundHandlerProc* handlerProc, void* clientData) {
+ if (socketNum < 0) return;
+#if !defined(__WIN32__) && !defined(_WIN32) && defined(FD_SETSIZE)
+ if (socketNum >= (int)(FD_SETSIZE)) return;
+#endif
+ FD_CLR((unsigned)socketNum, &fReadSet);
+ FD_CLR((unsigned)socketNum, &fWriteSet);
+ FD_CLR((unsigned)socketNum, &fExceptionSet);
+ if (conditionSet == 0) {
+ fHandlers->clearHandler(socketNum);
+ if (socketNum+1 == fMaxNumSockets) {
+ --fMaxNumSockets;
+ }
+ } else {
+ fHandlers->assignHandler(socketNum, conditionSet, handlerProc, clientData);
+ if (socketNum+1 > fMaxNumSockets) {
+ fMaxNumSockets = socketNum+1;
+ }
+ if (conditionSet&SOCKET_READABLE) FD_SET((unsigned)socketNum, &fReadSet);
+ if (conditionSet&SOCKET_WRITABLE) FD_SET((unsigned)socketNum, &fWriteSet);
+ if (conditionSet&SOCKET_EXCEPTION) FD_SET((unsigned)socketNum, &fExceptionSet);
+ }
+}
+
+void BasicTaskScheduler::moveSocketHandling(int oldSocketNum, int newSocketNum) {
+ if (oldSocketNum < 0 || newSocketNum < 0) return; // sanity check
+#if !defined(__WIN32__) && !defined(_WIN32) && defined(FD_SETSIZE)
+ if (oldSocketNum >= (int)(FD_SETSIZE) || newSocketNum >= (int)(FD_SETSIZE)) return; // sanity check
+#endif
+ if (FD_ISSET(oldSocketNum, &fReadSet)) {FD_CLR((unsigned)oldSocketNum, &fReadSet); FD_SET((unsigned)newSocketNum, &fReadSet);}
+ if (FD_ISSET(oldSocketNum, &fWriteSet)) {FD_CLR((unsigned)oldSocketNum, &fWriteSet); FD_SET((unsigned)newSocketNum, &fWriteSet);}
+ if (FD_ISSET(oldSocketNum, &fExceptionSet)) {FD_CLR((unsigned)oldSocketNum, &fExceptionSet); FD_SET((unsigned)newSocketNum, &fExceptionSet);}
+ fHandlers->moveHandler(oldSocketNum, newSocketNum);
+
+ if (oldSocketNum+1 == fMaxNumSockets) {
+ --fMaxNumSockets;
+ }
+ if (newSocketNum+1 > fMaxNumSockets) {
+ fMaxNumSockets = newSocketNum+1;
+ }
+}
diff --git a/BasicUsageEnvironment/BasicTaskScheduler0.cpp b/BasicUsageEnvironment/BasicTaskScheduler0.cpp
new file mode 100644
index 0000000..9cd352e
--- /dev/null
+++ b/BasicUsageEnvironment/BasicTaskScheduler0.cpp
@@ -0,0 +1,235 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// Implementation
+
+#include "BasicUsageEnvironment0.hh"
+#include "HandlerSet.hh"
+
+////////// A subclass of DelayQueueEntry,
+////////// used to implement BasicTaskScheduler0::scheduleDelayedTask()
+
// Wraps a (proc, clientData) callback pair as a delay-queue entry that
// fires once, after "timeToDelay".
class AlarmHandler: public DelayQueueEntry {
public:
  AlarmHandler(TaskFunc* proc, void* clientData, DelayInterval timeToDelay)
    : DelayQueueEntry(timeToDelay), fProc(proc), fClientData(clientData) {
  }

private: // redefined virtual functions
  virtual void handleTimeout() {
    // Run the client's callback first; the base-class call comes last
    // (NOTE(review): presumably because it may dispose of this entry --
    // confirm against "DelayQueue.cpp"):
    (*fProc)(fClientData);
    DelayQueueEntry::handleTimeout();
  }

private:
  TaskFunc* fProc;      // the callback to invoke when the delay expires
  void* fClientData;    // opaque argument passed to "fProc"
};
+
+
+////////// BasicTaskScheduler0 //////////
+
// Initialize the scheduler core: no socket handled yet, no triggers
// pending, and the trigger-allocation scan position set so that the first
// "createEventTrigger()" call starts at slot 0.
BasicTaskScheduler0::BasicTaskScheduler0()
  : fLastHandledSocketNum(-1), fTriggersAwaitingHandling(0), fLastUsedTriggerMask(1), fLastUsedTriggerNum(MAX_NUM_EVENT_TRIGGERS-1) {
  fHandlers = new HandlerSet;
  // Mark every event-trigger slot as unused:
  for (unsigned i = 0; i < MAX_NUM_EVENT_TRIGGERS; ++i) {
    fTriggeredEventHandlers[i] = NULL;
    fTriggeredEventClientDatas[i] = NULL;
  }
}
+
BasicTaskScheduler0::~BasicTaskScheduler0() {
  // Destroys all registered handler descriptors along with the set:
  delete fHandlers;
}
+
+TaskToken BasicTaskScheduler0::scheduleDelayedTask(int64_t microseconds,
+ TaskFunc* proc,
+ void* clientData) {
+ if (microseconds < 0) microseconds = 0;
+ DelayInterval timeToDelay((long)(microseconds/1000000), (long)(microseconds%1000000));
+ AlarmHandler* alarmHandler = new AlarmHandler(proc, clientData, timeToDelay);
+ fDelayQueue.addEntry(alarmHandler);
+
+ return (void*)(alarmHandler->token());
+}
+
+void BasicTaskScheduler0::unscheduleDelayedTask(TaskToken& prevTask) {
+ DelayQueueEntry* alarmHandler = fDelayQueue.removeEntry((intptr_t)prevTask);
+ prevTask = NULL;
+ delete alarmHandler;
+}
+
+void BasicTaskScheduler0::doEventLoop(char volatile* watchVariable) {
+ // Repeatedly loop, handling readble sockets and timed events:
+ while (1) {
+ if (watchVariable != NULL && *watchVariable != 0) break;
+ SingleStep();
+ }
+}
+
// Allocate a free event-trigger slot for "eventHandlerProc", and return its
// id (a single-bit mask).  Returns 0 if all MAX_NUM_EVENT_TRIGGERS slots
// are in use.
EventTriggerId BasicTaskScheduler0::createEventTrigger(TaskFunc* eventHandlerProc) {
  // Scan all slots, starting just past the last one we used.  The mask is
  // rotated in step with the slot index: slot i <-> bit (31-i)... i.e. the
  // mask shifts right as "i" increments, wrapping 0 -> 0x80000000.
  unsigned i = fLastUsedTriggerNum;
  EventTriggerId mask = fLastUsedTriggerMask;

  do {
    i = (i+1)%MAX_NUM_EVENT_TRIGGERS;
    mask >>= 1;
    if (mask == 0) mask = 0x80000000;

    if (fTriggeredEventHandlers[i] == NULL) {
      // This trigger number is free; use it:
      fTriggeredEventHandlers[i] = eventHandlerProc;
      fTriggeredEventClientDatas[i] = NULL; // sanity

      fLastUsedTriggerMask = mask;
      fLastUsedTriggerNum = i;

      return mask;
    }
  } while (i != fLastUsedTriggerNum);

  // All available event triggers are allocated; return 0 instead:
  return 0;
}
+
// Release the trigger slot(s) named by "eventTriggerId", discarding any
// still-pending activation of them.
void BasicTaskScheduler0::deleteEventTrigger(EventTriggerId eventTriggerId) {
  // Drop any pending (not-yet-handled) activation of these trigger bits:
  fTriggersAwaitingHandling &=~ eventTriggerId;

  if (eventTriggerId == fLastUsedTriggerMask) { // common-case optimization:
    fTriggeredEventHandlers[fLastUsedTriggerNum] = NULL;
    fTriggeredEventClientDatas[fLastUsedTriggerNum] = NULL;
  } else {
    // "eventTriggerId" should have just one bit set.
    // However, we do the reasonable thing if the user happened to 'or' together two or more "EventTriggerId"s:
    // (slot i corresponds to mask bit (31-i); see "createEventTrigger()")
    EventTriggerId mask = 0x80000000;
    for (unsigned i = 0; i < MAX_NUM_EVENT_TRIGGERS; ++i) {
      if ((eventTriggerId&mask) != 0) {
	fTriggeredEventHandlers[i] = NULL;
	fTriggeredEventClientDatas[i] = NULL;
      }
      mask >>= 1;
    }
  }
}
+
// Mark the trigger(s) named by "eventTriggerId" as pending; the handlers
// run later, from within "SingleStep()" on the event-loop thread.
void BasicTaskScheduler0::triggerEvent(EventTriggerId eventTriggerId, void* clientData) {
  // First, record the "clientData".  (Note that we allow "eventTriggerId" to be a combination of bits for multiple events.)
  // (Slot i corresponds to mask bit (31-i); see "createEventTrigger()".)
  EventTriggerId mask = 0x80000000;
  for (unsigned i = 0; i < MAX_NUM_EVENT_TRIGGERS; ++i) {
    if ((eventTriggerId&mask) != 0) {
      fTriggeredEventClientDatas[i] = clientData;
    }
    mask >>= 1;
  }

  // Then, note this event as being ready to be handled.
  // (Note that because this function (unlike others in the library) can be called from an external thread, we do this last, to
  //  reduce the risk of a race condition.)
  fTriggersAwaitingHandling |= eventTriggerId;
}
+
+
+////////// HandlerSet (etc.) implementation //////////
+
// Construct a descriptor and splice it into the circular doubly-linked
// list just before "nextHandler".  Passing 'this' as "nextHandler" is the
// special self-initialization case used by the HandlerSet sentinel.
HandlerDescriptor::HandlerDescriptor(HandlerDescriptor* nextHandler)
  : conditionSet(0), handlerProc(NULL) {
  // Link this descriptor into a doubly-linked list:
  if (nextHandler == this) { // initialization
    fNextHandler = fPrevHandler = this;
  } else {
    fNextHandler = nextHandler;
    fPrevHandler = nextHandler->fPrevHandler;
    nextHandler->fPrevHandler = this;
    fPrevHandler->fNextHandler = this;
  }
}
+
HandlerDescriptor::~HandlerDescriptor() {
  // Unlink this descriptor from a doubly-linked list:
  fNextHandler->fPrevHandler = fPrevHandler;
  fPrevHandler->fNextHandler = fNextHandler;
}
+
// The set is a circular doubly-linked list; "fHandlers" itself acts as the
// sentinel node.  (Constructing it with its own address triggers the
// self-initialization case in the HandlerDescriptor constructor.)
HandlerSet::HandlerSet()
  : fHandlers(&fHandlers) {
  fHandlers.socketNum = -1; // shouldn't ever get looked at, but in case...
}
+
HandlerSet::~HandlerSet() {
  // Delete each handler descriptor:
  // (each deletion unlinks the node, so the sentinel's 'next' keeps advancing)
  while (fHandlers.fNextHandler != &fHandlers) {
    delete fHandlers.fNextHandler; // changes fHandlers->fNextHandler
  }
}
+
+void HandlerSet
+::assignHandler(int socketNum, int conditionSet, TaskScheduler::BackgroundHandlerProc* handlerProc, void* clientData) {
+ // First, see if there's already a handler for this socket:
+ HandlerDescriptor* handler = lookupHandler(socketNum);
+ if (handler == NULL) { // No existing handler, so create a new descr:
+ handler = new HandlerDescriptor(fHandlers.fNextHandler);
+ handler->socketNum = socketNum;
+ }
+
+ handler->conditionSet = conditionSet;
+ handler->handlerProc = handlerProc;
+ handler->clientData = clientData;
+}
+
// Stop handling "socketNum": destroy (and thereby unlink) its descriptor.
void HandlerSet::clearHandler(int socketNum) {
  HandlerDescriptor* handler = lookupHandler(socketNum);
  delete handler; // note: deleting NULL (no such handler) is a harmless no-op
}
+
+void HandlerSet::moveHandler(int oldSocketNum, int newSocketNum) {
+ HandlerDescriptor* handler = lookupHandler(oldSocketNum);
+ if (handler != NULL) {
+ handler->socketNum = newSocketNum;
+ }
+}
+
+HandlerDescriptor* HandlerSet::lookupHandler(int socketNum) {
+ HandlerDescriptor* handler;
+ HandlerIterator iter(*this);
+ while ((handler = iter.next()) != NULL) {
+ if (handler->socketNum == socketNum) break;
+ }
+ return handler;
+}
+
// Begin an iteration over "handlerSet", starting at its first descriptor.
HandlerIterator::HandlerIterator(HandlerSet& handlerSet)
  : fOurSet(handlerSet) {
  reset();
}
+
// Nothing to clean up; the iterator does not own the set or its nodes.
HandlerIterator::~HandlerIterator() {
}
+
// Restart the iteration from the first descriptor (the node just past
// the set's sentinel).
void HandlerIterator::reset() {
  fNextPtr = fOurSet.fHandlers.fNextHandler;
}
+
+HandlerDescriptor* HandlerIterator::next() {
+ HandlerDescriptor* result = fNextPtr;
+ if (result == &fOurSet.fHandlers) { // no more
+ result = NULL;
+ } else {
+ fNextPtr = fNextPtr->fNextHandler;
+ }
+
+ return result;
+}
diff --git a/BasicUsageEnvironment/BasicUsageEnvironment.cpp b/BasicUsageEnvironment/BasicUsageEnvironment.cpp
new file mode 100644
index 0000000..698ce00
--- /dev/null
+++ b/BasicUsageEnvironment/BasicUsageEnvironment.cpp
@@ -0,0 +1,80 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// Implementation
+
+#include "BasicUsageEnvironment.hh"
+#include <stdio.h>
+
+////////// BasicUsageEnvironment //////////
+
+#if defined(__WIN32__) || defined(_WIN32)
+extern "C" int initializeWinsockIfNecessary();
+#endif
+
// On Windows, also make sure the Winsock library is initialized; failure
// to do so is treated as fatal.
BasicUsageEnvironment::BasicUsageEnvironment(TaskScheduler& taskScheduler)
: BasicUsageEnvironment0(taskScheduler) {
#if defined(__WIN32__) || defined(_WIN32)
  if (!initializeWinsockIfNecessary()) {
    setResultErrMsg("Failed to initialize 'winsock': ");
    reportBackgroundError();
    internalError();
  }
#endif
}
+
// No resources of our own to release; base-class destructors run as usual.
BasicUsageEnvironment::~BasicUsageEnvironment() {
}
+
// Factory function.
BasicUsageEnvironment*
BasicUsageEnvironment::createNew(TaskScheduler& taskScheduler) {
  return new BasicUsageEnvironment(taskScheduler);
}
+
// Returns the most recent OS error code (the Winsock error on Windows;
// "errno" elsewhere).
int BasicUsageEnvironment::getErrno() const {
#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE)
  return WSAGetLastError();
#else
  return errno;
#endif
}
+
+UsageEnvironment& BasicUsageEnvironment::operator<<(char const* str) {
+ if (str == NULL) str = "(NULL)"; // sanity check
+ fprintf(stderr, "%s", str);
+ return *this;
+}
+
// Write a signed integer to 'stderr'.
UsageEnvironment& BasicUsageEnvironment::operator<<(int i) {
  fprintf(stderr, "%d", i);
  return *this;
}
+
// Write an unsigned integer to 'stderr'.
UsageEnvironment& BasicUsageEnvironment::operator<<(unsigned u) {
  fprintf(stderr, "%u", u);
  return *this;
}
+
// Write a floating-point value to 'stderr' (default "%f" formatting).
UsageEnvironment& BasicUsageEnvironment::operator<<(double d) {
  fprintf(stderr, "%f", d);
  return *this;
}
+
// Write a pointer value to 'stderr'.
UsageEnvironment& BasicUsageEnvironment::operator<<(void* p) {
  fprintf(stderr, "%p", p);
  return *this;
}
diff --git a/BasicUsageEnvironment/BasicUsageEnvironment0.cpp b/BasicUsageEnvironment/BasicUsageEnvironment0.cpp
new file mode 100644
index 0000000..21163d3
--- /dev/null
+++ b/BasicUsageEnvironment/BasicUsageEnvironment0.cpp
@@ -0,0 +1,108 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// Implementation
+
+#include "BasicUsageEnvironment0.hh"
+#include <stdio.h>
+#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE)
+#define snprintf _snprintf
+#endif
+
+
+////////// BasicUsageEnvironment //////////
+
// Construct with an empty result-message buffer of fixed capacity
// RESULT_MSG_BUFFER_MAX.
BasicUsageEnvironment0::BasicUsageEnvironment0(TaskScheduler& taskScheduler)
  : UsageEnvironment(taskScheduler),
    fBufferMaxSize(RESULT_MSG_BUFFER_MAX) {
  reset();
}
+
// The message buffer is an in-object array, so there's nothing to free.
BasicUsageEnvironment0::~BasicUsageEnvironment0() {
}
+
// Empty the result-message buffer (leaving it a valid empty C string).
void BasicUsageEnvironment0::reset() {
  fCurBufferSize = 0;
  fResultMsgBuffer[fCurBufferSize] = '\0';
}
+
+
+// Implementation of virtual functions:
+
// Returns the accumulated result message (valid until the next set/append).
char const* BasicUsageEnvironment0::getResultMsg() const {
  return fResultMsgBuffer;
}
+
// Replace the result message with "msg".
void BasicUsageEnvironment0::setResultMsg(MsgString msg) {
  reset();
  appendToResultMsg(msg);
}
+
// Replace the result message with the concatenation of "msg1" and "msg2".
void BasicUsageEnvironment0::setResultMsg(MsgString msg1, MsgString msg2) {
  setResultMsg(msg1);
  appendToResultMsg(msg2);
}
+
// Replace the result message with the concatenation of three strings.
void BasicUsageEnvironment0::setResultMsg(MsgString msg1, MsgString msg2,
					  MsgString msg3) {
  setResultMsg(msg1, msg2);
  appendToResultMsg(msg3);
}
+
// Set the result message to "msg" followed by a human-readable description
// of OS error "err" (or of the current error, if "err" is 0).
void BasicUsageEnvironment0::setResultErrMsg(MsgString msg, int err) {
  setResultMsg(msg);

  if (err == 0) err = getErrno();
#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE)
#ifndef _UNICODE
  char errMsg[RESULT_MSG_BUFFER_MAX] = "\0";
  if (0 != FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, errMsg, sizeof(errMsg)/sizeof(errMsg[0]), NULL)) {
    // Remove all trailing '\r', '\n' and '.'
    // (NOTE(review): the scan starts at the terminating '\0' - hence the
    //  "*p == '\0'" clause - and stops at the first byte it keeps.)
    for (char* p = errMsg + strlen(errMsg); p != errMsg && (*p == '\r' || *p == '\n' || *p == '.' || *p == '\0'); --p) {
      *p = '\0';
    }
  } else
    snprintf(errMsg, sizeof(errMsg)/sizeof(errMsg[0]), "error %d", err);
  appendToResultMsg(errMsg);
#endif
#else
  appendToResultMsg(strerror(err));
#endif
}
+
+
+
+
+void BasicUsageEnvironment0::appendToResultMsg(MsgString msg) {
+ char* curPtr = &fResultMsgBuffer[fCurBufferSize];
+ unsigned spaceAvailable = fBufferMaxSize - fCurBufferSize;
+ unsigned msgLength = strlen(msg);
+
+ // Copy only enough of "msg" as will fit:
+ if (msgLength > spaceAvailable-1) {
+ msgLength = spaceAvailable-1;
+ }
+
+ memmove(curPtr, (char*)msg, msgLength);
+ fCurBufferSize += msgLength;
+ fResultMsgBuffer[fCurBufferSize] = '\0';
+}
+
// Report the current result message to 'stderr'.
void BasicUsageEnvironment0::reportBackgroundError() {
  fputs(getResultMsg(), stderr);
}
+
diff --git a/BasicUsageEnvironment/COPYING b/BasicUsageEnvironment/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/BasicUsageEnvironment/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/BasicUsageEnvironment/COPYING.LESSER b/BasicUsageEnvironment/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/BasicUsageEnvironment/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/BasicUsageEnvironment/DelayQueue.cpp b/BasicUsageEnvironment/DelayQueue.cpp
new file mode 100644
index 0000000..0af3b9d
--- /dev/null
+++ b/BasicUsageEnvironment/DelayQueue.cpp
@@ -0,0 +1,233 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// Help from Carlo Bonamico to get this working on Windows
+// Delay queue
+// Implementation
+
+#include "DelayQueue.hh"
+#include "GroupsockHelper.hh"
+
+static const int MILLION = 1000000; // microseconds per second
+
+///// Timeval /////
+
+int Timeval::operator>=(const Timeval& arg2) const {
+  return seconds() > arg2.seconds() // compare seconds first, then microseconds
+    || (seconds() == arg2.seconds()
+	&& useconds() >= arg2.useconds());
+}
+
+void Timeval::operator+=(const DelayInterval& arg2) {
+  secs() += arg2.seconds(); usecs() += arg2.useconds();
+  if (useconds() >= MILLION) { // carry microseconds into the seconds field
+    usecs() -= MILLION;
+    ++secs();
+  }
+}
+
+void Timeval::operator-=(const DelayInterval& arg2) {
+  secs() -= arg2.seconds(); usecs() -= arg2.useconds();
+  if ((int)useconds() < 0) { // borrow from the seconds field
+    usecs() += MILLION;
+    --secs();
+  }
+  if ((int)seconds() < 0) // clamp: a negative result becomes zero
+    secs() = usecs() = 0;
+
+}
+
+DelayInterval operator-(const Timeval& arg1, const Timeval& arg2) {
+  time_base_seconds secs = arg1.seconds() - arg2.seconds();
+  time_base_seconds usecs = arg1.useconds() - arg2.useconds();
+
+  if ((int)usecs < 0) { // borrow from the seconds difference
+    usecs += MILLION;
+    --secs;
+  }
+  if ((int)secs < 0) // clamp: never return a negative interval
+    return DELAY_ZERO;
+  else
+    return DelayInterval(secs, usecs);
+}
+
+
+///// DelayInterval /////
+
+DelayInterval operator*(short arg1, const DelayInterval& arg2) {
+  time_base_seconds result_seconds = arg1*arg2.seconds();
+  time_base_seconds result_useconds = arg1*arg2.useconds();
+
+  time_base_seconds carry = result_useconds/MILLION; // normalize, so that useconds < MILLION
+  result_useconds -= carry*MILLION;
+  result_seconds += carry;
+
+  return DelayInterval(result_seconds, result_useconds);
+}
+
+#ifndef INT_MAX
+#define INT_MAX 0x7FFFFFFF
+#endif
+const DelayInterval DELAY_ZERO(0, 0);
+const DelayInterval DELAY_SECOND(1, 0);
+const DelayInterval DELAY_MINUTE = 60*DELAY_SECOND;
+const DelayInterval DELAY_HOUR = 60*DELAY_MINUTE;
+const DelayInterval DELAY_DAY = 24*DELAY_HOUR;
+const DelayInterval ETERNITY(INT_MAX, MILLION-1);
+// "ETERNITY" is used internally - as the delay of a "DelayQueue"s own sentinel head entry - to make the implementation work
+
+
+///// DelayQueueEntry /////
+
+intptr_t DelayQueueEntry::tokenCounter = 0; // source of unique entry tokens
+
+DelayQueueEntry::DelayQueueEntry(DelayInterval delay)
+  : fDeltaTimeRemaining(delay) {
+  fNext = fPrev = this; // initially unlinked: a singleton circular list
+  fToken = ++tokenCounter; // assign this entry a unique token
+}
+
+DelayQueueEntry::~DelayQueueEntry() {
+}
+
+void DelayQueueEntry::handleTimeout() {
+  delete this; // default behavior; subclasses may redefine
+}
+
+
+///// DelayQueue /////
+
+DelayQueue::DelayQueue()
+  : DelayQueueEntry(ETERNITY) { // the queue object itself is the sentinel entry, with a 'never expires' delay
+  fLastSyncTime = TimeNow();
+}
+
+DelayQueue::~DelayQueue() {
+  while (fNext != this) { // until only the sentinel remains
+    DelayQueueEntry* entryToRemove = fNext;
+    removeEntry(entryToRemove);
+    delete entryToRemove;
+  }
+}
+
+void DelayQueue::addEntry(DelayQueueEntry* newEntry) {
+  synchronize(); // bring all 'time remaining' fields up-to-date first
+
+  DelayQueueEntry* cur = head();
+  while (newEntry->fDeltaTimeRemaining >= cur->fDeltaTimeRemaining) {
+    newEntry->fDeltaTimeRemaining -= cur->fDeltaTimeRemaining; // delays are stored as deltas from the previous entry
+    cur = cur->fNext;
+  }
+
+  cur->fDeltaTimeRemaining -= newEntry->fDeltaTimeRemaining; // "cur"'s delta is now relative to "newEntry"
+
+  // Add "newEntry" to the queue, just before "cur":
+  newEntry->fNext = cur;
+  newEntry->fPrev = cur->fPrev;
+  cur->fPrev = newEntry->fPrev->fNext = newEntry;
+}
+
+void DelayQueue::updateEntry(DelayQueueEntry* entry, DelayInterval newDelay) {
+  if (entry == NULL) return;
+
+  removeEntry(entry); // remove, then re-insert with the new delay, to keep the queue ordered
+  entry->fDeltaTimeRemaining = newDelay;
+  addEntry(entry);
+}
+
+void DelayQueue::updateEntry(intptr_t tokenToFind, DelayInterval newDelay) {
+  DelayQueueEntry* entry = findEntryByToken(tokenToFind);
+  updateEntry(entry, newDelay); // a no-op if the token wasn't found
+}
+
+void DelayQueue::removeEntry(DelayQueueEntry* entry) {
+  if (entry == NULL || entry->fNext == NULL) return; // NULL, or already removed
+
+  entry->fNext->fDeltaTimeRemaining += entry->fDeltaTimeRemaining; // preserve the successors' absolute delays
+  entry->fPrev->fNext = entry->fNext;
+  entry->fNext->fPrev = entry->fPrev;
+  entry->fNext = entry->fPrev = NULL;
+  // in case we should try to remove it again
+}
+
+DelayQueueEntry* DelayQueue::removeEntry(intptr_t tokenToFind) {
+  DelayQueueEntry* entry = findEntryByToken(tokenToFind);
+  removeEntry(entry); // safe even if "entry" is NULL
+  return entry;
+}
+
+DelayInterval const& DelayQueue::timeToNextAlarm() {
+  if (head()->fDeltaTimeRemaining == DELAY_ZERO) return DELAY_ZERO; // a common case
+
+  synchronize(); // otherwise, update the deltas before reporting
+  return head()->fDeltaTimeRemaining;
+}
+
+void DelayQueue::handleAlarm() {
+  if (head()->fDeltaTimeRemaining != DELAY_ZERO) synchronize(); // re-check against the current time
+
+  if (head()->fDeltaTimeRemaining == DELAY_ZERO) {
+    // This event is due to be handled:
+    DelayQueueEntry* toRemove = head();
+    removeEntry(toRemove); // do this first, in case handler accesses queue
+
+    toRemove->handleTimeout(); // note: by default this "delete"s the entry
+  }
+}
+
+DelayQueueEntry* DelayQueue::findEntryByToken(intptr_t tokenToFind) {
+  DelayQueueEntry* cur = head();
+  while (cur != this) { // "this" is the sentinel marking the end of the circular list
+    if (cur->token() == tokenToFind) return cur;
+    cur = cur->fNext;
+  }
+
+  return NULL;
+}
+
+void DelayQueue::synchronize() {
+  // First, figure out how much time has elapsed since the last sync:
+  _EventTime timeNow = TimeNow();
+  if (timeNow < fLastSyncTime) {
+    // The system clock has apparently gone back in time; reset our sync time and return:
+    fLastSyncTime = timeNow;
+    return;
+  }
+  DelayInterval timeSinceLastSync = timeNow - fLastSyncTime;
+  fLastSyncTime = timeNow;
+
+  // Then, adjust the delay queue for any entries whose time is up:
+  DelayQueueEntry* curEntry = head();
+  while (timeSinceLastSync >= curEntry->fDeltaTimeRemaining) {
+    timeSinceLastSync -= curEntry->fDeltaTimeRemaining;
+    curEntry->fDeltaTimeRemaining = DELAY_ZERO; // this entry is now due
+    curEntry = curEntry->fNext;
+  }
+  curEntry->fDeltaTimeRemaining -= timeSinceLastSync; // the first not-yet-due entry absorbs the remainder
+}
+
+
+///// _EventTime /////
+
+_EventTime TimeNow() {
+  struct timeval tvNow;
+
+  gettimeofday(&tvNow, NULL);
+
+  return _EventTime(tvNow.tv_sec, tvNow.tv_usec); // the current time, as seconds+microseconds since the epoch
+}
+
+const _EventTime THE_END_OF_TIME(INT_MAX); // the largest representable event time - i.e., effectively 'never'
diff --git a/BasicUsageEnvironment/Makefile.head b/BasicUsageEnvironment/Makefile.head
new file mode 100644
index 0000000..f4e4414
--- /dev/null
+++ b/BasicUsageEnvironment/Makefile.head
@@ -0,0 +1,4 @@
+INCLUDES = -Iinclude -I../UsageEnvironment/include -I../groupsock/include
+PREFIX = /usr/local
+LIBDIR = $(PREFIX)/lib
+##### Change the following for your environment:
diff --git a/BasicUsageEnvironment/Makefile.tail b/BasicUsageEnvironment/Makefile.tail
new file mode 100644
index 0000000..5d04179
--- /dev/null
+++ b/BasicUsageEnvironment/Makefile.tail
@@ -0,0 +1,43 @@
+##### End of variables to change
+
+NAME = libBasicUsageEnvironment
+LIB_FILE = $(NAME).$(LIB_SUFFIX)
+ALL = $(LIB_FILE)
+all: $(ALL)
+
+OBJS = BasicUsageEnvironment0.$(OBJ) BasicUsageEnvironment.$(OBJ) \
+ BasicTaskScheduler0.$(OBJ) BasicTaskScheduler.$(OBJ) \
+ DelayQueue.$(OBJ) BasicHashTable.$(OBJ)
+
+libBasicUsageEnvironment.$(LIB_SUFFIX): $(OBJS)
+ $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \
+ $(OBJS)
+
+.$(C).$(OBJ):
+ $(C_COMPILER) -c $(C_FLAGS) $<
+
+.$(CPP).$(OBJ):
+ $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+BasicUsageEnvironment0.$(CPP): include/BasicUsageEnvironment0.hh
+include/BasicUsageEnvironment0.hh: include/BasicUsageEnvironment_version.hh include/DelayQueue.hh
+BasicUsageEnvironment.$(CPP): include/BasicUsageEnvironment.hh
+include/BasicUsageEnvironment.hh: include/BasicUsageEnvironment0.hh
+BasicTaskScheduler0.$(CPP): include/BasicUsageEnvironment0.hh include/HandlerSet.hh
+BasicTaskScheduler.$(CPP): include/BasicUsageEnvironment.hh include/HandlerSet.hh
+DelayQueue.$(CPP): include/DelayQueue.hh
+BasicHashTable.$(CPP): include/BasicHashTable.hh
+
+clean:
+ -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+install: install1 $(INSTALL2)
+install1: libBasicUsageEnvironment.$(LIB_SUFFIX)
+ install -d $(DESTDIR)$(PREFIX)/include/BasicUsageEnvironment $(DESTDIR)$(LIBDIR)
+ install -m 644 include/*.hh $(DESTDIR)$(PREFIX)/include/BasicUsageEnvironment
+ install -m 644 libBasicUsageEnvironment.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)
+install_shared_libraries: libBasicUsageEnvironment.$(LIB_SUFFIX)
+ ln -fs $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).$(SHORT_LIB_SUFFIX)
+ ln -fs $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).so
+
+##### Any additional, platform-specific rules come here:
diff --git a/BasicUsageEnvironment/include/BasicHashTable.hh b/BasicUsageEnvironment/include/BasicHashTable.hh
new file mode 100644
index 0000000..6174eab
--- /dev/null
+++ b/BasicUsageEnvironment/include/BasicHashTable.hh
@@ -0,0 +1,104 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Hash Table implementation
+// C++ header
+
+#ifndef _BASIC_HASH_TABLE_HH
+#define _BASIC_HASH_TABLE_HH
+
+#ifndef _HASH_TABLE_HH
+#include "HashTable.hh"
+#endif
+#ifndef _NET_COMMON_H
+#include <NetCommon.h> // to ensure that "uintptr_t" is defined
+#endif
+
+// A simple hash table implementation, inspired by the hash table
+// implementation used in Tcl 7.6: <http://www.tcl.tk/>
+
+#define SMALL_HASH_TABLE_SIZE 4
+
+class BasicHashTable: public HashTable {
+private:
+	class TableEntry; // forward
+
+public:
+  BasicHashTable(int keyType);
+  virtual ~BasicHashTable();
+
+  // Used to iterate through the members of the table:
+  class Iterator; friend class Iterator; // to make Sun's C++ compiler happy
+  class Iterator: public HashTable::Iterator {
+  public:
+    Iterator(BasicHashTable const& table);
+
+  private: // implementation of inherited pure virtual functions
+    void* next(char const*& key); // returns 0 if none
+
+  private:
+    BasicHashTable const& fTable;
+    unsigned fNextIndex; // index of next bucket to be enumerated after this
+    TableEntry* fNextEntry; // next entry in the current bucket
+  };
+
+private: // implementation of inherited pure virtual functions
+  virtual void* Add(char const* key, void* value);
+  // Returns the old value if different, otherwise 0
+  virtual Boolean Remove(char const* key);
+  virtual void* Lookup(char const* key) const;
+  // Returns 0 if not found
+  virtual unsigned numEntries() const;
+
+private:
+  class TableEntry {
+  public:
+    TableEntry* fNext;
+    char const* key;
+    void* value;
+  };
+
+  TableEntry* lookupKey(char const* key, unsigned& index) const;
+    // returns entry matching "key", or NULL if none
+  Boolean keyMatches(char const* key1, char const* key2) const;
+    // used to implement "lookupKey()"
+
+  TableEntry* insertNewEntry(unsigned index, char const* key);
+    // creates a new entry, and inserts it in the table
+  void assignKey(TableEntry* entry, char const* key);
+    // used to implement "insertNewEntry()"
+
+  void deleteEntry(unsigned index, TableEntry* entry);
+  void deleteKey(TableEntry* entry);
+    // used to implement "deleteEntry()"
+
+  void rebuild(); // rebuilds the table as its size increases
+
+  unsigned hashIndexFromKey(char const* key) const;
+    // used to implement many of the routines above
+
+  unsigned randomIndex(uintptr_t i) const {
+    return (unsigned)(((i*1103515245) >> fDownShift) & fMask); // multiplicative hash, reduced to a bucket index via fDownShift/fMask
+  }
+
+private:
+  TableEntry** fBuckets; // pointer to bucket array
+  TableEntry* fStaticBuckets[SMALL_HASH_TABLE_SIZE];// used for small tables
+  unsigned fNumBuckets, fNumEntries, fRebuildSize, fDownShift, fMask; // fRebuildSize: the entry count at which the table is rebuilt with more buckets
+  int fKeyType;
+};
+
+#endif
diff --git a/BasicUsageEnvironment/include/BasicUsageEnvironment.hh b/BasicUsageEnvironment/include/BasicUsageEnvironment.hh
new file mode 100644
index 0000000..d01dc31
--- /dev/null
+++ b/BasicUsageEnvironment/include/BasicUsageEnvironment.hh
@@ -0,0 +1,86 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// C++ header
+
+#ifndef _BASIC_USAGE_ENVIRONMENT_HH
+#define _BASIC_USAGE_ENVIRONMENT_HH
+
+#ifndef _BASIC_USAGE_ENVIRONMENT0_HH
+#include "BasicUsageEnvironment0.hh"
+#endif
+
+class BasicUsageEnvironment: public BasicUsageEnvironment0 {
+public:
+  static BasicUsageEnvironment* createNew(TaskScheduler& taskScheduler); // factory: the constructor below is protected
+
+  // redefined virtual functions:
+  virtual int getErrno() const; // the current OS error number (used by "setResultErrMsg()" when err == 0)
+
+  virtual UsageEnvironment& operator<<(char const* str);
+  virtual UsageEnvironment& operator<<(int i);
+  virtual UsageEnvironment& operator<<(unsigned u);
+  virtual UsageEnvironment& operator<<(double d);
+  virtual UsageEnvironment& operator<<(void* p);
+
+protected:
+  BasicUsageEnvironment(TaskScheduler& taskScheduler);
+      // called only by "createNew()" (or subclass constructors)
+  virtual ~BasicUsageEnvironment();
+};
+
+
+class BasicTaskScheduler: public BasicTaskScheduler0 {
+public:
+  static BasicTaskScheduler* createNew(unsigned maxSchedulerGranularity = 10000/*microseconds*/);
+  // "maxSchedulerGranularity" (default value: 10 ms) specifies the maximum time that we wait (in "select()") before
+  // returning to the event loop to handle non-socket or non-timer-based events, such as 'triggered events'.
+  // You can change this if you wish (but only if you know what you're doing!), or set it to 0, to specify no such maximum time.
+  // (You should set it to 0 only if you know that you will not be using 'event triggers'.)
+  virtual ~BasicTaskScheduler();
+
+protected:
+  BasicTaskScheduler(unsigned maxSchedulerGranularity);
+      // called only by "createNew()"
+
+  static void schedulerTickTask(void* clientData); // presumably a static wrapper that invokes the member function below - see the .cpp
+  void schedulerTickTask();
+
+protected:
+  // Redefined virtual functions:
+  virtual void SingleStep(unsigned maxDelayTime);
+
+  virtual void setBackgroundHandling(int socketNum, int conditionSet, BackgroundHandlerProc* handlerProc, void* clientData);
+  virtual void moveSocketHandling(int oldSocketNum, int newSocketNum);
+
+protected:
+  unsigned fMaxSchedulerGranularity;
+
+  // To implement background operations:
+  int fMaxNumSockets;
+  fd_set fReadSet;
+  fd_set fWriteSet;
+  fd_set fExceptionSet;
+
+private:
+#if defined(__WIN32__) || defined(_WIN32)
+  // Hack to work around a bug in Windows' "select()" implementation:
+  int fDummySocketNum;
+#endif
+};
+
+#endif
diff --git a/BasicUsageEnvironment/include/BasicUsageEnvironment0.hh b/BasicUsageEnvironment/include/BasicUsageEnvironment0.hh
new file mode 100644
index 0000000..c0ec3bd
--- /dev/null
+++ b/BasicUsageEnvironment/include/BasicUsageEnvironment0.hh
@@ -0,0 +1,114 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// C++ header
+
+#ifndef _BASIC_USAGE_ENVIRONMENT0_HH
+#define _BASIC_USAGE_ENVIRONMENT0_HH
+
+#ifndef _BASICUSAGEENVIRONMENT_VERSION_HH
+#include "BasicUsageEnvironment_version.hh"
+#endif
+
+#ifndef _USAGE_ENVIRONMENT_HH
+#include "UsageEnvironment.hh"
+#endif
+
+#ifndef _DELAY_QUEUE_HH
+#include "DelayQueue.hh"
+#endif
+
+#define RESULT_MSG_BUFFER_MAX 1000
+
+// An abstract base class, useful for subclassing
+// (e.g., to redefine the implementation of "operator<<")
+class BasicUsageEnvironment0: public UsageEnvironment {
+public:
+  // redefined virtual functions:
+  virtual MsgString getResultMsg() const;
+
+  virtual void setResultMsg(MsgString msg);
+  virtual void setResultMsg(MsgString msg1,
+			    MsgString msg2);
+  virtual void setResultMsg(MsgString msg1,
+			    MsgString msg2,
+			    MsgString msg3);
+  virtual void setResultErrMsg(MsgString msg, int err = 0); // err == 0 means: use the current OS error number
+
+  virtual void appendToResultMsg(MsgString msg); // appends, truncating if the buffer would overflow
+
+  virtual void reportBackgroundError(); // writes the result message to stderr
+
+protected:
+  BasicUsageEnvironment0(TaskScheduler& taskScheduler);
+  virtual ~BasicUsageEnvironment0();
+
+private:
+  void reset(); // empties the result-message buffer
+
+  char fResultMsgBuffer[RESULT_MSG_BUFFER_MAX];
+  unsigned fCurBufferSize; // # of bytes currently used in "fResultMsgBuffer" (excluding the trailing '\0')
+  unsigned fBufferMaxSize; // the buffer's capacity (set in the constructor - not shown here; presumably RESULT_MSG_BUFFER_MAX)
+};
+
+class HandlerSet; // forward
+
+#define MAX_NUM_EVENT_TRIGGERS 32
+
+// An abstract base class, useful for subclassing
+// (e.g., to redefine the implementation of socket event handling)
+class BasicTaskScheduler0: public TaskScheduler {
+public:
+  virtual ~BasicTaskScheduler0();
+
+  virtual void SingleStep(unsigned maxDelayTime = 0) = 0;
+      // "maxDelayTime" is in microseconds.  It allows a subclass to impose a limit
+      // on how long "select()" can delay, in case it wants to also do polling.
+      // 0 (the default value) means: There's no maximum; just look at the delay queue
+
+public:
+  // Redefined virtual functions:
+  virtual TaskToken scheduleDelayedTask(int64_t microseconds, TaskFunc* proc,
+					void* clientData);
+  virtual void unscheduleDelayedTask(TaskToken& prevTask);
+
+  virtual void doEventLoop(char volatile* watchVariable);
+
+  virtual EventTriggerId createEventTrigger(TaskFunc* eventHandlerProc);
+  virtual void deleteEventTrigger(EventTriggerId eventTriggerId);
+  virtual void triggerEvent(EventTriggerId eventTriggerId, void* clientData = NULL);
+
+protected:
+  BasicTaskScheduler0();
+
+protected:
+  // To implement delayed operations:
+  DelayQueue fDelayQueue;
+
+  // To implement background reads:
+  HandlerSet* fHandlers;
+  int fLastHandledSocketNum; // NOTE(review): presumably used to rotate fairly among socket handlers - confirm in the .cpp
+
+  // To implement event triggers:
+  EventTriggerId volatile fTriggersAwaitingHandling; // implemented as a 32-bit bitmap
+  EventTriggerId fLastUsedTriggerMask; // implemented as a 32-bit bitmap
+  TaskFunc* fTriggeredEventHandlers[MAX_NUM_EVENT_TRIGGERS];
+  void* fTriggeredEventClientDatas[MAX_NUM_EVENT_TRIGGERS];
+  unsigned fLastUsedTriggerNum; // in the range [0,MAX_NUM_EVENT_TRIGGERS)
+};
+
+#endif
diff --git a/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh b/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh
new file mode 100644
index 0000000..e7b141d
--- /dev/null
+++ b/BasicUsageEnvironment/include/BasicUsageEnvironment_version.hh
@@ -0,0 +1,10 @@
+// Version information for the "BasicUsageEnvironment" library
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+#ifndef _BASICUSAGEENVIRONMENT_VERSION_HH
+#define _BASICUSAGEENVIRONMENT_VERSION_HH
+
+#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_STRING "2020.03.06"
+#define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_INT 1583452800
+
+#endif
diff --git a/BasicUsageEnvironment/include/DelayQueue.hh b/BasicUsageEnvironment/include/DelayQueue.hh
new file mode 100644
index 0000000..18247af
--- /dev/null
+++ b/BasicUsageEnvironment/include/DelayQueue.hh
@@ -0,0 +1,182 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// Delay queue
+// C++ header
+
+#ifndef _DELAY_QUEUE_HH
+#define _DELAY_QUEUE_HH
+
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+
+#ifdef TIME_BASE
+typedef TIME_BASE time_base_seconds;
+#else
+typedef long time_base_seconds;
+#endif
+
+///// A "Timeval" can be either an absolute time, or a time interval /////
+
+class Timeval {
+public:
+  time_base_seconds seconds() const {
+    return fTv.tv_sec;
+  }
+  time_base_seconds seconds() {
+    return fTv.tv_sec; // (non-const duplicate of the accessor above)
+  }
+  time_base_seconds useconds() const {
+    return fTv.tv_usec;
+  }
+  time_base_seconds useconds() {
+    return fTv.tv_usec; // (non-const duplicate of the accessor above)
+  }
+
+  int operator>=(Timeval const& arg2) const;
+  int operator<=(Timeval const& arg2) const {
+    return arg2 >= *this; // all other comparisons are derived from ">="
+  }
+  int operator<(Timeval const& arg2) const {
+    return !(*this >= arg2);
+  }
+  int operator>(Timeval const& arg2) const {
+    return arg2 < *this;
+  }
+  int operator==(Timeval const& arg2) const {
+    return *this >= arg2 && arg2 >= *this;
+  }
+  int operator!=(Timeval const& arg2) const {
+    return !(*this == arg2);
+  }
+
+  void operator+=(class DelayInterval const& arg2);
+  void operator-=(class DelayInterval const& arg2);
+  // returns ZERO iff arg2 >= arg1
+
+protected:
+  Timeval(time_base_seconds seconds, time_base_seconds useconds) {
+    fTv.tv_sec = seconds; fTv.tv_usec = useconds;
+  }
+
+private:
+  time_base_seconds& secs() {
+    return (time_base_seconds&)fTv.tv_sec; // writable access via a cast (tv_sec's declared type may differ from "time_base_seconds")
+  }
+  time_base_seconds& usecs() {
+    return (time_base_seconds&)fTv.tv_usec; // writable access, as above
+  }
+
+  struct timeval fTv;
+};
+
+#ifndef max // omitted if a "max" macro (e.g., from a platform header) is already defined
+inline Timeval max(Timeval const& arg1, Timeval const& arg2) {
+  return arg1 >= arg2 ? arg1 : arg2;
+}
+#endif
+#ifndef min // likewise for "min"
+inline Timeval min(Timeval const& arg1, Timeval const& arg2) {
+  return arg1 <= arg2 ? arg1 : arg2;
+}
+#endif
+
+class DelayInterval operator-(Timeval const& arg1, Timeval const& arg2);
+// returns ZERO iff arg2 >= arg1
+
+
+///// DelayInterval /////
+
+class DelayInterval: public Timeval {
+public:
+  DelayInterval(time_base_seconds seconds, time_base_seconds useconds)
+    : Timeval(seconds, useconds) {} // a relative time (duration), as opposed to an absolute "_EventTime"
+};
+
+DelayInterval operator*(short arg1, DelayInterval const& arg2);
+
+extern DelayInterval const DELAY_ZERO;
+extern DelayInterval const DELAY_SECOND;
+extern DelayInterval const DELAY_MINUTE;
+extern DelayInterval const DELAY_HOUR;
+extern DelayInterval const DELAY_DAY;
+
+///// _EventTime /////
+
+class _EventTime: public Timeval { // an absolute time
+public:
+  _EventTime(unsigned secondsSinceEpoch = 0,
+	     unsigned usecondsSinceEpoch = 0)
+  // We use the Unix standard epoch: January 1, 1970
+    : Timeval(secondsSinceEpoch, usecondsSinceEpoch) {}
+};
+
+_EventTime TimeNow();
+
+extern _EventTime const THE_END_OF_TIME;
+
+
+///// DelayQueueEntry /////
+
+class DelayQueueEntry {
+public:
+  virtual ~DelayQueueEntry();
+
+  intptr_t token() {
+    return fToken; // this entry's unique id, assigned at construction
+  }
+
+protected: // abstract base class
+  DelayQueueEntry(DelayInterval delay);
+
+  virtual void handleTimeout(); // called when the delay expires; the default implementation "delete"s the entry
+
+private:
+  friend class DelayQueue;
+  DelayQueueEntry* fNext;
+  DelayQueueEntry* fPrev;
+  DelayInterval fDeltaTimeRemaining; // delay relative to the preceding queue entry
+
+  intptr_t fToken;
+  static intptr_t tokenCounter;
+};
+
+///// DelayQueue /////
+
+class DelayQueue: public DelayQueueEntry {
+public:
+  DelayQueue();
+  virtual ~DelayQueue();
+
+  void addEntry(DelayQueueEntry* newEntry); // the entry's (unique) token is available via its "token()" member
+  void updateEntry(DelayQueueEntry* entry, DelayInterval newDelay);
+  void updateEntry(intptr_t tokenToFind, DelayInterval newDelay);
+  void removeEntry(DelayQueueEntry* entry); // but doesn't delete it
+  DelayQueueEntry* removeEntry(intptr_t tokenToFind); // but doesn't delete it
+
+  DelayInterval const& timeToNextAlarm();
+  void handleAlarm(); // handles (at most) one entry whose delay has expired
+
+private:
+  DelayQueueEntry* head() { return fNext; } // the front entry ("this" itself if the queue is empty)
+  DelayQueueEntry* findEntryByToken(intptr_t token);
+  void synchronize(); // bring the 'time remaining' fields up-to-date
+
+  _EventTime fLastSyncTime;
+};
+
+#endif
diff --git a/BasicUsageEnvironment/include/HandlerSet.hh b/BasicUsageEnvironment/include/HandlerSet.hh
new file mode 100644
index 0000000..74df53a
--- /dev/null
+++ b/BasicUsageEnvironment/include/HandlerSet.hh
@@ -0,0 +1,77 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Basic Usage Environment: for a simple, non-scripted, console application
+// C++ header
+
+#ifndef _HANDLER_SET_HH
+#define _HANDLER_SET_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+////////// HandlerSet (etc.) definition //////////
+
+class HandlerDescriptor { // constructor/destructor are private: instances are managed by the friend classes below
+  HandlerDescriptor(HandlerDescriptor* nextHandler);
+  virtual ~HandlerDescriptor();
+
+public:
+  int socketNum;
+  int conditionSet; // NOTE(review): presumably a bitmask of read/write/exception conditions - see "TaskScheduler"
+  TaskScheduler::BackgroundHandlerProc* handlerProc;
+  void* clientData;
+
+private:
+  // Descriptors are linked together in a doubly-linked list:
+  friend class HandlerSet;
+  friend class HandlerIterator;
+  HandlerDescriptor* fNextHandler;
+  HandlerDescriptor* fPrevHandler;
+};
+
+class HandlerSet {
+public:
+  HandlerSet();
+  virtual ~HandlerSet();
+
+  void assignHandler(int socketNum, int conditionSet, TaskScheduler::BackgroundHandlerProc* handlerProc, void* clientData);
+  void clearHandler(int socketNum);
+  void moveHandler(int oldSocketNum, int newSocketNum);
+
+private:
+  HandlerDescriptor* lookupHandler(int socketNum);
+
+private:
+  friend class HandlerIterator;
+  HandlerDescriptor fHandlers; // NOTE(review): appears to serve as the list's sentinel head node - confirm in the implementation
+};
+
+class HandlerIterator {
+public:
+  HandlerIterator(HandlerSet& handlerSet);
+  virtual ~HandlerIterator();
+
+  HandlerDescriptor* next(); // returns NULL if none
+  void reset(); // restart the iteration from the beginning
+
+private:
+  HandlerSet& fOurSet;
+  HandlerDescriptor* fNextPtr; // presumably the descriptor that the next call to "next()" will return
+};
+
+#endif
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/COPYING.LESSER b/COPYING.LESSER
new file mode 100644
index 0000000..65c5ca8
--- /dev/null
+++ b/COPYING.LESSER
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..a0b3601
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,842 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/Makefile.head b/Makefile.head
new file mode 100644
index 0000000..458c54c
--- /dev/null
+++ b/Makefile.head
@@ -0,0 +1 @@
+##### Change the following for your environment:
diff --git a/Makefile.tail b/Makefile.tail
new file mode 100644
index 0000000..6c49b83
--- /dev/null
+++ b/Makefile.tail
@@ -0,0 +1,54 @@
+##### End of variables to change
+
+LIVEMEDIA_DIR = liveMedia
+GROUPSOCK_DIR = groupsock
+USAGE_ENVIRONMENT_DIR = UsageEnvironment
+BASIC_USAGE_ENVIRONMENT_DIR = BasicUsageEnvironment
+
+TESTPROGS_DIR = testProgs
+
+MEDIA_SERVER_DIR = mediaServer
+
+PROXY_SERVER_DIR = proxyServer
+
+HLS_PROXY_DIR = hlsProxy
+
+all:
+ cd $(LIVEMEDIA_DIR) ; $(MAKE)
+ cd $(GROUPSOCK_DIR) ; $(MAKE)
+ cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE)
+ cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE)
+ cd $(TESTPROGS_DIR) ; $(MAKE)
+ cd $(MEDIA_SERVER_DIR) ; $(MAKE)
+ cd $(PROXY_SERVER_DIR) ; $(MAKE)
+ cd $(HLS_PROXY_DIR) ; $(MAKE)
+ @echo
+ @echo "For more information about this source code (including your obligations under the LGPL), please see our FAQ at http://live555.com/liveMedia/faq.html"
+
+install:
+ cd $(LIVEMEDIA_DIR) ; $(MAKE) install
+ cd $(GROUPSOCK_DIR) ; $(MAKE) install
+ cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE) install
+ cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE) install
+ cd $(TESTPROGS_DIR) ; $(MAKE) install
+ cd $(MEDIA_SERVER_DIR) ; $(MAKE) install
+ cd $(PROXY_SERVER_DIR) ; $(MAKE) install
+ cd $(HLS_PROXY_DIR) ; $(MAKE) install
+
+clean:
+ cd $(LIVEMEDIA_DIR) ; $(MAKE) clean
+ cd $(GROUPSOCK_DIR) ; $(MAKE) clean
+ cd $(USAGE_ENVIRONMENT_DIR) ; $(MAKE) clean
+ cd $(BASIC_USAGE_ENVIRONMENT_DIR) ; $(MAKE) clean
+ cd $(TESTPROGS_DIR) ; $(MAKE) clean
+ cd $(MEDIA_SERVER_DIR) ; $(MAKE) clean
+ cd $(PROXY_SERVER_DIR) ; $(MAKE) clean
+ cd $(HLS_PROXY_DIR) ; $(MAKE) clean
+
+distclean: clean
+ -rm -f $(LIVEMEDIA_DIR)/Makefile $(GROUPSOCK_DIR)/Makefile \
+ $(USAGE_ENVIRONMENT_DIR)/Makefile $(BASIC_USAGE_ENVIRONMENT_DIR)/Makefile \
+ $(TESTPROGS_DIR)/Makefile $(MEDIA_SERVER_DIR)/Makefile \
+ $(PROXY_SERVER_DIR)/Makefile \
+ $(HLS_PROXY_DIR)/Makefile \
+ Makefile
diff --git a/README b/README
new file mode 100644
index 0000000..73874c2
--- /dev/null
+++ b/README
@@ -0,0 +1,2 @@
+For documentation and instructions for building this software,
+see <http://www.live555.com/liveMedia/>
diff --git a/UsageEnvironment/COPYING b/UsageEnvironment/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/UsageEnvironment/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/UsageEnvironment/COPYING.LESSER b/UsageEnvironment/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/UsageEnvironment/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/UsageEnvironment/HashTable.cpp b/UsageEnvironment/HashTable.cpp
new file mode 100644
index 0000000..e9c0276
--- /dev/null
+++ b/UsageEnvironment/HashTable.cpp
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Generic Hash Table
+// Implementation
+
+#include "HashTable.hh"
+
+HashTable::HashTable() {
+}
+
+HashTable::~HashTable() {
+}
+
+HashTable::Iterator::Iterator() {
+}
+
+HashTable::Iterator::~Iterator() {}
+
+void* HashTable::RemoveNext() {
+ Iterator* iter = Iterator::create(*this);
+ char const* key;
+ void* removedValue = iter->next(key);
+ if (removedValue != 0) Remove(key);
+
+ delete iter;
+ return removedValue;
+}
+
+void* HashTable::getFirst() {
+ Iterator* iter = Iterator::create(*this);
+ char const* key;
+ void* firstValue = iter->next(key);
+
+ delete iter;
+ return firstValue;
+}
diff --git a/UsageEnvironment/Makefile.head b/UsageEnvironment/Makefile.head
new file mode 100644
index 0000000..48b0268
--- /dev/null
+++ b/UsageEnvironment/Makefile.head
@@ -0,0 +1,4 @@
+INCLUDES = -Iinclude -I../groupsock/include
+PREFIX = /usr/local
+LIBDIR = $(PREFIX)/lib
+##### Change the following for your environment:
diff --git a/UsageEnvironment/Makefile.tail b/UsageEnvironment/Makefile.tail
new file mode 100644
index 0000000..a7c23df
--- /dev/null
+++ b/UsageEnvironment/Makefile.tail
@@ -0,0 +1,37 @@
+##### End of variables to change
+
+NAME = libUsageEnvironment
+USAGE_ENVIRONMENT_LIB = $(NAME).$(LIB_SUFFIX)
+ALL = $(USAGE_ENVIRONMENT_LIB)
+all: $(ALL)
+
+OBJS = UsageEnvironment.$(OBJ) HashTable.$(OBJ) strDup.$(OBJ)
+
+$(USAGE_ENVIRONMENT_LIB): $(OBJS)
+ $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) $(OBJS)
+
+.$(C).$(OBJ):
+ $(C_COMPILER) -c $(C_FLAGS) $<
+
+.$(CPP).$(OBJ):
+ $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+UsageEnvironment.$(CPP): include/UsageEnvironment.hh
+include/UsageEnvironment.hh: include/UsageEnvironment_version.hh include/Boolean.hh include/strDup.hh
+HashTable.$(CPP): include/HashTable.hh
+include/HashTable.hh: include/Boolean.hh
+strDup.$(CPP): include/strDup.hh
+
+clean:
+ -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+install: install1 $(INSTALL2)
+install1: $(USAGE_ENVIRONMENT_LIB)
+ install -d $(DESTDIR)$(PREFIX)/include/UsageEnvironment $(DESTDIR)$(LIBDIR)
+ install -m 644 include/*.hh $(DESTDIR)$(PREFIX)/include/UsageEnvironment
+ install -m 644 $(USAGE_ENVIRONMENT_LIB) $(DESTDIR)$(LIBDIR)
+install_shared_libraries: $(USAGE_ENVIRONMENT_LIB)
+ ln -fs $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).$(SHORT_LIB_SUFFIX)
+ ln -fs $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).so
+
+##### Any additional, platform-specific rules come here:
diff --git a/UsageEnvironment/UsageEnvironment.cpp b/UsageEnvironment/UsageEnvironment.cpp
new file mode 100644
index 0000000..a181bd0
--- /dev/null
+++ b/UsageEnvironment/UsageEnvironment.cpp
@@ -0,0 +1,62 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Usage Environment
+// Implementation
+
+#include "UsageEnvironment.hh"
+
+Boolean UsageEnvironment::reclaim() {
+  // We delete ourselves only if we have no remaining state:
+ if (liveMediaPriv == NULL && groupsockPriv == NULL) {
+ delete this;
+ return True;
+ }
+
+ return False;
+}
+
+UsageEnvironment::UsageEnvironment(TaskScheduler& scheduler)
+ : liveMediaPriv(NULL), groupsockPriv(NULL), fScheduler(scheduler) {
+}
+
+UsageEnvironment::~UsageEnvironment() {
+}
+
+// By default, we handle 'should not occur'-type library errors by calling abort(). Subclasses can redefine this, if desired.
+// (If your runtime library doesn't define the "abort()" function, then define your own (e.g., that does nothing).)
+void UsageEnvironment::internalError() {
+ abort();
+}
+
+
+TaskScheduler::TaskScheduler() {
+}
+
+TaskScheduler::~TaskScheduler() {
+}
+
+void TaskScheduler::rescheduleDelayedTask(TaskToken& task,
+ int64_t microseconds, TaskFunc* proc,
+ void* clientData) {
+ unscheduleDelayedTask(task);
+ task = scheduleDelayedTask(microseconds, proc, clientData);
+}
+
+// By default, we handle 'should not occur'-type library errors by calling abort(). Subclasses can redefine this, if desired.
+void TaskScheduler::internalError() {
+ abort();
+}
diff --git a/UsageEnvironment/include/Boolean.hh b/UsageEnvironment/include/Boolean.hh
new file mode 100644
index 0000000..3d8401e
--- /dev/null
+++ b/UsageEnvironment/include/Boolean.hh
@@ -0,0 +1,37 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+#ifndef _BOOLEAN_HH
+#define _BOOLEAN_HH
+
+#if defined(__BORLANDC__) || (!defined(USE_LIVE555_BOOLEAN) && defined(_MSC_VER) && _MSC_VER >= 1400)
+// Use the "bool" type defined by the Borland compiler, and MSVC++ 8.0, Visual Studio 2005 and higher
+typedef bool Boolean;
+#define False false
+#define True true
+#else
+typedef unsigned char Boolean;
+#ifndef __MSHTML_LIBRARY_DEFINED__
+#ifndef False
+const Boolean False = 0;
+#endif
+#ifndef True
+const Boolean True = 1;
+#endif
+
+#endif
+#endif
+
+#endif
diff --git a/UsageEnvironment/include/HashTable.hh b/UsageEnvironment/include/HashTable.hh
new file mode 100644
index 0000000..3567c2d
--- /dev/null
+++ b/UsageEnvironment/include/HashTable.hh
@@ -0,0 +1,76 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Generic Hash Table
+// C++ header
+
+#ifndef _HASH_TABLE_HH
+#define _HASH_TABLE_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+class HashTable {
+public:
+ virtual ~HashTable();
+
+ // The following must be implemented by a particular
+ // implementation (subclass):
+ static HashTable* create(int keyType);
+
+ virtual void* Add(char const* key, void* value) = 0;
+ // Returns the old value if different, otherwise 0
+ virtual Boolean Remove(char const* key) = 0;
+ virtual void* Lookup(char const* key) const = 0;
+ // Returns 0 if not found
+ virtual unsigned numEntries() const = 0;
+ Boolean IsEmpty() const { return numEntries() == 0; }
+
+ // Used to iterate through the members of the table:
+ class Iterator {
+ public:
+ // The following must be implemented by a particular
+ // implementation (subclass):
+ static Iterator* create(HashTable const& hashTable);
+
+ virtual ~Iterator();
+
+ virtual void* next(char const*& key) = 0; // returns 0 if none
+
+ protected:
+ Iterator(); // abstract base class
+ };
+
+ // A shortcut that can be used to successively remove each of
+ // the entries in the table (e.g., so that their values can be
+ // deleted, if they happen to be pointers to allocated memory).
+ void* RemoveNext();
+
+ // Returns the first entry in the table.
+ // (This is useful for deleting each entry in the table, if the entry's destructor also removes itself from the table.)
+ void* getFirst();
+
+protected:
+ HashTable(); // abstract base class
+};
+
+// Warning: The following are deliberately the same as in
+// Tcl's hash table implementation
+int const STRING_HASH_KEYS = 0;
+int const ONE_WORD_HASH_KEYS = 1;
+
+#endif
diff --git a/UsageEnvironment/include/UsageEnvironment.hh b/UsageEnvironment/include/UsageEnvironment.hh
new file mode 100644
index 0000000..e481052
--- /dev/null
+++ b/UsageEnvironment/include/UsageEnvironment.hh
@@ -0,0 +1,172 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Usage Environment
+// C++ header
+
+#ifndef _USAGE_ENVIRONMENT_HH
+#define _USAGE_ENVIRONMENT_HH
+
+#ifndef _USAGEENVIRONMENT_VERSION_HH
+#include "UsageEnvironment_version.hh"
+#endif
+
+#ifndef _NETCOMMON_H
+#include "NetCommon.h"
+#endif
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+#ifndef _STRDUP_HH
+// "strDup()" is used often, so include this here, so everyone gets it:
+#include "strDup.hh"
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifdef __BORLANDC__
+#define _setmode setmode
+#define _O_BINARY O_BINARY
+#endif
+
+class TaskScheduler; // forward
+
+// An abstract base class, subclassed for each use of the library
+
+class UsageEnvironment {
+public:
+  Boolean reclaim();
+  // returns True iff we were actually able to delete our object
+
+  // task scheduler:
+  TaskScheduler& taskScheduler() const {return fScheduler;}
+
+  // result message handling:
+  typedef char const* MsgString;
+  virtual MsgString getResultMsg() const = 0;
+
+  virtual void setResultMsg(MsgString msg) = 0;
+  virtual void setResultMsg(MsgString msg1, MsgString msg2) = 0;
+  virtual void setResultMsg(MsgString msg1, MsgString msg2, MsgString msg3) = 0;
+  virtual void setResultErrMsg(MsgString msg, int err = 0) = 0;
+  // like setResultMsg(), except that an 'errno' message is appended. (If "err == 0", the "getErrno()" code is used instead.)
+
+  virtual void appendToResultMsg(MsgString msg) = 0;
+
+  virtual void reportBackgroundError() = 0;
+  // used to report a (previously set) error message within
+  // a background event
+
+  virtual void internalError(); // used to 'handle' a 'should not occur'-type error condition within the library.
+
+  // 'errno'
+  virtual int getErrno() const = 0;
+
+  // 'console' output:
+  virtual UsageEnvironment& operator<<(char const* str) = 0;
+  virtual UsageEnvironment& operator<<(int i) = 0;
+  virtual UsageEnvironment& operator<<(unsigned u) = 0;
+  virtual UsageEnvironment& operator<<(double d) = 0;
+  virtual UsageEnvironment& operator<<(void* p) = 0;
+
+  // pointers to additional, optional, library-specific state:
+  void* liveMediaPriv;
+  void* groupsockPriv;
+
+protected:
+  UsageEnvironment(TaskScheduler& scheduler); // abstract base class
+  virtual ~UsageEnvironment(); // we are deleted only by reclaim()
+
+private:
+  TaskScheduler& fScheduler; // the scheduler that was passed to our constructor
+};
+
+
+typedef void TaskFunc(void* clientData); // the signature of functions run by the TaskScheduler
+typedef void* TaskToken; // opaque handle identifying a scheduled (but not-yet-performed) task
+typedef u_int32_t EventTriggerId; // identifies a registered event trigger (0 is used as a failure value - see createEventTrigger())
+
+class TaskScheduler {
+public:
+  virtual ~TaskScheduler();
+
+  virtual TaskToken scheduleDelayedTask(int64_t microseconds, TaskFunc* proc,
+					void* clientData) = 0;
+	// Schedules a task to occur (after a delay) when we next
+	// reach a scheduling point.
+	// (Does not delay if "microseconds" <= 0)
+	// Returns a token that can be used in a subsequent call to
+	// unscheduleDelayedTask() or rescheduleDelayedTask()
+	// (but only if the task has not yet occurred).
+
+  virtual void unscheduleDelayedTask(TaskToken& prevTask) = 0;
+	// (Has no effect if "prevTask" == NULL)
+	// Sets "prevTask" to NULL afterwards.
+	// Note: This MUST NOT be called if the scheduled task has already occurred.
+
+  virtual void rescheduleDelayedTask(TaskToken& task,
+				     int64_t microseconds, TaskFunc* proc,
+				     void* clientData);
+	// Combines "unscheduleDelayedTask()" with "scheduleDelayedTask()"
+	// (setting "task" to the new task token).
+	// Note: This MUST NOT be called if the scheduled task has already occurred.
+
+  // For handling socket operations in the background (from the event loop):
+  typedef void BackgroundHandlerProc(void* clientData, int mask);
+  // Possible bits to set in "mask". (These are deliberately defined
+  // the same as those in Tcl, to make a Tcl-based subclass easy.)
+  #define SOCKET_READABLE (1<<1)
+  #define SOCKET_WRITABLE (1<<2)
+  #define SOCKET_EXCEPTION (1<<3) // note: these are preprocessor macros, so - despite appearing here - they have global scope
+  virtual void setBackgroundHandling(int socketNum, int conditionSet, BackgroundHandlerProc* handlerProc, void* clientData) = 0;
+  void disableBackgroundHandling(int socketNum) { setBackgroundHandling(socketNum, 0, NULL, NULL); } // i.e., clear all conditions (and the handler) for this socket
+  virtual void moveSocketHandling(int oldSocketNum, int newSocketNum) = 0;
+  // Changes any socket handling for "oldSocketNum" so that it occurs with "newSocketNum" instead.
+
+  virtual void doEventLoop(char volatile* watchVariable = NULL) = 0;
+  // Causes further execution to take place within the event loop.
+  // Delayed tasks, background I/O handling, and other events are handled, sequentially (as a single thread of control).
+  // (If "watchVariable" is not NULL, then we return from this routine when *watchVariable != 0)
+
+  virtual EventTriggerId createEventTrigger(TaskFunc* eventHandlerProc) = 0;
+  // Creates a 'trigger' for an event, which - if it occurs - will be handled (from the event loop) using "eventHandlerProc".
+  // (Returns 0 iff no such trigger can be created (e.g., because of implementation limits on the number of triggers).)
+  virtual void deleteEventTrigger(EventTriggerId eventTriggerId) = 0;
+
+  virtual void triggerEvent(EventTriggerId eventTriggerId, void* clientData = NULL) = 0;
+  // Causes the (previously-registered) handler function for the specified event to be handled (from the event loop).
+  // The handler function is called with "clientData" as parameter.
+  // Note: This function (unlike other library functions) may be called from an external thread
+  // - to signal an external event. (However, "triggerEvent()" should not be called with the
+  // same 'event trigger id' from different threads.)
+
+  // The following two functions are deprecated, and are provided for backwards-compatibility only:
+  void turnOnBackgroundReadHandling(int socketNum, BackgroundHandlerProc* handlerProc, void* clientData) {
+    setBackgroundHandling(socketNum, SOCKET_READABLE, handlerProc, clientData);
+  }
+  void turnOffBackgroundReadHandling(int socketNum) { disableBackgroundHandling(socketNum); }
+
+  virtual void internalError(); // used to 'handle' a 'should not occur'-type error condition within the library.
+
+protected:
+  TaskScheduler(); // abstract base class
+};
+
+#endif
diff --git a/UsageEnvironment/include/UsageEnvironment_version.hh b/UsageEnvironment/include/UsageEnvironment_version.hh
new file mode 100644
index 0000000..ffaac6f
--- /dev/null
+++ b/UsageEnvironment/include/UsageEnvironment_version.hh
@@ -0,0 +1,10 @@
+// Version information for the "UsageEnvironment" library
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+#ifndef _USAGEENVIRONMENT_VERSION_HH
+#define _USAGEENVIRONMENT_VERSION_HH
+
+#define USAGEENVIRONMENT_LIBRARY_VERSION_STRING "2020.03.06"
+#define USAGEENVIRONMENT_LIBRARY_VERSION_INT 1583452800
+
+#endif
diff --git a/UsageEnvironment/include/strDup.hh b/UsageEnvironment/include/strDup.hh
new file mode 100644
index 0000000..c17cdad
--- /dev/null
+++ b/UsageEnvironment/include/strDup.hh
@@ -0,0 +1,37 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+
+#ifndef _STRDUP_HH
+#define _STRDUP_HH
+
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A C++ equivalent to the standard C routine "strdup()".
+// This generates a char* that can be deleted using "delete[]"
+// Header
+
+#include <string.h>
+
+char* strDup(char const* str);
+// Note: strDup(NULL) returns NULL
+
+char* strDupSize(char const* str);
+// Like "strDup()", except that it *doesn't* copy the original.
+// (Instead, it just allocates a string of the same size as the original.)
+
+char* strDupSize(char const* str, size_t& resultBufSize);
+// An alternative form of "strDupSize()" that also returns the size of the allocated buffer.
+
+#endif
diff --git a/UsageEnvironment/strDup.cpp b/UsageEnvironment/strDup.cpp
new file mode 100644
index 0000000..396c748
--- /dev/null
+++ b/UsageEnvironment/strDup.cpp
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A C++ equivalent to the standard C routine "strdup()".
+// This generates a char* that can be deleted using "delete[]"
+// Implementation
+
+#include "strDup.hh"
+
+// Returns a heap-allocated copy of "str", to be freed with "delete[]".
+// Note: strDup(NULL) returns NULL.
+char* strDup(char const* str) {
+  if (str == NULL) return NULL;
+
+  size_t len = strlen(str) + 1; // include the trailing '\0'
+  char* copy = new char[len]; // "new" throws on failure; it never returns
+  memcpy(copy, str, len);     // NULL, so no "copy != NULL" guard is needed
+  return copy;
+}
+
+char* strDupSize(char const* str) {
+  size_t dummy;
+  // Delegate to the two-argument form, discarding the resulting buffer size:
+  return strDupSize(str, dummy);
+}
+
+char* strDupSize(char const* str, size_t& resultBufSize) {
+  if (str == NULL) {
+    resultBufSize = 0;
+    return NULL;
+  }
+  // Allocate a buffer large enough to hold a copy of "str" (including its trailing '\0'):
+  resultBufSize = strlen(str) + 1;
+  char* copy = new char[resultBufSize];
+  // Note: the buffer is deliberately left *uninitialized*; the caller fills it in (and "delete[]"s it):
+  return copy;
+}
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice.mak b/WindowsAudioInputDevice/WindowsAudioInputDevice.mak
new file mode 100644
index 0000000..50230e5
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice.mak
@@ -0,0 +1,108 @@
+INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include
+##### Change the following for your environment:
+# Comment out the following line to produce Makefiles that generate debuggable code:
+NODEBUG=1
+
+# The following definition ensures that we are properly matching
+# the WinSock2 library file with the correct header files.
+# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
+TARGETOS = WINNT
+
+# If for some reason you wish to use WinSock1 instead, uncomment the
+# following two definitions.
+# (will link with "wsock32.lib" and include "winsock.h")
+#TARGETOS = WIN95
+#APPVER = 4.0
+
+!include <ntwin32.mak>
+
+UI_OPTS = $(guilflags) $(guilibsdll)
+# Use the following to get a console (e.g., for debugging):
+CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
+CPU=i386
+
+TOOLS32 = c:\Program Files\DevStudio\Vc
+COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I"$(TOOLS32)\include"
+C = c
+C_COMPILER = "$(TOOLS32)\bin\cl"
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(C_COMPILER)
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
+OBJ = obj
+LINK = $(link) -out:
+LIBRARY_LINK = lib -out:
+LINK_OPTS_0 = $(linkdebug) msvcirt.lib
+LIBRARY_LINK_OPTS =
+LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS_0) $(CONSOLE_UI_OPTS)
+SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
+LIB_SUFFIX = lib
+LIBS_FOR_CONSOLE_APPLICATION =
+LIBS_FOR_GUI_APPLICATION =
+MULTIMEDIA_LIBS = winmm.lib
+EXE = .exe
+
+rc32 = "$(TOOLS32)\bin\rc"
+.rc.res:
+	$(rc32) $<
+##### End of variables to change
+
+WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB = libWindowsAudioInputDevice_noMixer.$(LIB_SUFFIX)
+WINDOWSAUDIOINPUTDEVICE_MIXER_LIB = libWindowsAudioInputDevice_mixer.$(LIB_SUFFIX)
+ALL = $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB) $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB) \
+	showAudioInputPorts_noMixer$(EXE) showAudioInputPorts_mixer$(EXE)
+all:: $(ALL)
+# Inference rules for compiling .c and .cpp files:
+.$(C).$(OBJ):
+	$(C_COMPILER) -c $(C_FLAGS) $<
+
+.$(CPP).$(OBJ):
+	$(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB_OBJS = WindowsAudioInputDevice_common.$(OBJ) WindowsAudioInputDevice_noMixer.$(OBJ)
+WINDOWSAUDIOINPUTDEVICE_MIXER_LIB_OBJS = WindowsAudioInputDevice_common.$(OBJ) WindowsAudioInputDevice_mixer.$(OBJ)
+
+WindowsAudioInputDevice_common.$(CPP):	WindowsAudioInputDevice_common.hh
+WindowsAudioInputDevice_noMixer.$(CPP):	WindowsAudioInputDevice_noMixer.hh
+WindowsAudioInputDevice_noMixer.hh:	WindowsAudioInputDevice_common.hh
+WindowsAudioInputDevice_mixer.$(CPP):	WindowsAudioInputDevice_mixer.hh
+WindowsAudioInputDevice_mixer.hh:	WindowsAudioInputDevice_common.hh
+
+$(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB): $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB_OBJS) \
+    $(PLATFORM_SPECIFIC_LIB_OBJS)
+	$(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \
+		$(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB_OBJS)
+$(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB): $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB_OBJS) \
+    $(PLATFORM_SPECIFIC_LIB_OBJS)
+	$(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \
+		$(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB_OBJS)
+
+USAGE_ENVIRONMENT_DIR = ../UsageEnvironment
+USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(LIB_SUFFIX)
+BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment
+BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(LIB_SUFFIX)
+LIVEMEDIA_DIR = ../liveMedia
+LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(LIB_SUFFIX)
+GROUPSOCK_DIR = ../groupsock
+GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(LIB_SUFFIX)
+LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
+	     $(USAGE_ENVIRONMENT_LIB) $(BASIC_USAGE_ENVIRONMENT_LIB)
+LOCAL_LIBS_NOMIXER = $(WINDOWSAUDIOINPUTDEVICE_NOMIXER_LIB) $(LOCAL_LIBS)
+LOCAL_LIBS_MIXER = $(WINDOWSAUDIOINPUTDEVICE_MIXER_LIB) $(LOCAL_LIBS)
+MULTIMEDIA_LIBS = winmm.lib
+LIBS_NOMIXER = $(LOCAL_LIBS_NOMIXER) $(LIBS_FOR_CONSOLE_APPLICATION) $(MULTIMEDIA_LIBS)
+LIBS_MIXER = $(LOCAL_LIBS_MIXER) $(LIBS_FOR_CONSOLE_APPLICATION) $(MULTIMEDIA_LIBS)
+# NOTE(review): MULTIMEDIA_LIBS is defined twice (here and in the 'variables to change' section above); the second definition is redundant.
+SHOW_AUDIO_INPUT_PORTS_OBJS = showAudioInputPorts.$(OBJ)
+
+showAudioInputPorts_noMixer$(EXE): $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LOCAL_LIBS_NOMIXER)
+	$(LINK)$@ $(CONSOLE_LINK_OPTS) $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LIBS_NOMIXER)
+showAudioInputPorts_mixer$(EXE): $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LOCAL_LIBS_MIXER)
+	$(LINK)$@ $(CONSOLE_LINK_OPTS) $(SHOW_AUDIO_INPUT_PORTS_OBJS) $(LIBS_MIXER)
+
+clean:
+	-rm -rf *.$(OBJ) $(ALL) tcl2array$(EXE) core *.core *~
+	-rm -rf $(TCL_EMBEDDED_CPLUSPLUS_FILES) $(TK_EMBEDDED_CPLUSPLUS_FILES) $(MISC_EMBEDDED_CPLUSPLUS_FILES)
+
+##### Any additional, platform-specific rules come here:
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice_common.cpp b/WindowsAudioInputDevice/WindowsAudioInputDevice_common.cpp
new file mode 100644
index 0000000..57ab203
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice_common.cpp
@@ -0,0 +1,325 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 2001-2004 Live Networks, Inc. All rights reserved.
+// Windows implementation of a generic audio input device
+// Base class for both library versions:
+// One that uses Windows' built-in software mixer; another that doesn't.
+// Implementation
+
+#include "WindowsAudioInputDevice_common.hh"
+#include <GroupsockHelper.hh>
+
+////////// WindowsAudioInputDevice_common implementation //////////
+
+unsigned WindowsAudioInputDevice_common::_bitsPerSample = 16; // default; overwritten by each constructor call
+
+WindowsAudioInputDevice_common
+::WindowsAudioInputDevice_common(UsageEnvironment& env, int inputPortNumber,
+				 unsigned char bitsPerSample,
+				 unsigned char numChannels,
+				 unsigned samplingFrequency,
+				 unsigned granularityInMS)
+  : AudioInputDevice(env, bitsPerSample, numChannels, samplingFrequency, granularityInMS),
+    fCurPortIndex(-1), fHaveStarted(False) {
+  _bitsPerSample = bitsPerSample; // static - shared by all instances. ("inputPortNumber" is unused here; presumably the port is selected elsewhere - see initialSetInputPort())
+}
+
+WindowsAudioInputDevice_common::~WindowsAudioInputDevice_common() {
+} // empty: the capture state is static (class-wide); see waveIn_close() for its teardown
+
+Boolean WindowsAudioInputDevice_common::initialSetInputPort(int portIndex) { // returns False - with a descriptive result message set - on failure
+  if (!setInputPort(portIndex)) {
+    char errMsgPrefix[100];
+    sprintf(errMsgPrefix, "Failed to set audio input port number to %d: ", portIndex);
+    char* errMsgSuffix = strDup(envir().getResultMsg()); // copied, so it survives the setResultMsg() call below
+    envir().setResultMsg(errMsgPrefix, errMsgSuffix);
+    delete[] errMsgSuffix;
+    return False;
+  } else {
+    return True;
+  }
+}
+
+void WindowsAudioInputDevice_common::doGetNextFrame() {
+  if (!fHaveStarted) {
+    // Before reading the first audio data, flush any existing data:
+    while (readHead != NULL) releaseHeadBuffer();
+    fHaveStarted = True;
+  }
+  fTotalPollingDelay = 0; // restart the polling-timeout clock (see audioReadyPoller1())
+  audioReadyPoller1(); // deliver a frame now if audio is already queued; otherwise poll until some arrives
+}
+
+void WindowsAudioInputDevice_common::doStopGettingFrames() {
+  // Turn off the audio poller (cancelling any still-pending delayed task):
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+}
+
+double WindowsAudioInputDevice_common::getAverageLevel() const {
+  // If the input audio queue is empty, return the previous level,
+  // otherwise use the input queue to recompute "averageLevel":
+  if (readHead != NULL) {
+    double levelTotal = 0.0;
+    unsigned totNumSamples = 0;
+    WAVEHDR* curHdr = readHead;
+    while (1) {
+      short* samplePtr = (short*)(curHdr->lpData);
+      unsigned numSamples = blockSize/2; // NOTE(review): assumes 16-bit samples and a completely-filled block - confirm for other sample sizes / partial blocks
+      totNumSamples += numSamples;
+
+      while (numSamples-- > 0) {
+	short sample = *samplePtr++;
+	if (sample < 0) sample = -sample; // accumulate |sample|
+	levelTotal += (unsigned short)sample;
+      }
+
+      if (curHdr == readTail) break;
+      curHdr = curHdr->lpNext;
+    }
+    averageLevel = levelTotal/(totNumSamples*(double)0x8000); // normalize to [0.0,1.0]
+  }
+  return averageLevel;
+}
+
+void WindowsAudioInputDevice_common::audioReadyPoller(void* clientData) {
+  // Static trampoline (scheduled as a "TaskFunc"): resume polling on the device object:
+  ((WindowsAudioInputDevice_common*)clientData)->audioReadyPoller1();
+}
+
+void WindowsAudioInputDevice_common::audioReadyPoller1() {
+  nextTask() = NULL; // the delayed task (if any) that invoked us has now fired
+  if (readHead != NULL) {
+    onceAudioIsReady();
+  } else {
+    unsigned const maxPollingDelay = (100 + fGranularityInMS)*1000; // in microseconds
+    if (fTotalPollingDelay > maxPollingDelay) {
+      // We've waited too long for the audio device - assume it's down:
+      handleClosure(this);
+      return;
+    }
+
+    // Try again after a short delay:
+    unsigned const uSecondsToDelay = fGranularityInMS*1000;
+    fTotalPollingDelay += uSecondsToDelay;
+    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToDelay,
+							     (TaskFunc*)audioReadyPoller, this);
+  }
+}
+
+void WindowsAudioInputDevice_common::onceAudioIsReady() {
+  fFrameSize = readFromBuffers(fTo, fMaxSize, fPresentationTime);
+  if (fFrameSize == 0) {
+    // The source is no longer readable
+    handleClosure(this);
+    return;
+  }
+  fDurationInMicroseconds = 1000000/fSamplingFrequency; // NOTE(review): this is the duration of *one sample*, not of the whole frame just read - confirm intended
+
+  // Call our own 'after getting' function. Because we sometimes get here
+  // after returning from a delay, we can call this directly, without risking
+  // infinite recursion
+  afterGetting(this);
+}
+// Callback passed to waveInOpen() (see waveIn_open() below):
+static void CALLBACK waveInCallback(HWAVEIN /*hwi*/, UINT uMsg,
+				    DWORD /*dwInstance*/, DWORD dwParam1, DWORD /*dwParam2*/) { // NOTE(review): Win32 declares these parameters as DWORD_PTR; plain DWORD truncates the pointer on 64-bit builds - confirm target platform
+  switch (uMsg) {
+  case WIM_DATA: // the device has filled an input buffer; "dwParam1" points to its WAVEHDR
+    WAVEHDR* hdr = (WAVEHDR*)dwParam1;
+    WindowsAudioInputDevice_common::waveInProc(hdr);
+    break;
+  }
+}
+
+Boolean WindowsAudioInputDevice_common::openWavInPort(int index, unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS) {
+  uSecsPerByte = (8*1e6)/(_bitsPerSample*numChannels*samplingFrequency); // used later to adjust timestamps for consumed bytes
+
+  // Configure the port, based on the specified parameters:
+  WAVEFORMATEX wfx;
+  wfx.wFormatTag = WAVE_FORMAT_PCM;
+  wfx.nChannels = numChannels;
+  wfx.nSamplesPerSec = samplingFrequency;
+  wfx.wBitsPerSample = _bitsPerSample;
+  wfx.nBlockAlign = (numChannels*_bitsPerSample)/8;
+  wfx.nAvgBytesPerSec = samplingFrequency*wfx.nBlockAlign;
+  wfx.cbSize = 0;
+
+  blockSize = (wfx.nAvgBytesPerSec*granularityInMS)/1000; // i.e., bytes per "granularityInMS" of audio
+
+  // Use a 10-second input buffer, to allow for CPU competition from video, etc.,
+  // and also for some audio cards that buffer as much as 5 seconds of audio.
+  unsigned const bufferSeconds = 10;
+  numBlocks = (bufferSeconds*1000)/granularityInMS;
+
+  if (!waveIn_open(index, wfx)) return False;
+
+  // Set this process's priority high. I'm not sure how much this is really needed,
+  // but the "rat" code does this:
+  SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
+  return True;
+}
+
+Boolean WindowsAudioInputDevice_common::waveIn_open(unsigned uid, WAVEFORMATEX& wfx) {
+  if (shWaveIn != NULL) return True; // already open
+
+  do { // (a "break" below jumps to the error-handling cleanup at the end)
+    waveIn_reset();
+    if (waveInOpen(&shWaveIn, uid, &wfx,
+		   (DWORD)waveInCallback, 0, CALLBACK_FUNCTION) != MMSYSERR_NOERROR) break; // NOTE(review): the callback cast should be DWORD_PTR on 64-bit Windows - confirm target
+
+    // Allocate read buffers, and headers:
+    readData = new unsigned char[numBlocks*blockSize];
+    if (readData == NULL) break;
+
+    readHdrs = new WAVEHDR[numBlocks];
+    if (readHdrs == NULL) break;
+    readHead = readTail = NULL;
+
+    readTimes = new struct timeval[numBlocks];
+    if (readTimes == NULL) break;
+
+    // Initialize headers:
+    for (unsigned i = 0; i < numBlocks; ++i) { // NOTE(review): the "break"s below exit only this "for" loop, not the enclosing "do" - so header-setup failures are not treated as fatal; confirm intended
+      readHdrs[i].lpData = (char*)&readData[i*blockSize];
+      readHdrs[i].dwBufferLength = blockSize;
+      readHdrs[i].dwFlags = 0;
+      if (waveInPrepareHeader(shWaveIn, &readHdrs[i], sizeof (WAVEHDR)) != MMSYSERR_NOERROR) break;
+      if (waveInAddBuffer(shWaveIn, &readHdrs[i], sizeof (WAVEHDR)) != MMSYSERR_NOERROR) break;
+    }
+
+    if (waveInStart(shWaveIn) != MMSYSERR_NOERROR) break;
+
+#ifdef UNICODE
+    hAudioReady = CreateEvent(NULL, TRUE, FALSE, L"waveIn Audio Ready");
+#else
+    hAudioReady = CreateEvent(NULL, TRUE, FALSE, "waveIn Audio Ready");
+#endif
+    return True;
+  } while (0);
+
+  waveIn_reset(); // error path: undo any partial setup
+  return False;
+}
+
+void WindowsAudioInputDevice_common::waveIn_close() {
+  if (shWaveIn == NULL) return; // already closed
+
+  waveInStop(shWaveIn);
+  waveInReset(shWaveIn); // returns all pending buffers to us
+
+  for (unsigned i = 0; i < numBlocks; ++i) {
+    if (readHdrs[i].dwFlags & WHDR_PREPARED) {
+      waveInUnprepareHeader(shWaveIn, &readHdrs[i], sizeof (WAVEHDR));
+    }
+  }
+
+  waveInClose(shWaveIn);
+  waveIn_reset(); // also frees our buffers, and clears the (static) state
+}
+
+void WindowsAudioInputDevice_common::waveIn_reset() {
+  shWaveIn = NULL;
+  // Free our buffers, and return the static capture state to its initial (closed) condition:
+  delete[] readData; readData = NULL;
+  bytesUsedAtReadHead = 0;
+
+  delete[] readHdrs; readHdrs = NULL;
+  readHead = readTail = NULL;
+
+  delete[] readTimes; readTimes = NULL;
+
+  hAudioReady = NULL; // NOTE(review): the event handle is only forgotten, never CloseHandle()d - confirm this isn't a handle leak
+}
+
+unsigned WindowsAudioInputDevice_common::readFromBuffers(unsigned char* to, unsigned numBytesWanted, struct timeval& creationTime) { // copies up to "numBytesWanted" queued bytes into "to"; returns the number copied
+  // Begin by computing the creation time of (the first bytes of) this returned audio data:
+  if (readHead != NULL) {
+    int hdrIndex = readHead - readHdrs;
+    creationTime = readTimes[hdrIndex];
+
+    // Adjust this time to allow for any data that's already been read from this buffer:
+    if (bytesUsedAtReadHead > 0) {
+      creationTime.tv_usec += (unsigned)(uSecsPerByte*bytesUsedAtReadHead);
+      creationTime.tv_sec += creationTime.tv_usec/1000000; // carry into seconds
+      creationTime.tv_usec %= 1000000;
+    }
+  }
+
+  // Then, read from each available buffer, until we have the data that we want:
+  unsigned numBytesRead = 0;
+  while (readHead != NULL && numBytesRead < numBytesWanted) {
+    unsigned thisRead = min(readHead->dwBytesRecorded - bytesUsedAtReadHead, numBytesWanted - numBytesRead);
+    memmove(&to[numBytesRead], &readHead->lpData[bytesUsedAtReadHead], thisRead);
+    numBytesRead += thisRead;
+    bytesUsedAtReadHead += thisRead;
+    if (bytesUsedAtReadHead == readHead->dwBytesRecorded) {
+      // We're finished with the block; give it back to the device:
+      releaseHeadBuffer();
+    }
+  }
+
+  return numBytesRead;
+}
+
+void WindowsAudioInputDevice_common::releaseHeadBuffer() { // pop the head block off the incoming-audio queue, and requeue it with the device
+  WAVEHDR* toRelease = readHead;
+  if (readHead == NULL) return;
+
+  readHead = readHead->lpNext;
+  if (readHead == NULL) readTail = NULL;
+
+  toRelease->lpNext = NULL;
+  toRelease->dwBytesRecorded = 0;
+  toRelease->dwFlags &= ~WHDR_DONE; // mark the block 'not yet filled' again
+  waveInAddBuffer(shWaveIn, toRelease, sizeof (WAVEHDR));
+  bytesUsedAtReadHead = 0;
+}
+
+void WindowsAudioInputDevice_common::waveInProc(WAVEHDR* hdr) { // called (via waveInCallback) whenever the device has filled a buffer. NOTE(review): this runs in the waveIn callback context while the event loop also reads/writes this queue, without interlocking - confirm safety
+  unsigned hdrIndex = hdr - readHdrs;
+
+  // Record the time that the data arrived:
+  int dontCare;
+  gettimeofday(&readTimes[hdrIndex], &dontCare);
+
+  // Add the block to the tail of the queue:
+  hdr->lpNext = NULL;
+  if (readTail != NULL) {
+    readTail->lpNext = hdr;
+    readTail = hdr;
+  } else {
+    readHead = readTail = hdr;
+  }
+  SetEvent(hAudioReady); // signal that incoming audio is available
+}
+
+HWAVEIN WindowsAudioInputDevice_common::shWaveIn = NULL; // shared handle to the open waveIn device
+
+unsigned WindowsAudioInputDevice_common::blockSize = 0; // bytes per input block (set in openWavInPort())
+unsigned WindowsAudioInputDevice_common::numBlocks = 0; // number of blocks in the input buffer ring
+
+unsigned char* WindowsAudioInputDevice_common::readData = NULL; // backing storage for all "numBlocks" blocks
+DWORD WindowsAudioInputDevice_common::bytesUsedAtReadHead = 0; // bytes already consumed from the head block
+double WindowsAudioInputDevice_common::uSecsPerByte = 0.0; // used to adjust timestamps for bytes consumed since arrival
+double WindowsAudioInputDevice_common::averageLevel = 0.0; // last value computed by getAverageLevel()
+
+WAVEHDR* WindowsAudioInputDevice_common::readHdrs = NULL; // array of per-block headers
+WAVEHDR* WindowsAudioInputDevice_common::readHead = NULL; // oldest filled, not-yet-consumed block
+WAVEHDR* WindowsAudioInputDevice_common::readTail = NULL; // newest filled block
+
+struct timeval* WindowsAudioInputDevice_common::readTimes = NULL; // arrival time of each block
+
+HANDLE WindowsAudioInputDevice_common::hAudioReady = NULL; // event signaled (in waveInProc()) when a block arrives
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice_common.hh b/WindowsAudioInputDevice/WindowsAudioInputDevice_common.hh
new file mode 100644
index 0000000..353b872
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice_common.hh
@@ -0,0 +1,82 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Windows implementation of a generic audio input device
+// Base class for both library versions:
+// One that uses Windows' built-in software mixer; another that doesn't.
+// C++ header
+
+#ifndef _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH
+#define _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH
+
+#ifndef _AUDIO_INPUT_DEVICE_HH
+#include "AudioInputDevice.hh"
+#endif
+
+class WindowsAudioInputDevice_common: public AudioInputDevice {
+public:
+  static Boolean openWavInPort(int index, unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS);
+  static void waveIn_close();
+  static void waveInProc(WAVEHDR* hdr); // Windows audio callback function
+
+protected:
+  WindowsAudioInputDevice_common(UsageEnvironment& env, int inputPortNumber,
+				 unsigned char bitsPerSample, unsigned char numChannels,
+				 unsigned samplingFrequency, unsigned granularityInMS);
+  // virtual base class
+
+  virtual ~WindowsAudioInputDevice_common();
+
+  Boolean initialSetInputPort(int portIndex); // returns False (with a result message set) on failure
+
+protected:
+  int fCurPortIndex; // currently-selected input port; initialized to -1 (none)
+
+private:
+  // redefined virtual functions:
+  virtual void doGetNextFrame();
+  virtual void doStopGettingFrames();
+  virtual double getAverageLevel() const;
+
+private:
+  static void audioReadyPoller(void* clientData); // static trampoline for the member function below
+
+  void audioReadyPoller1();
+  void onceAudioIsReady();
+
+  // Audio input buffering:
+  static Boolean waveIn_open(unsigned uid, WAVEFORMATEX& wfx);
+  static void waveIn_reset(); // used to implement both of the above
+  static unsigned readFromBuffers(unsigned char* to, unsigned numBytesWanted, struct timeval& creationTime);
+  static void releaseHeadBuffer(); // from the input header queue
+
+private:
+  static unsigned _bitsPerSample; // set by the constructor; shared by all instances
+  static HWAVEIN shWaveIn;
+  static unsigned blockSize, numBlocks;
+  static unsigned char* readData; // buffer for incoming audio data
+  static DWORD bytesUsedAtReadHead; // number of bytes that have already been read at head
+  static double uSecsPerByte; // used to adjust the time for # bytes consumed since arrival
+  static double averageLevel; // cached result of getAverageLevel()
+  static WAVEHDR *readHdrs, *readHead, *readTail; // input header queue
+  static struct timeval* readTimes; // arrival time of each block
+  static HANDLE hAudioReady; // audio ready event
+
+  Boolean fHaveStarted; // True once doGetNextFrame() has been called
+  unsigned fTotalPollingDelay; // uSeconds
+};
+
+#endif
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.cpp b/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.cpp
new file mode 100644
index 0000000..4391844
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.cpp
@@ -0,0 +1,496 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 2001-2004 Live Networks, Inc. All rights reserved.
+// Windows implementation of a generic audio input device
+// This version uses Windows' built-in software mixer.
+// Implementation
+
+#include <WindowsAudioInputDevice_mixer.hh>
+
+////////// Mixer and AudioInputPort definition //////////
+
+// A record describing one input port (source line) on a Windows mixer device.
+class AudioInputPort {
+public:
+  int tag; // the mixer line id (MIXERLINE::dwLineID) for this port
+  DWORD dwComponentType; // e.g., MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE
+  char name[MIXER_LONG_NAME_CHARS]; // human-readable port name
+};
+
+// Wraps one Windows mixer device, together with the set of audio input
+// ports (source lines) that feed its wave-in recording destination.
+class Mixer {
+public:
+  Mixer();
+  virtual ~Mixer();
+
+  // Opens this mixer (and its associated wave-in device) with the given audio parameters:
+  void open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS);
+  void open(); // open with default parameters
+  void getPortsInfo(); // fills in "ports"/"numPorts"; call while the mixer is open
+  Boolean enableInputPort(unsigned portIndex, char const*& errReason, MMRESULT& errCode);
+  void close();
+
+  unsigned index; // mixer device id, as used by the Windows mixer API
+  HMIXER hMixer; // valid when open
+  DWORD dwRecLineID; // valid when open
+  unsigned numPorts;
+  AudioInputPort* ports; // array of "numPorts" entries
+  char name[MAXPNAMELEN];
+};
+
+
+////////// AudioInputDevice (remaining) implementation //////////
+
+// Factory function: creates a Windows audio input device for the given port,
+// or returns NULL (after deleting the partially-constructed object) if the
+// requested input port could not be set up.
+AudioInputDevice*
+AudioInputDevice::createNew(UsageEnvironment& env, int inputPortNumber,
+			    unsigned char bitsPerSample,
+			    unsigned char numChannels,
+			    unsigned samplingFrequency,
+			    unsigned granularityInMS) {
+  Boolean success;
+  WindowsAudioInputDevice* newSource
+    = new WindowsAudioInputDevice(env, inputPortNumber,
+				  bitsPerSample, numChannels,
+				  samplingFrequency, granularityInMS,
+				  success);
+  // The constructor reports (via "success") whether the port could be opened:
+  if (!success) {delete newSource; newSource = NULL;}
+
+  return newSource;
+}
+
+// Returns a newly-allocated list of the names of all audio input ports,
+// across all mixers on this computer.
+AudioPortNames* AudioInputDevice::getPortNames() {
+  WindowsAudioInputDevice::initializeIfNecessary();
+
+  AudioPortNames* portNames = new AudioPortNames;
+  portNames->numPorts = WindowsAudioInputDevice::numInputPortsTotal;
+  portNames->portName = new char*[WindowsAudioInputDevice::numInputPortsTotal];
+
+  // If there's only one mixer, print only the port name.
+  // If there's two or more mixers, also include the mixer name
+  // (to disambiguate port names that may be the same name in different mixers)
+  char portNameBuffer[2*MAXPNAMELEN+10/*slop*/];
+  char mixerNameBuffer[MAXPNAMELEN];
+  char const* portNameFmt;
+  if (WindowsAudioInputDevice::numMixers <= 1) {
+    portNameFmt = "%s";
+  } else {
+    portNameFmt = "%s (%s)";
+  }
+
+  unsigned curPortNum = 0;
+  for (unsigned i = 0; i < WindowsAudioInputDevice::numMixers; ++i) {
+    Mixer& mixer = WindowsAudioInputDevice::ourMixers[i];
+
+    if (WindowsAudioInputDevice::numMixers <= 1) {
+      mixerNameBuffer[0] = '\0';
+    } else {
+      strncpy(mixerNameBuffer, mixer.name, sizeof mixerNameBuffer);
+#if 0
+      // Hack: Simplify the mixer name, by truncating after the first space character:
+      for (int k = 0; k < sizeof mixerNameBuffer && mixerNameBuffer[k] != '\0'; ++k) {
+	if (mixerNameBuffer[k] == ' ') {
+	  mixerNameBuffer[k] = '\0';
+	  break;
+	}
+      }
+#endif
+    }
+
+    for (unsigned j = 0; j < mixer.numPorts; ++j) {
+      sprintf(portNameBuffer, portNameFmt, mixer.ports[j].name, mixerNameBuffer);
+      portNames->portName[curPortNum++] = strDup(portNameBuffer);
+    }
+  }
+
+  return portNames;
+}
+
+
+////////// WindowsAudioInputDevice implementation //////////
+
+// Constructor: delegates common setup to "WindowsAudioInputDevice_common",
+// then attempts to select the requested input port.  "success" reports
+// whether that selection worked (checked by "createNew()").
+WindowsAudioInputDevice
+::WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber,
+			  unsigned char bitsPerSample,
+			  unsigned char numChannels,
+			  unsigned samplingFrequency,
+			  unsigned granularityInMS,
+			  Boolean& success)
+  : WindowsAudioInputDevice_common(env, inputPortNumber,
+	bitsPerSample, numChannels, samplingFrequency, granularityInMS),
+    fCurMixerId(-1) {
+  success = initialSetInputPort(inputPortNumber);
+}
+
+// Destructor: closes the currently-open mixer (if any).
+// NOTE(review): this also deletes the *static* "ourMixers" table, so
+// destroying one device instance tears down state shared by all instances -
+// presumably only one instance is expected to exist at a time; confirm.
+WindowsAudioInputDevice::~WindowsAudioInputDevice() {
+  if (fCurMixerId >= 0) ourMixers[fCurMixerId].close();
+
+  delete[] ourMixers; ourMixers = NULL;
+  numMixers = numInputPortsTotal = 0;
+}
+
+// One-time (idempotent) setup: enumerates all mixer devices, probes each
+// one's input ports, and totals the number of input ports available.
+void WindowsAudioInputDevice::initializeIfNecessary() {
+  if (ourMixers != NULL) return; // we've already been initialized
+  numMixers = mixerGetNumDevs();
+  ourMixers = new Mixer[numMixers];
+
+  // Initialize each mixer:
+  numInputPortsTotal = 0;
+  for (unsigned i = 0; i < numMixers; ++i) {
+    Mixer& mixer = ourMixers[i];
+    mixer.index = i;
+    mixer.open(); // probe-open with default parameters
+    if (mixer.hMixer != NULL) {
+      // This device has a valid mixer.  Get information about its ports:
+      mixer.getPortsInfo();
+      mixer.close();
+
+      if (mixer.numPorts == 0) continue;
+
+      numInputPortsTotal += mixer.numPorts;
+    } else {
+      // Probe-open failed; record this mixer as having no usable ports:
+      mixer.ports = NULL;
+      mixer.numPorts = 0;
+    }
+  }
+}
+
+// Selects the input port with the given global index (spanning all mixers):
+// maps the index to a (mixer, port-within-mixer) pair, switches mixers if
+// necessary, then enables the port.  Returns False - with a result message -
+// on a bad index, a disallowed device, or a mixer-API failure.
+Boolean WindowsAudioInputDevice::setInputPort(int portIndex) {
+  initializeIfNecessary();
+
+  if (portIndex < 0 || portIndex >= (int)numInputPortsTotal) { // bad index
+    envir().setResultMsg("Bad input port index\n");
+    return False;
+  }
+
+  // Find the mixer and port that corresponds to "portIndex":
+  // (The bounds check above guarantees this loop "break"s, so
+  //  "portWithinMixer" is always assigned before use.)
+  int newMixerId, portWithinMixer, portIndexCount = 0;
+  for (newMixerId = 0; newMixerId < (int)numMixers; ++newMixerId) {
+    int prevPortIndexCount = portIndexCount;
+    portIndexCount += ourMixers[newMixerId].numPorts;
+    if (portIndexCount > portIndex) { // it's with this mixer
+      portWithinMixer = portIndex - prevPortIndexCount;
+      break;
+    }
+  }
+
+  // Check that this mixer is allowed:
+  if (allowedDeviceNames != NULL) {
+    int i;
+    for (i = 0; allowedDeviceNames[i] != NULL; ++i) {
+      if (strncmp(ourMixers[newMixerId].name, allowedDeviceNames[i],
+		  strlen(allowedDeviceNames[i])) == 0) {
+	// The allowed device name is a prefix of this mixer's name
+	break; // this mixer is allowed
+      }
+    }
+    if (allowedDeviceNames[i] == NULL) { // this mixer is not on the allowed list
+      envir().setResultMsg("Access to this audio device is not allowed\n");
+      return False;
+    }
+  }
+
+  if (newMixerId != fCurMixerId) {
+    // The mixer has changed, so close the old one and open the new one:
+    if (fCurMixerId >= 0) ourMixers[fCurMixerId].close();
+    fCurMixerId = newMixerId;
+    ourMixers[fCurMixerId].open(fNumChannels, fSamplingFrequency, fGranularityInMS);
+  }
+  if (portIndex != fCurPortIndex) {
+    // Change the input port:
+    fCurPortIndex = portIndex;
+    char const* errReason;
+    MMRESULT errCode;
+    if (!ourMixers[newMixerId].enableInputPort(portWithinMixer, errReason, errCode)) {
+      // NOTE(review): "errReason" is always a short literal, so the 100-byte
+      // buffer should suffice - but a snprintf() would be safer; consider.
+      char resultMsg[100];
+      sprintf(resultMsg, "Failed to enable input port: %s failed (0x%08x)\n", errReason, errCode);
+      envir().setResultMsg(resultMsg);
+      return False;
+    }
+    // Later, may also need to transfer 'gain' to new port #####
+  }
+  return True;
+}
+
+// Definitions of the class's static members (shared by all instances):
+unsigned WindowsAudioInputDevice::numMixers = 0;
+
+Mixer* WindowsAudioInputDevice::ourMixers = NULL; // array of "numMixers" entries
+
+unsigned WindowsAudioInputDevice::numInputPortsTotal = 0;
+
+
+////////// Mixer and AudioInputPort implementation //////////
+
+// Starts out closed; "index" is filled in by the caller before open().
+Mixer::Mixer()
+  : hMixer(NULL), dwRecLineID(0), numPorts(0), ports(NULL) {
+}
+
+Mixer::~Mixer() {
+  delete[] ports;
+}
+
+// Opens this mixer device and its associated wave-in device, with the given
+// audio parameters.  On success, "hMixer", "dwRecLineID", and "name" are set.
+// On any failure the "do { ... break; } while (0)" idiom falls through to the
+// error path, which close()s (leaving hMixer == NULL as the failure flag).
+void Mixer::open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS) {
+  HMIXER newHMixer = NULL;
+  do {
+    MIXERCAPS mc;
+    if (mixerGetDevCaps(index, &mc, sizeof mc) != MMSYSERR_NOERROR) break;
+
+    #ifdef UNICODE
+    // Copy the mixer name:
+    wcstombs(name, mc.szPname, MAXPNAMELEN);
+    #else
+    strncpy(name, mc.szPname, MAXPNAMELEN);
+    #endif
+
+    // Find the correct line for this mixer:
+    // (scan the wave-in devices for the one this mixer targets)
+    unsigned i, uWavIn;
+    unsigned nWavIn = waveInGetNumDevs();
+    for (i = 0; i < nWavIn; ++i) {
+      WAVEINCAPS wic;
+      if (waveInGetDevCaps(i, &wic, sizeof wic) != MMSYSERR_NOERROR) continue;
+
+      MIXERLINE ml;
+      ml.cbStruct = sizeof ml;
+      ml.Target.dwType = MIXERLINE_TARGETTYPE_WAVEIN;
+
+      #ifdef UNICODE
+      wcsncpy(ml.Target.szPname, wic.szPname, MAXPNAMELEN);
+      #else
+      strncpy(ml.Target.szPname, wic.szPname, MAXPNAMELEN);
+      #endif
+
+      ml.Target.vDriverVersion = wic.vDriverVersion;
+      ml.Target.wMid = wic.wMid;
+      ml.Target.wPid = wic.wPid;
+
+      if (mixerGetLineInfo((HMIXEROBJ)index, &ml, MIXER_GETLINEINFOF_TARGETTYPE/*|MIXER_OBJECTF_MIXER*/) == MMSYSERR_NOERROR) {
+	// this is the right line
+	uWavIn = i;
+	dwRecLineID = ml.dwLineID;
+	break;
+      }
+    }
+    if (i >= nWavIn) break; // error: we couldn't find the right line
+      // ("uWavIn" is only read below when the loop broke, so it's always set)
+
+    if (mixerOpen(&newHMixer, index, (unsigned long)NULL, (unsigned long)NULL, MIXER_OBJECTF_MIXER) != MMSYSERR_NOERROR) break;
+    if (newHMixer == NULL) break;
+
+    // Sanity check: re-call "mixerGetDevCaps()" using the mixer device handle:
+    if (mixerGetDevCaps((UINT)newHMixer, &mc, sizeof mc) != MMSYSERR_NOERROR) break;
+    if (mc.cDestinations < 1) break; // error: this mixer has no destinations
+
+    if (!WindowsAudioInputDevice_common::openWavInPort(uWavIn, numChannels, samplingFrequency, granularityInMS)) break;
+
+    hMixer = newHMixer;
+    return;
+  } while (0);
+
+  // An error occurred:
+  close();
+}
+
+// Open with default parameters: mono, 8 kHz sampling, 20 ms granularity.
+void Mixer::open() {
+  open(1, 8000, 20);
+}
+
+// Enumerates this (open) mixer's input ports: finds the recording
+// destination line, then records each of its source lines in "ports".
+// Finally, moves a microphone port (if any) to the front of the list.
+void Mixer::getPortsInfo() {
+  MIXERCAPS mc;
+  mixerGetDevCaps((UINT)hMixer, &mc, sizeof mc);
+
+  MIXERLINE mlt;
+  unsigned i;
+  for (i = 0; i < mc.cDestinations; ++i) {
+    memset(&mlt, 0, sizeof mlt);
+    mlt.cbStruct = sizeof mlt;
+    mlt.dwDestination = i;
+    if (mixerGetLineInfo((HMIXEROBJ)hMixer, &mlt, MIXER_GETLINEINFOF_DESTINATION) != MMSYSERR_NOERROR) continue;
+    if (mlt.dwLineID == dwRecLineID) break; // this is the destination we're interested in
+  }
+  // NOTE(review): if no destination matched "dwRecLineID" above, "mlt" still
+  // holds the last destination examined - confirm this case cannot occur.
+  ports = new AudioInputPort[mlt.cConnections];
+
+  numPorts = mlt.cConnections;
+  for (i = 0; i < numPorts; ++i) {
+    MIXERLINE mlc;
+    memcpy(&mlc, &mlt, sizeof mlc);
+    mlc.dwSource = i;
+    mixerGetLineInfo((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINEINFOF_SOURCE/*|MIXER_OBJECTF_HMIXER*/);
+    ports[i].tag = mlc.dwLineID;
+    ports[i].dwComponentType = mlc.dwComponentType;
+#ifdef UNICODE
+    wcstombs(ports[i].name, mlc.szName, MIXER_LONG_NAME_CHARS);
+#else
+    strncpy(ports[i].name, mlc.szName, MIXER_LONG_NAME_CHARS);
+#endif
+  }
+
+  // Make the microphone the first port in the list:
+  for (i = 1; i < numPorts; ++i) {
+#ifdef OLD_MICROPHONE_TESTING_CODE
+    if (_strnicmp("mic", ports[i].name, 3) == 0 ||
+	_strnicmp("mik", ports[i].name, 3) == 0) {
+#else
+    if (ports[i].dwComponentType == MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE) {
+#endif
+      AudioInputPort tmp = ports[0];
+      ports[0] = ports[i];
+      ports[i] = tmp;
+    }
+  }
+}
+
+// Enables (selects) the given input port on this mixer's MUX/MIXER control,
+// deselecting all other ports.  On failure, returns False and sets
+// "errReason" (the name of the failing API call) and "errCode" (its MMRESULT).
+Boolean Mixer::enableInputPort(unsigned portIndex, char const*& errReason, MMRESULT& errCode) {
+  errReason = NULL; // unless there's an error
+  AudioInputPort& port = ports[portIndex];
+
+  MIXERCONTROL mc;
+  mc.cMultipleItems = 1; // in case it doesn't get set below
+  MIXERLINECONTROLS mlc;
+#if 0 // the following doesn't seem to be needed, and can fail:
+  mlc.cbStruct = sizeof mlc;
+  mlc.pamxctrl = &mc;
+  mlc.cbmxctrl = sizeof (MIXERCONTROL);
+  mlc.dwLineID = port.tag;
+  mlc.dwControlType = MIXERCONTROL_CONTROLTYPE_VOLUME;
+  if ((errCode = mixerGetLineControls((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINECONTROLSF_ONEBYTYPE/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) {
+    errReason = "mixerGetLineControls()";
+    return False;
+  }
+#endif
+
+  // Look up the port's line, to learn its name:
+  MIXERLINE ml;
+  memset(&ml, 0, sizeof (MIXERLINE));
+  ml.cbStruct = sizeof (MIXERLINE);
+  ml.dwLineID = port.tag;
+  if ((errCode = mixerGetLineInfo((HMIXEROBJ)hMixer, &ml, MIXER_GETLINEINFOF_LINEID)) != MMSYSERR_NOERROR) {
+    errReason = "mixerGetLineInfo()1";
+    return False;
+  }
+
+  // Save the port's name, for matching against the control's item list below:
+
+  #ifdef UNICODE
+  wchar_t portname[MIXER_LONG_NAME_CHARS+1];
+  wcsncpy(portname, ml.szName, MIXER_LONG_NAME_CHARS);
+  #else
+  char portname[MIXER_LONG_NAME_CHARS+1];
+  strncpy(portname, ml.szName, MIXER_LONG_NAME_CHARS);
+  #endif
+
+  // Now look up the recording destination line:
+  memset(&ml, 0, sizeof (MIXERLINE));
+  ml.cbStruct = sizeof (MIXERLINE);
+  ml.dwLineID = dwRecLineID;
+  if ((errCode = mixerGetLineInfo((HMIXEROBJ)hMixer, &ml, MIXER_GETLINEINFOF_LINEID/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) {
+    errReason = "mixerGetLineInfo()2";
+    return False;
+  }
+
+  // Get Mixer/MUX control information (need control id to set and get control details)
+  mlc.cbStruct = sizeof mlc;
+  mlc.dwLineID = ml.dwLineID;
+  mlc.cControls = 1;
+  mc.cbStruct = sizeof mc; // Needed???#####
+  mc.dwControlID = 0xDEADBEEF; // For testing #####
+  mlc.pamxctrl = &mc;
+  mlc.cbmxctrl = sizeof mc;
+  mlc.dwControlType = MIXERCONTROL_CONTROLTYPE_MUX; // Single Select
+  if ((errCode = mixerGetLineControls((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINECONTROLSF_ONEBYTYPE/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) {
+    mlc.dwControlType = MIXERCONTROL_CONTROLTYPE_MIXER; // Multiple Select
+    mixerGetLineControls((HMIXEROBJ)hMixer, &mlc, MIXER_GETLINECONTROLSF_ONEBYTYPE/*|MIXER_OBJECTF_HMIXER*/);
+  }
+
+  unsigned matchLine = 0;
+  if (mc.cMultipleItems > 1) {
+    // Before getting control, we need to know which line to grab.
+    // We figure this out by listing the lines, and comparing names:
+    MIXERCONTROLDETAILS mcd;
+    mcd.cbStruct = sizeof mcd;
+    mcd.cChannels = ml.cChannels;
+    mcd.cMultipleItems = mc.cMultipleItems;
+    MIXERCONTROLDETAILS_LISTTEXT* mcdlText = new MIXERCONTROLDETAILS_LISTTEXT[mc.cMultipleItems];
+    mcd.cbDetails = sizeof (MIXERCONTROLDETAILS_LISTTEXT);
+    mcd.paDetails = mcdlText;
+
+    if (mc.dwControlID != 0xDEADBEEF) { // we know the control id for real
+      mcd.dwControlID = mc.dwControlID;
+      if ((errCode = mixerGetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_GETCONTROLDETAILSF_LISTTEXT/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) {
+	delete[] mcdlText;
+	errReason = "mixerGetControlDetails()1";
+	return False;
+      }
+    } else {
+      // Hack: We couldn't find a MUX or MIXER control, so try to guess the control id:
+      for (mc.dwControlID = 0; mc.dwControlID < 32; ++mc.dwControlID) {
+	mcd.dwControlID = mc.dwControlID;
+	if ((errCode = mixerGetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_GETCONTROLDETAILSF_LISTTEXT/*|MIXER_OBJECTF_HMIXER*/)) == MMSYSERR_NOERROR) break;
+      }
+      if (mc.dwControlID == 32) { // unable to guess mux/mixer control id
+	delete[] mcdlText;
+	errReason = "mixerGetControlDetails()2";
+	return False;
+      }
+    }
+
+    // Find the item whose name matches our port's name:
+    #ifdef UNICODE
+    for (unsigned i = 0; i < mcd.cMultipleItems; ++i) {
+      if (wcscmp(mcdlText[i].szName, portname) == 0) {
+	matchLine = i;
+	break;
+      }
+    }
+    #else
+    for (unsigned i = 0; i < mcd.cMultipleItems; ++i) {
+      if (strcmp(mcdlText[i].szName, portname) == 0) {
+	matchLine = i;
+	break;
+      }
+    }
+    #endif
+
+    delete[] mcdlText;
+  }
+
+  // Now get control itself:
+  MIXERCONTROLDETAILS mcd;
+  mcd.cbStruct = sizeof mcd;
+  mcd.dwControlID = mc.dwControlID;
+  mcd.cChannels = ml.cChannels;
+  mcd.cMultipleItems = mc.cMultipleItems;
+  MIXERCONTROLDETAILS_BOOLEAN* mcdbState = new MIXERCONTROLDETAILS_BOOLEAN[mc.cMultipleItems];
+  mcd.paDetails = mcdbState;
+  mcd.cbDetails = sizeof (MIXERCONTROLDETAILS_BOOLEAN);
+
+  if ((errCode = mixerGetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_GETCONTROLDETAILSF_VALUE/*|MIXER_OBJECTF_HMIXER*/)) != MMSYSERR_NOERROR) {
+    delete[] mcdbState;
+    errReason = "mixerGetControlDetails()3";
+    return False;
+  }
+
+  // Select only the matched item; deselect all others:
+  for (unsigned j = 0; j < mcd.cMultipleItems; ++j) {
+    mcdbState[j].fValue = (j == matchLine);
+  }
+
+  if ((errCode = mixerSetControlDetails((HMIXEROBJ)hMixer, &mcd, MIXER_OBJECTF_HMIXER)) != MMSYSERR_NOERROR) {
+    delete[] mcdbState;
+    errReason = "mixerSetControlDetails()";
+    return False;
+  }
+  delete[] mcdbState;
+
+  return True;
+}
+
+
+// Closes the wave-in device and the mixer handle; safe to call when
+// already closed (hMixer == NULL).
+void Mixer::close() {
+  WindowsAudioInputDevice_common::waveIn_close();
+  if (hMixer != NULL) mixerClose(hMixer);
+  hMixer = NULL; dwRecLineID = 0;
+}
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.hh b/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.hh
new file mode 100644
index 0000000..7ae4355
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice_mixer.hh
@@ -0,0 +1,57 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Windows implementation of a generic audio input device
+// This version uses Windows' built-in software mixer.
+// C++ header
+//
+// To use this, call "AudioInputDevice::createNew()".
+// You can also call "AudioInputDevice::getPortNames()" to get a list
+// of port names.
+
+#ifndef _WINDOWS_AUDIO_INPUT_DEVICE_MIXER_HH
+#define _WINDOWS_AUDIO_INPUT_DEVICE_MIXER_HH
+
+#ifndef _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH
+#include "WindowsAudioInputDevice_common.hh"
+#endif
+
+// The mixer-based Windows audio input device.  Instances are created only
+// via "AudioInputDevice::createNew()" (hence the private constructor and
+// the "friend" declaration).
+class WindowsAudioInputDevice: public WindowsAudioInputDevice_common {
+private:
+  friend class AudioInputDevice;
+  WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber,
+			  unsigned char bitsPerSample, unsigned char numChannels,
+			  unsigned samplingFrequency, unsigned granularityInMS,
+			  Boolean& success);
+      // called only by createNew()
+
+  virtual ~WindowsAudioInputDevice();
+
+  static void initializeIfNecessary(); // enumerates the mixers (idempotent)
+
+private:
+  // redefined virtual functions:
+  virtual Boolean setInputPort(int portIndex);
+
+private:
+  static unsigned numMixers; // number of mixer devices on this computer
+  static class Mixer* ourMixers; // array of "numMixers" entries
+  static unsigned numInputPortsTotal; // total ports, summed over all mixers
+
+  int fCurMixerId; // index into "ourMixers" of the open mixer; -1 if none
+};
+
+#endif
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.cpp b/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.cpp
new file mode 100644
index 0000000..f9d9058
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.cpp
@@ -0,0 +1,182 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 2001-2004 Live Networks, Inc. All rights reserved.
+// Windows implementation of a generic audio input device
+// This version does not use Windows' built-in software mixer.
+// Implementation
+
+#include <WindowsAudioInputDevice_noMixer.hh>
+
+////////// AudioInputPort definition //////////
+
+// A record describing one wave-in audio input device (used directly,
+// without going through the Windows mixer API).
+class AudioInputPort {
+public:
+  // Opens the wave-in device with the given audio parameters:
+  void open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS);
+  void open(); // open with default parameters
+  void close();
+
+public:
+  int index; // wave-in device id
+  char name[MAXPNAMELEN]; // human-readable device name
+};
+
+
+////////// AudioInputDevice (remaining) implementation //////////
+
+// Factory function: creates a Windows audio input device, or returns NULL
+// (after deleting the partially-constructed object) if the requested input
+// port could not be set up.
+AudioInputDevice*
+AudioInputDevice::createNew(UsageEnvironment& env, int inputPortNumber,
+			    unsigned char bitsPerSample,
+			    unsigned char numChannels,
+			    unsigned samplingFrequency,
+			    unsigned granularityInMS) {
+  Boolean success;
+  WindowsAudioInputDevice* newSource
+    = new WindowsAudioInputDevice(env, inputPortNumber,
+				  bitsPerSample, numChannels,
+				  samplingFrequency, granularityInMS,
+				  success);
+  // The constructor reports (via "success") whether the port could be opened:
+  if (!success) {delete newSource; newSource = NULL;}
+
+  return newSource;
+}
+
+// Returns a newly-allocated list of the names of all wave-in input devices.
+AudioPortNames* AudioInputDevice::getPortNames() {
+  WindowsAudioInputDevice::initializeIfNecessary();
+
+  AudioPortNames* portNames = new AudioPortNames;
+  portNames->numPorts = WindowsAudioInputDevice::numAudioInputPorts;
+  portNames->portName = new char*[WindowsAudioInputDevice::numAudioInputPorts];
+
+  for (unsigned i = 0; i < WindowsAudioInputDevice::numAudioInputPorts; ++i) {
+    AudioInputPort& audioInputPort = WindowsAudioInputDevice::ourAudioInputPorts[i];
+
+    portNames->portName[i] = strDup(audioInputPort.name);
+  }
+
+  return portNames;
+}
+
+
+////////// WindowsAudioInputDevice implementation //////////
+
+// Constructor: delegates common setup to "WindowsAudioInputDevice_common",
+// then attempts to select the requested input port.  "success" reports
+// whether that selection worked (checked by "createNew()").
+WindowsAudioInputDevice
+::WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber,
+			  unsigned char bitsPerSample,
+			  unsigned char numChannels,
+			  unsigned samplingFrequency,
+			  unsigned granularityInMS,
+			  Boolean& success)
+  : WindowsAudioInputDevice_common(env, inputPortNumber,
+	bitsPerSample, numChannels, samplingFrequency, granularityInMS) {
+  success = initialSetInputPort(inputPortNumber);
+}
+
+// Destructor: closes the currently-open port (if any).
+// NOTE(review): this also deletes the *static* "ourAudioInputPorts" table,
+// so destroying one instance tears down state shared by all instances -
+// presumably only one instance is expected to exist at a time; confirm.
+WindowsAudioInputDevice::~WindowsAudioInputDevice() {
+  if (fCurPortIndex >= 0) ourAudioInputPorts[fCurPortIndex].close();
+
+  delete[] ourAudioInputPorts; ourAudioInputPorts = NULL;
+  numAudioInputPorts = 0;
+}
+
+// One-time (idempotent) setup: enumerates the wave-in devices, and records
+// each one's name (via a probe open/close).
+void WindowsAudioInputDevice::initializeIfNecessary() {
+  if (ourAudioInputPorts != NULL) return; // we've already been initialized
+  numAudioInputPorts = waveInGetNumDevs();
+  ourAudioInputPorts = new AudioInputPort[numAudioInputPorts];
+
+  // Initialize each audio input port
+  for (unsigned i = 0; i < numAudioInputPorts; ++i) {
+    AudioInputPort& port = ourAudioInputPorts[i];
+    port.index = i;
+    port.open(); // to set the port name
+    port.close();
+  }
+}
+
+// Selects the audio input port with the given index, closing any
+// previously-selected port and opening the new one as needed.
+// Returns False - with an explanatory result message - if the index is out
+// of range, or if the port's device name is not on "allowedDeviceNames".
+Boolean WindowsAudioInputDevice::setInputPort(int portIndex) {
+  initializeIfNecessary();
+
+  if (portIndex < 0 || portIndex >= (int)numAudioInputPorts) { // bad index
+    envir().setResultMsg("Bad input port index\n");
+    return False;
+  }
+
+  // Check that this port is allowed:
+  if (allowedDeviceNames != NULL) {
+    int i;
+    for (i = 0; allowedDeviceNames[i] != NULL; ++i) {
+      if (strncmp(ourAudioInputPorts[portIndex].name, allowedDeviceNames[i],
+		  strlen(allowedDeviceNames[i])) == 0) {
+	// The allowed device name is a prefix of this port's name
+	break; // this port is allowed
+      }
+    }
+    if (allowedDeviceNames[i] == NULL) { // this port is not on the allowed list
+      envir().setResultMsg("Access to this audio device is not allowed\n");
+      return False;
+    }
+  }
+
+  if (portIndex != fCurPortIndex) {
+    // The port has changed, so close the old one and open the new one:
+    if (fCurPortIndex >= 0) ourAudioInputPorts[fCurPortIndex].close();
+    fCurPortIndex = portIndex;
+    ourAudioInputPorts[fCurPortIndex].open(fNumChannels, fSamplingFrequency, fGranularityInMS);
+  }
+  // (The redundant trailing "fCurPortIndex = portIndex;" - and a stray extra
+  // semicolon after close() - have been removed; behavior is unchanged.)
+  return True;
+}
+
+// Definitions of the class's static members (shared by all instances):
+unsigned WindowsAudioInputDevice::numAudioInputPorts = 0;
+
+AudioInputPort* WindowsAudioInputDevice::ourAudioInputPorts = NULL; // array of "numAudioInputPorts" entries
+
+
+////////// AudioInputPort implementation //////////
+
+// Opens this wave-in device with the given audio parameters, recording its
+// name.  On any failure the "do { ... break; } while (0)" idiom falls
+// through to the error path, which close()s the device.
+void AudioInputPort::open(unsigned numChannels, unsigned samplingFrequency, unsigned granularityInMS) {
+  do {
+    // Get the port name:
+    WAVEINCAPS wic;
+    if (waveInGetDevCaps(index, &wic, sizeof wic) != MMSYSERR_NOERROR) {
+      name[0] = '\0';
+      break;
+    }
+
+    #ifdef UNICODE
+    // Copy the port name:
+    wcstombs(name, wic.szPname, MAXPNAMELEN);
+    #else
+    strncpy(name, wic.szPname, MAXPNAMELEN);
+    #endif
+
+    if (!WindowsAudioInputDevice_common::openWavInPort(index, numChannels, samplingFrequency, granularityInMS)) break;
+
+    return;
+  } while (0);
+
+  // An error occurred:
+  close();
+}
+
+// Open with default parameters: mono, 8 kHz sampling, 20 ms granularity.
+void AudioInputPort::open() {
+  open(1, 8000, 20);
+}
+
+void AudioInputPort::close() {
+  WindowsAudioInputDevice_common::waveIn_close();
+}
+
+
diff --git a/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.hh b/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.hh
new file mode 100644
index 0000000..22135d6
--- /dev/null
+++ b/WindowsAudioInputDevice/WindowsAudioInputDevice_noMixer.hh
@@ -0,0 +1,54 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Windows implementation of a generic audio input device
+// This version does not use Windows' built-in software mixer.
+// C++ header
+//
+// To use this, call "AudioInputDevice::createNew()".
+// You can also call "AudioInputDevice::getPortNames()" to get a list
+// of port names.
+
+#ifndef _WINDOWS_AUDIO_INPUT_DEVICE_NOMIXER_HH
+#define _WINDOWS_AUDIO_INPUT_DEVICE_NOMIXER_HH
+
+#ifndef _WINDOWS_AUDIO_INPUT_DEVICE_COMMON_HH
+#include "WindowsAudioInputDevice_common.hh"
+#endif
+
+// The mixer-less Windows audio input device (uses wave-in devices directly).
+// Instances are created only via "AudioInputDevice::createNew()" (hence the
+// private constructor and the "friend" declaration).
+class WindowsAudioInputDevice: public WindowsAudioInputDevice_common {
+private:
+  friend class AudioInputDevice;
+  WindowsAudioInputDevice(UsageEnvironment& env, int inputPortNumber,
+			  unsigned char bitsPerSample, unsigned char numChannels,
+			  unsigned samplingFrequency, unsigned granularityInMS,
+			  Boolean& success);
+      // called only by createNew()
+
+  virtual ~WindowsAudioInputDevice();
+
+  static void initializeIfNecessary(); // enumerates the wave-in devices (idempotent)
+
+private:
+  // redefined virtual functions:
+  virtual Boolean setInputPort(int portIndex);
+
+private:
+  static unsigned numAudioInputPorts; // number of wave-in devices
+  static class AudioInputPort* ourAudioInputPorts; // array of "numAudioInputPorts" entries
+};
+
+#endif
diff --git a/WindowsAudioInputDevice/showAudioInputPorts.cpp b/WindowsAudioInputDevice/showAudioInputPorts.cpp
new file mode 100644
index 0000000..37bd438
--- /dev/null
+++ b/WindowsAudioInputDevice/showAudioInputPorts.cpp
@@ -0,0 +1,35 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that prints out this computer's audio input ports
+
+#include "AudioInputDevice.hh"
+#include <stdio.h>
+
+// Prints this computer's audio input port names, one per line, preceded by
+// the port's index.  Returns nonzero on failure.
+int main(int argc, char** argv) {
+  AudioPortNames* portNames = AudioInputDevice::getPortNames();
+  if (portNames == NULL) {
+    fprintf(stderr, "AudioInputDevice::getPortNames() failed!\n");
+    return 1; // was exit(1), which needs <stdlib.h> - not #included here
+  }
+
+  // "numPorts" and "i" are unsigned, so use "%u" rather than "%d":
+  printf("%u available audio input ports:\n", portNames->numPorts);
+  for (unsigned i = 0; i < portNames->numPorts; ++i) {
+    printf("%u\t%s\n", i, portNames->portName[i]);
+  }
+
+  return 0;
+}
diff --git a/config.armeb-uclibc b/config.armeb-uclibc
new file mode 100644
index 0000000..a88289f
--- /dev/null
+++ b/config.armeb-uclibc
@@ -0,0 +1,19 @@
+CROSS_COMPILE= armeb-linux-uclibc-
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -Os -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILE)gcc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+OBJ = o
+LINK = $(CROSS_COMPILE)gcc -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.armlinux b/config.armlinux
new file mode 100644
index 0000000..5a7f8a8
--- /dev/null
+++ b/config.armlinux
@@ -0,0 +1,18 @@
+CROSS_COMPILE?= arm-elf-
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILE)gcc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+OBJ = o
+LINK = $(CROSS_COMPILE)g++ -o
+LINK_OPTS =
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS = $(LINK_OPTS)
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.avr32-linux b/config.avr32-linux
new file mode 100644
index 0000000..db89d9b
--- /dev/null
+++ b/config.avr32-linux
@@ -0,0 +1,15 @@
+CROSS_COMPILE= avr32-linux-uclibc-
+COMPILE_OPTS = -I/usr/local/include -Os $(INCLUDES) -msoft-float -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1
+C = c
+C_COMPILER = $(CROSS_COMPILE)gcc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -fuse-cxa-atexit -DBSD=1
+OBJ = o
+LINK = $(CROSS_COMPILE)c++ -o
+LINK_OPTS =
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.bfin-linux-uclibc b/config.bfin-linux-uclibc
new file mode 100644
index 0000000..a316d4c
--- /dev/null
+++ b/config.bfin-linux-uclibc
@@ -0,0 +1,18 @@
+CROSS_COMPILER = bfin-linux-uclibc-
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -DUCLINUX -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILER)gcc
+C_FLAGS = $(COMPILE_OPTS) -Wall
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILER)g++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = $(CROSS_COMPILER)g++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILER)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.bfin-uclinux b/config.bfin-uclinux
new file mode 100644
index 0000000..9424110
--- /dev/null
+++ b/config.bfin-uclinux
@@ -0,0 +1,18 @@
+CROSS_COMPILER= bfin-uclinux-
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -DUCLINUX -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILER)gcc
+C_FLAGS = $(COMPILE_OPTS) -Wall
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILER)g++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = $(CROSS_COMPILER)g++ -Wl,-elf2flt -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILER)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.bsplinux b/config.bsplinux
new file mode 100644
index 0000000..8290ce5
--- /dev/null
+++ b/config.bsplinux
@@ -0,0 +1,18 @@
+CROSS_COMPILE=
+COMPILE_OPTS = $(INCLUDES) -I. -I/usr/local/include -O2 -DSOCKLEN_T=socklen_t -DNO_SSTREAM=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILE)ecc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)e++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+OBJ = o
+LINK = $(CROSS_COMPILE)e++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)eld -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lm -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.cris-axis-linux-gnu b/config.cris-axis-linux-gnu
new file mode 100644
index 0000000..ce1195f
--- /dev/null
+++ b/config.cris-axis-linux-gnu
@@ -0,0 +1,23 @@
+# Note: AXIS_TOP_DIR is assumed to already be set in your environment.
+# You can set this using the "init_env" script.
+# See http://developer.axis.com/doc/software/apps/apps-howto.html
+# for more information.
+AXIS_DIR = $(AXIS_TOP_DIR)/target/cris-axis-linux-gnu
+COMPILE_OPTS = $(INCLUDES) -I. -mlinux -isystem $(AXIS_DIR)/include -I/usr/local/include -Wall -O2 -DSOCKLEN_T=socklen_t -DCRIS -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = gcc-cris
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++-cris
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wno-ctor-dtor-privacy -ansi -pipe
+OBJ = o
+LINK = c++-cris -static -o
+AXIS_LINK_OPTS = -L$(AXIS_DIR)/lib
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS) -L$(AXIS_DIR)/lib -mlinux
+LIBRARY_LINK = ld-cris -mcrislinux -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.cygwin b/config.cygwin
new file mode 100644
index 0000000..2e7a9c0
--- /dev/null
+++ b/config.cygwin
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O -DSOCKLEN_T=socklen_t -DNEWLOCALE_NOT_USED=1
+C = c
+C_COMPILER = gcc
+C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D__CYGWIN__
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ld -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.cygwin-for-vlc b/config.cygwin-for-vlc
new file mode 100644
index 0000000..b1e19e8
--- /dev/null
+++ b/config.cygwin-for-vlc
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O -DSOCKLEN_T=socklen_t -DNEWLOCALE_NOT_USED=1
+C = c
+C_COMPILER = gcc
+C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D_WIN32 -mno-cygwin
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 -D_WIN32 -Wno-deprecated -mno-cygwin
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ld -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.freebsd b/config.freebsd
new file mode 100644
index 0000000..00cfe21
--- /dev/null
+++ b/config.freebsd
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O -DBSD=1 -DNEWLOCALE_NOT_USED=1 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.freebsd-no-openssl b/config.freebsd-no-openssl
new file mode 100644
index 0000000..2170518
--- /dev/null
+++ b/config.freebsd-no-openssl
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I. -O -DBSD=1 -DNEWLOCALE_NOT_USED=1 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DNO_OPENSSL=1
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION =
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.iphone-simulator b/config.iphone-simulator
new file mode 100644
index 0000000..ef7660a
--- /dev/null
+++ b/config.iphone-simulator
@@ -0,0 +1,28 @@
+# **Note: You must install the relevant "Command line tools (OSX *.*) for Xcode - Xcode *.*"
+# for this configuration file to work.
+
+# Change the following version number, if necessary, before running "genMakefiles iphone-simulator"
+IOS_VERSION = 8.3
+MIN_IOS_VERSION = 7.0
+
+DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer
+TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
+SDK_PATH = $(DEVELOPER_PATH)/SDKs
+SDK = $(SDK_PATH)/iPhoneSimulator$(IOS_VERSION).sdk
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -miphoneos-version-min=$(MIN_IOS_VERSION) -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch i386 --sysroot=$(SDK) -isysroot $(SDK)
+C = c
+C_COMPILER = /usr/bin/xcrun clang
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = /usr/bin/xcrun clang
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = /usr/bin/xcrun clang -o
+LINK_OPTS = -L. -arch i386 -miphoneos-version-min=$(MIN_IOS_VERSION) --sysroot=$(SDK) -isysroot -L$(SDK)/usr/lib/system -I$(SDK)/usr/lib /usr/lib/libc++.dylib
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = /usr/bin/xcrun libtool -static -o
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.iphoneos b/config.iphoneos
new file mode 100644
index 0000000..5df689c
--- /dev/null
+++ b/config.iphoneos
@@ -0,0 +1,27 @@
+# **Note: You must install the relevant "Command line tools (OSX *.*) for Xcode - Xcode *.*"
+# for this configuration file to work.
+#
+# Change the following version number, if necessary, before running "genMakefiles iphoneos"
+IOS_VERSION = 8.3
+
+DEVELOPER_PATH = /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer
+TOOL_PATH = $(DEVELOPER_PATH)/usr/bin
+SDK_PATH = $(DEVELOPER_PATH)/SDKs
+SDK = $(SDK_PATH)/iPhoneOS$(IOS_VERSION).sdk
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. $(EXTRA_LDFLAGS) -DBSD=1 -O2 -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC -arch armv7 --sysroot=$(SDK)
+C = c
+C_COMPILER = /usr/bin/xcrun clang
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = /usr/bin/xcrun clang
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = /usr/bin/xcrun clang -o
+LINK_OPTS = -v -L. -arch armv7 --sysroot=$(SDK) -L$(SDK)/usr/lib/system /usr/lib/libc++.dylib
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = /usr/bin/xcrun libtool -static -o
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.linux b/config.linux
new file mode 100644
index 0000000..b4021ef
--- /dev/null
+++ b/config.linux
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L. $(LDFLAGS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.linux-64bit b/config.linux-64bit
new file mode 100644
index 0000000..84b9623
--- /dev/null
+++ b/config.linux-64bit
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -m64 -fPIC -I/usr/local/include -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.linux-gdb b/config.linux-gdb
new file mode 100644
index 0000000..800e0d3
--- /dev/null
+++ b/config.linux-gdb
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O -DSOCKLEN_T=socklen_t -g -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.linux-no-openssl b/config.linux-no-openssl
new file mode 100644
index 0000000..0de38c4
--- /dev/null
+++ b/config.linux-no-openssl
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -DNO_OPENSSL=1
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L. $(LDFLAGS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION =
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.linux-with-shared-libraries b/config.linux-with-shared-libraries
new file mode 100644
index 0000000..faaf227
--- /dev/null
+++ b/config.linux-with-shared-libraries
@@ -0,0 +1,45 @@
+# 'CURRENT':'REVISION':'AGE' are updated - whenever a library changes - as follows:
+# The library code changes, but without any changes to the API (i.e., interfaces) => increment REVISION
+# At least one interface changes, or is removed => CURRENT += 1; REVISION = 0; AGE = 0
+# One or more interfaces were added, but no existing interfaces were changed or removed => CURRENT += 1; REVISION = 0; AGE += 1
+
+libliveMedia_VERSION_CURRENT=78
+libliveMedia_VERSION_REVISION=3
+libliveMedia_VERSION_AGE=0
+libliveMedia_LIB_SUFFIX=so.$(shell expr $(libliveMedia_VERSION_CURRENT) - $(libliveMedia_VERSION_AGE)).$(libliveMedia_VERSION_AGE).$(libliveMedia_VERSION_REVISION)
+
+libBasicUsageEnvironment_VERSION_CURRENT=1
+libBasicUsageEnvironment_VERSION_REVISION=1
+libBasicUsageEnvironment_VERSION_AGE=0
+libBasicUsageEnvironment_LIB_SUFFIX=so.$(shell expr $(libBasicUsageEnvironment_VERSION_CURRENT) - $(libBasicUsageEnvironment_VERSION_AGE)).$(libBasicUsageEnvironment_VERSION_AGE).$(libBasicUsageEnvironment_VERSION_REVISION)
+
+libUsageEnvironment_VERSION_CURRENT=4
+libUsageEnvironment_VERSION_REVISION=0
+libUsageEnvironment_VERSION_AGE=1
+libUsageEnvironment_LIB_SUFFIX=so.$(shell expr $(libUsageEnvironment_VERSION_CURRENT) - $(libUsageEnvironment_VERSION_AGE)).$(libUsageEnvironment_VERSION_AGE).$(libUsageEnvironment_VERSION_REVISION)
+
+libgroupsock_VERSION_CURRENT=10
+libgroupsock_VERSION_REVISION=4
+libgroupsock_VERSION_AGE=2
+libgroupsock_LIB_SUFFIX=so.$(shell expr $(libgroupsock_VERSION_CURRENT) - $(libgroupsock_VERSION_AGE)).$(libgroupsock_VERSION_AGE).$(libgroupsock_VERSION_REVISION)
+#####
+
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64 -fPIC
+C = c
+C_COMPILER = $(CC)
+C_FLAGS = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CXX)
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1 $(CPPFLAGS) $(CXXFLAGS)
+OBJ = o
+LINK = $(CXX) -o
+LINK_OPTS = -L. $(LDFLAGS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CC) -o
+SHORT_LIB_SUFFIX = so.$(shell expr $($(NAME)_VERSION_CURRENT) - $($(NAME)_VERSION_AGE))
+LIB_SUFFIX = $(SHORT_LIB_SUFFIX).$($(NAME)_VERSION_AGE).$($(NAME)_VERSION_REVISION)
+LIBRARY_LINK_OPTS = -shared -Wl,-soname,$(NAME).$(SHORT_LIB_SUFFIX) $(LDFLAGS)
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
+INSTALL2 = install_shared_libraries
diff --git a/config.macosx b/config.macosx
new file mode 100644
index 0000000..972757b
--- /dev/null
+++ b/config.macosx
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I. -I/usr/local/include $(EXTRA_LDFLAGS) -DBSD=1 -O -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DTIME_BASE=int -DNEED_XLOCALE_H=1
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = libtool -s -o
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = /usr/lib/libssl.46.dylib /usr/lib/libcrypto.44.dylib
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.macosx-no-openssl b/config.macosx-no-openssl
new file mode 100644
index 0000000..6d73be5
--- /dev/null
+++ b/config.macosx-no-openssl
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I. -I/usr/local/include $(EXTRA_LDFLAGS) -DBSD=1 -O -DSOCKLEN_T=socklen_t -DHAVE_SOCKADDR_LEN=1 -DTIME_BASE=int -DNEED_XLOCALE_H=1 -DNO_OPENSSL=1
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = libtool -s -o
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION =
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.mingw b/config.mingw
new file mode 100644
index 0000000..e0106c2
--- /dev/null
+++ b/config.mingw
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O -DSOCKLEN_T=int -DLOCALE_NOT_USED
+C = c
+C_COMPILER = $(CC)
+C_FLAGS = $(COMPILE_OPTS) -DUSE_OUR_BZERO=1 -D__MINGW32__
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CXX)
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -D__MINGW32__ -Wall -Wno-deprecated
+OBJ = o
+LINK = $(CXX) -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(LD) -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -Bstatic
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lws2_32 -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION = -lws2_32
+EXE =
diff --git a/config.openbsd b/config.openbsd
new file mode 100644
index 0000000..7fa50a3
--- /dev/null
+++ b/config.openbsd
@@ -0,0 +1,18 @@
+.SUFFIXES: .cpp
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -DBSD=1 -O -DSOCKLEN_T=socklen_t
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DAIX=1
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ld -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION =
+EXE =
diff --git a/config.qnx4 b/config.qnx4
new file mode 100644
index 0000000..52cccdb
--- /dev/null
+++ b/config.qnx4
@@ -0,0 +1,23 @@
+#
+# Requires:
+# QNX 4.25
+# Watcom 10.6
+# TCP/IP 5.0
+#
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -D_QNX4 -DBSD -DSOCKLEN_T=uint32_t -I/usr/watcom/10.6/usr/include
+C = c
+C_COMPILER = cc32
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = cc32
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -WC,-xs
+OBJ = o
+LINK = cc32 -b -M -N30000 -o
+LINK_OPTS = -l.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = wlib -n -b -c
+LIBRARY_LINK_OPTS = $(LINK_OPTS)
+LIB_SUFFIX = lib
+LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
+EXE =
diff --git a/config.solaris-32bit b/config.solaris-32bit
new file mode 100644
index 0000000..9956849
--- /dev/null
+++ b/config.solaris-32bit
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O -DSOLARIS -DNEWLOCALE_NOT_USED -DSOCKLEN_T=socklen_t
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = c++ -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ld -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -r -dn
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lnsl -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
+EXE =
diff --git a/config.solaris-64bit b/config.solaris-64bit
new file mode 100644
index 0000000..97f934e
--- /dev/null
+++ b/config.solaris-64bit
@@ -0,0 +1,17 @@
+COMPILE_OPTS = $(INCLUDES) -m64 -I/usr/local/include -I. -O -DSOLARIS -DNEWLOCALE_NOT_USED -DSOCKLEN_T=socklen_t
+C = c
+C_COMPILER = cc
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = c++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall
+OBJ = o
+LINK = c++ -m64 -o
+LINK_OPTS = -L.
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = ld -o
+LIBRARY_LINK_OPTS = $(LINK_OPTS) -64 -r -dn
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = -lsocket -lnsl -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
+EXE =
diff --git a/config.uClinux b/config.uClinux
new file mode 100644
index 0000000..289fcfc
--- /dev/null
+++ b/config.uClinux
@@ -0,0 +1,20 @@
+CROSS_COMPILE= arc-linux-uclibc-
+COMPILE_OPTS = $(INCLUDES) -I/usr/local/include -I. -O2 -DSOCKLEN_T=socklen_t -D_LARGEFILE_SOURCE=1 -D_FILE_OFFSET_BITS=64
+C = c
+C_COMPILER = $(CROSS_COMPILE)gcc
+CFLAGS += $(COMPILE_OPTS)
+C_FLAGS = $(CFLAGS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(CROSS_COMPILE)g++
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS) -Wall -DBSD=1
+CPLUSPLUS_FLAGS += $(CPPFLAGS) -fexceptions
+OBJ = o
+LINK = $(CROSS_COMPILE)g++ -o
+LINK_OPTS = -L. $(LDFLAGS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS)
+LIBRARY_LINK = $(CROSS_COMPILE)ar cr
+LIBRARY_LINK_OPTS =
+LIB_SUFFIX = a
+LIBS_FOR_CONSOLE_APPLICATION = $(CXXLIBS) -lssl -lcrypto
+LIBS_FOR_GUI_APPLICATION = $(LIBS_FOR_CONSOLE_APPLICATION)
+EXE =
diff --git a/configure b/configure
new file mode 100755
index 0000000..c5f48b7
--- /dev/null
+++ b/configure
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+echo "Whoa! This software distribution does NOT use the normal Unix \"configure\" mechanism for generating a Makefile. For instructions on how to build this software, see <http://www.live555.com/liveMedia/>."
+echo "Also, please make sure that you're using the most up-to-date version of the source code - available from <http://www.live555.com/liveMedia/public/>."
diff --git a/fix-makefile b/fix-makefile
new file mode 100755
index 0000000..1b4aeca
--- /dev/null
+++ b/fix-makefile
@@ -0,0 +1,26 @@
+#!/bin/sh
+# the next line restarts using tclsh \
+exec tclsh "$0" "$@"
+
+set makefileName [lindex $argv 0]
+set tmpfileName /tmp/rsftmp
+
+set inFid [open $makefileName r]
+set outFid [open $tmpfileName w]
+
+while {![eof $inFid]} {
+ set line [gets $inFid]
+ if {[string match *\)\$* $line]} {
+ set pos [string first \)\$ $line]
+ set prefix [string range $line 0 $pos]
+ incr pos
+ set suffix [string range $line $pos end]
+ set line $prefix\ $suffix
+ }
+
+ puts $outFid $line
+}
+
+close $inFid
+close $outFid
+file rename -force $tmpfileName $makefileName
diff --git a/genMakefiles b/genMakefiles
new file mode 100755
index 0000000..a139f54
--- /dev/null
+++ b/genMakefiles
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+usage() {
+ echo "Usage: $0 <os-platform>"
+ exit 1
+}
+
+if [ $# -ne 1 ]
+then
+ usage $*
+fi
+
+platform=$1
+subdirs="liveMedia groupsock UsageEnvironment BasicUsageEnvironment testProgs mediaServer proxyServer hlsProxy"
+
+for subdir in $subdirs
+do
+ /bin/rm -f $subdir/Makefile
+ cat $subdir/Makefile.head config.$platform $subdir/Makefile.tail > $subdir/Makefile
+ chmod a-w $subdir/Makefile
+done
+
+/bin/rm -f Makefile
+cat Makefile.head config.$1 Makefile.tail > Makefile
+chmod a-w Makefile
diff --git a/genWindowsMakefiles b/genWindowsMakefiles
new file mode 100755
index 0000000..5deee12
--- /dev/null
+++ b/genWindowsMakefiles
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+cd liveMedia
+/bin/rm -f liveMedia.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > liveMedia.mak
+
+cd ../groupsock
+/bin/rm -f groupsock.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > groupsock.mak
+
+cd ../UsageEnvironment
+/bin/rm -f UsageEnvironment.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > UsageEnvironment.mak
+
+cd ../BasicUsageEnvironment
+/bin/rm -f BasicUsageEnvironment.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > BasicUsageEnvironment.mak
+
+cd ../testProgs
+/bin/rm -f testProgs.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > testProgs.mak
+
+cd ../mediaServer
+/bin/rm -f mediaServer.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > mediaServer.mak
+
+cd ../proxyServer
+/bin/rm -f proxyServer.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > proxyServer.mak
+
+cd ../hlsProxy
+/bin/rm -f hlsProxy.mak
+/bin/rm -f Makefile
+cat Makefile.head ../win32config Makefile.tail > hlsProxy.mak
diff --git a/genWindowsMakefiles.cmd b/genWindowsMakefiles.cmd
new file mode 100644
index 0000000..3d1c27c
--- /dev/null
+++ b/genWindowsMakefiles.cmd
@@ -0,0 +1,30 @@
+@Echo OFF
+SETLOCAL
+for %%I in (%0) do %%~dI
+for %%I in (%0) do cd "%%~pI"
+cd liveMedia
+del /Q liveMedia.mak
+type Makefile.head ..\win32config Makefile.tail > liveMedia.mak
+cd ../groupsock
+del /Q groupsock.mak
+type Makefile.head ..\win32config Makefile.tail > groupsock.mak
+cd ../UsageEnvironment
+del /Q UsageEnvironment.mak
+type Makefile.head ..\win32config Makefile.tail > UsageEnvironment.mak
+cd ../BasicUsageEnvironment
+del /Q BasicUsageEnvironment.mak
+type Makefile.head ..\win32config Makefile.tail > BasicUsageEnvironment.mak
+cd ../testProgs
+del /Q testProgs.mak
+type Makefile.head ..\win32config Makefile.tail > testProgs.mak
+cd ../mediaServer
+del /Q mediaServer.mak
+type Makefile.head ..\win32config Makefile.tail > mediaServer.mak
+cd ../proxyServer
+del /Q proxyServer.mak
+type Makefile.head ..\win32config Makefile.tail > proxyServer.mak
+cd ../hlsProxy
+del /Q hlsProxy.mak
+type Makefile.head ..\win32config Makefile.tail > hlsProxy.mak
+
+ENDLOCAL
diff --git a/groupsock/COPYING b/groupsock/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/groupsock/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/groupsock/COPYING.LESSER b/groupsock/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/groupsock/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/groupsock/GroupEId.cpp b/groupsock/GroupEId.cpp
new file mode 100644
index 0000000..25d8aa2
--- /dev/null
+++ b/groupsock/GroupEId.cpp
@@ -0,0 +1,49 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// "Group Endpoint Id"
+// Implementation
+
+#include "GroupEId.hh"
+
+
+GroupEId::GroupEId(struct in_addr const& groupAddr,
+ portNumBits portNum, u_int8_t ttl) {
+ struct in_addr sourceFilterAddr;
+ sourceFilterAddr.s_addr = ~0; // indicates no source filter
+
+ init(groupAddr, sourceFilterAddr, portNum, ttl);
+}
+
+GroupEId::GroupEId(struct in_addr const& groupAddr,
+ struct in_addr const& sourceFilterAddr,
+ portNumBits portNum) {
+ init(groupAddr, sourceFilterAddr, portNum, 255);
+}
+
+Boolean GroupEId::isSSM() const {
+ return fSourceFilterAddress.s_addr != netAddressBits(~0);
+}
+
+void GroupEId::init(struct in_addr const& groupAddr,
+ struct in_addr const& sourceFilterAddr,
+ portNumBits portNum,
+ u_int8_t ttl) {
+ fGroupAddress = groupAddr;
+ fSourceFilterAddress = sourceFilterAddr;
+ fPortNum = portNum;
+ fTTL = ttl;
+}
diff --git a/groupsock/Groupsock.cpp b/groupsock/Groupsock.cpp
new file mode 100644
index 0000000..41fa001
--- /dev/null
+++ b/groupsock/Groupsock.cpp
@@ -0,0 +1,675 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// 'Group sockets'
+// Implementation
+
+#include "Groupsock.hh"
+#include "GroupsockHelper.hh"
+//##### Eventually fix the following #include; we shouldn't know about tunnels
+#include "TunnelEncaps.hh"
+
+#ifndef NO_SSTREAM
+#include <sstream>
+#endif
+#include <stdio.h>
+
+///////// OutputSocket //////////
+
+OutputSocket::OutputSocket(UsageEnvironment& env)
+ : Socket(env, 0 /* let kernel choose port */),
+ fSourcePort(0), fLastSentTTL(256/*hack: a deliberately invalid value*/) {
+}
+
+OutputSocket::OutputSocket(UsageEnvironment& env, Port port)
+ : Socket(env, port),
+ fSourcePort(0), fLastSentTTL(256/*hack: a deliberately invalid value*/) {
+}
+
+OutputSocket::~OutputSocket() {
+}
+
+Boolean OutputSocket::write(netAddressBits address, portNumBits portNum, u_int8_t ttl,
+ unsigned char* buffer, unsigned bufferSize) {
+ struct in_addr destAddr; destAddr.s_addr = address;
+ if ((unsigned)ttl == fLastSentTTL) {
+ // Optimization: Don't do a 'set TTL' system call again
+ if (!writeSocket(env(), socketNum(), destAddr, portNum, buffer, bufferSize)) return False;
+ } else {
+ if (!writeSocket(env(), socketNum(), destAddr, portNum, ttl, buffer, bufferSize)) return False;
+ fLastSentTTL = (unsigned)ttl;
+ }
+
+ if (sourcePortNum() == 0) {
+ // Now that we've sent a packet, we can find out what the
+ // kernel chose as our ephemeral source port number:
+ if (!getSourcePort(env(), socketNum(), fSourcePort)) {
+ if (DebugLevel >= 1)
+ env() << *this
+ << ": failed to get source port: "
+ << env().getResultMsg() << "\n";
+ return False;
+ }
+ }
+
+ return True;
+}
+
+// By default, we don't do reads:
+Boolean OutputSocket
+::handleRead(unsigned char* /*buffer*/, unsigned /*bufferMaxSize*/,
+ unsigned& /*bytesRead*/, struct sockaddr_in& /*fromAddressAndPort*/) {
+ return True;
+}
+
+
+///////// destRecord //////////
+
+destRecord
+::destRecord(struct in_addr const& addr, Port const& port, u_int8_t ttl, unsigned sessionId,
+ destRecord* next)
+ : fNext(next), fGroupEId(addr, port.num(), ttl), fSessionId(sessionId) {
+}
+
+destRecord::~destRecord() {
+ delete fNext;
+}
+
+
+///////// Groupsock //////////
+
+NetInterfaceTrafficStats Groupsock::statsIncoming;
+NetInterfaceTrafficStats Groupsock::statsOutgoing;
+NetInterfaceTrafficStats Groupsock::statsRelayedIncoming;
+NetInterfaceTrafficStats Groupsock::statsRelayedOutgoing;
+
+// Constructor for a source-independent multicast group
+Groupsock::Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr,
+ Port port, u_int8_t ttl)
+ : OutputSocket(env, port),
+ deleteIfNoMembers(False), isSlave(False),
+ fDests(new destRecord(groupAddr, port, ttl, 0, NULL)),
+ fIncomingGroupEId(groupAddr, port.num(), ttl) {
+
+ if (!socketJoinGroup(env, socketNum(), groupAddr.s_addr)) {
+ if (DebugLevel >= 1) {
+ env << *this << ": failed to join group: "
+ << env.getResultMsg() << "\n";
+ }
+ }
+
+ // Make sure we can get our source address:
+ if (ourIPAddress(env) == 0) {
+ if (DebugLevel >= 0) { // this is a fatal error
+ env << "Unable to determine our source address: "
+ << env.getResultMsg() << "\n";
+ }
+ }
+
+ if (DebugLevel >= 2) env << *this << ": created\n";
+}
+
+// Constructor for a source-specific multicast group
+Groupsock::Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr,
+ struct in_addr const& sourceFilterAddr,
+ Port port)
+ : OutputSocket(env, port),
+ deleteIfNoMembers(False), isSlave(False),
+ fDests(new destRecord(groupAddr, port, 255, 0, NULL)),
+ fIncomingGroupEId(groupAddr, sourceFilterAddr, port.num()) {
+ // First try a SSM join. If that fails, try a regular join:
+ if (!socketJoinGroupSSM(env, socketNum(), groupAddr.s_addr,
+ sourceFilterAddr.s_addr)) {
+ if (DebugLevel >= 3) {
+ env << *this << ": SSM join failed: "
+ << env.getResultMsg();
+ env << " - trying regular join instead\n";
+ }
+ if (!socketJoinGroup(env, socketNum(), groupAddr.s_addr)) {
+ if (DebugLevel >= 1) {
+ env << *this << ": failed to join group: "
+ << env.getResultMsg() << "\n";
+ }
+ }
+ }
+
+ if (DebugLevel >= 2) env << *this << ": created\n";
+}
+
+Groupsock::~Groupsock() {
+ if (isSSM()) {
+ if (!socketLeaveGroupSSM(env(), socketNum(), groupAddress().s_addr,
+ sourceFilterAddress().s_addr)) {
+ socketLeaveGroup(env(), socketNum(), groupAddress().s_addr);
+ }
+ } else {
+ socketLeaveGroup(env(), socketNum(), groupAddress().s_addr);
+ }
+
+ delete fDests;
+
+ if (DebugLevel >= 2) env() << *this << ": deleting\n";
+}
+
+destRecord* Groupsock
+::createNewDestRecord(struct in_addr const& addr, Port const& port, u_int8_t ttl,
+ unsigned sessionId, destRecord* next) {
+ // Default implementation:
+ return new destRecord(addr, port, ttl, sessionId, next);
+}
+
+// Changes the destination address/port/TTL associated with "sessionId".
+// A "newDestAddr"/"newDestPort" of 0 (or a "newDestTTL" of ~0) means
+// 'leave that value unchanged'.  Creates a new 'destRecord' if none exists
+// yet for this session id.
+void
+Groupsock::changeDestinationParameters(struct in_addr const& newDestAddr,
+                                       Port newDestPort, int newDestTTL, unsigned sessionId) {
+  // Find the existing 'destRecord' (if any) for this "sessionId":
+  destRecord* dest;
+  for (dest = fDests; dest != NULL && dest->fSessionId != sessionId; dest = dest->fNext) {}
+
+  if (dest == NULL) { // There's no existing 'destRecord' for this "sessionId"; add a new one:
+    fDests = createNewDestRecord(newDestAddr, newDestPort, newDestTTL, sessionId, fDests);
+    return;
+  }
+
+  // "dest" is an existing 'destRecord' for this "sessionId"; change its values to the new ones:
+  struct in_addr destAddr = dest->fGroupEId.groupAddress();
+  if (newDestAddr.s_addr != 0) {
+    if (newDestAddr.s_addr != destAddr.s_addr
+        && IsMulticastAddress(newDestAddr.s_addr)) {
+      // If the new destination is a multicast address, then we assume that
+      // we want to join it also.  (If this is not in fact the case, then
+      // call "multicastSendOnly()" afterwards.)
+      socketLeaveGroup(env(), socketNum(), destAddr.s_addr);
+      socketJoinGroup(env(), socketNum(), newDestAddr.s_addr);
+    }
+    destAddr.s_addr = newDestAddr.s_addr;
+  }
+
+  portNumBits destPortNum = dest->fGroupEId.portNum();
+  if (newDestPort.num() != 0) {
+    if (newDestPort.num() != destPortNum
+        && IsMulticastAddress(destAddr.s_addr)) {
+      // Also bind to the new port number:
+      changePort(newDestPort);
+      // And rejoin the multicast group:
+      socketJoinGroup(env(), socketNum(), destAddr.s_addr);
+    }
+    destPortNum = newDestPort.num();
+  }
+
+  // A "newDestTTL" of ~0 means 'keep the current TTL':
+  u_int8_t destTTL = ttl();
+  if (newDestTTL != ~0) destTTL = (u_int8_t)newDestTTL;
+
+  // Rebuild the record's GroupEId from the (possibly updated) values:
+  dest->fGroupEId = GroupEId(destAddr, destPortNum, destTTL);
+
+  // Finally, remove any other 'destRecord's that might also have this "sessionId":
+  removeDestinationFrom(dest->fNext, sessionId);
+}
+
+// Returns the session id of the 'destRecord' matching "destAddrAndPort",
+// or 0 if there is none.
+unsigned Groupsock
+::lookupSessionIdFromDestination(struct sockaddr_in const& destAddrAndPort) const {
+  destRecord* record = lookupDestRecordFromDestination(destAddrAndPort);
+  return record == NULL ? 0 : record->fSessionId;
+}
+
+void Groupsock::addDestination(struct in_addr const& addr, Port const& port, unsigned sessionId) {
+  // Default implementation:
+  // Scan for an existing 'destRecord' with the same "addr", "port", and
+  // "sessionId"; if one is already present there is nothing to do:
+  destRecord* record = fDests;
+  while (record != NULL) {
+    if (record->fSessionId == sessionId
+        && record->fGroupEId.groupAddress().s_addr == addr.s_addr
+        && record->fGroupEId.portNum() == port.num()) {
+      return;
+    }
+    record = record->fNext;
+  }
+
+  // No match found, so prepend a fresh record (with TTL 255):
+  fDests = createNewDestRecord(addr, port, 255, sessionId, fDests);
+}
+
+void Groupsock::removeDestination(unsigned sessionId) {
+  // Default implementation: drop every 'destRecord' carrying "sessionId":
+  removeDestinationFrom(fDests, sessionId);
+}
+
+void Groupsock::removeAllDestinations() {
+  // Deleting the head record deletes the whole chain:
+  delete fDests;
+  fDests = NULL;
+}
+
+// Intended to make this groupsock send-only (by leaving its multicast
+// groups).  The body is currently compiled out (see comment below), so this
+// is effectively a no-op.
+void Groupsock::multicastSendOnly() {
+  // We disable this code for now, because - on some systems - leaving the multicast group seems to cause sent packets
+  // to not be received by other applications (at least, on the same host).
+#if 0
+  socketLeaveGroup(env(), socketNum(), fIncomingGroupEId.groupAddress().s_addr);
+  for (destRecord* dests = fDests; dests != NULL; dests = dests->fNext) {
+    socketLeaveGroup(env(), socketNum(), dests->fGroupEId.groupAddress().s_addr);
+  }
+#endif
+}
+
+// Sends "buffer" (of length "bufferSize") to every destination in "fDests",
+// then relays it to any attached tunnel members (except
+// "interfaceNotToFwdBackTo").  Returns False - with an error message set in
+// "env" - if any send or the relay fails.
+Boolean Groupsock::output(UsageEnvironment& env, unsigned char* buffer, unsigned bufferSize,
+                          DirectedNetInterface* interfaceNotToFwdBackTo) {
+  do {
+    // First, do the datagram send, to each destination:
+    Boolean writeSuccess = True;
+    for (destRecord* dests = fDests; dests != NULL; dests = dests->fNext) {
+      if (!write(dests->fGroupEId.groupAddress().s_addr, dests->fGroupEId.portNum(), dests->fGroupEId.ttl(),
+                 buffer, bufferSize)) {
+        writeSuccess = False;
+        break;
+      }
+    }
+    if (!writeSuccess) break;
+    statsOutgoing.countPacket(bufferSize);
+    statsGroupOutgoing.countPacket(bufferSize);
+
+    // Then, forward to our members:
+    int numMembers = 0;
+    if (!members().IsEmpty()) {
+      numMembers =
+        outputToAllMembersExcept(interfaceNotToFwdBackTo,
+                                 ttl(), buffer, bufferSize,
+                                 ourIPAddress(env));
+      if (numMembers < 0) break; // a negative count signals a fatal relay error
+    }
+
+    if (DebugLevel >= 3) {
+      env << *this << ": wrote " << bufferSize << " bytes, ttl " << (unsigned)ttl();
+      if (numMembers > 0) {
+        env << "; relayed to " << numMembers << " members";
+      }
+      env << "\n";
+    }
+    return True;
+  } while (0);
+
+  if (DebugLevel >= 0) { // this is a fatal error
+    // Prepend our own text to the message set by the failing call:
+    UsageEnvironment::MsgString msg = strDup(env.getResultMsg());
+    env.setResultMsg("Groupsock write failed: ", msg);
+    delete[] (char*)msg;
+  }
+  return False;
+}
+
+// Reads one datagram from our socket into "buffer", records statistics, and
+// relays the packet to any attached tunnel members (unless we sent it
+// ourselves, or it fails the SSM source filter).  Returns True with
+// "bytesRead" set on success; returns False only on a fatal read error.
+Boolean Groupsock::handleRead(unsigned char* buffer, unsigned bufferMaxSize,
+                              unsigned& bytesRead,
+                              struct sockaddr_in& fromAddressAndPort) {
+  // Read data from the socket, and relay it across any attached tunnels
+  //##### later make this code more general - independent of tunnels
+
+  bytesRead = 0;
+
+  // Reserve room at the end of "buffer" for a possible tunnel encapsulation
+  // trailer.  NOTE(review): assumes
+  // bufferMaxSize >= TunnelEncapsulationTrailerMaxSize - confirm with callers.
+  int maxBytesToRead = bufferMaxSize - TunnelEncapsulationTrailerMaxSize;
+  int numBytes = readSocket(env(), socketNum(),
+                            buffer, maxBytesToRead, fromAddressAndPort);
+  if (numBytes < 0) {
+    if (DebugLevel >= 0) { // this is a fatal error
+      UsageEnvironment::MsgString msg = strDup(env().getResultMsg());
+      env().setResultMsg("Groupsock read failed: ", msg);
+      delete[] (char*)msg;
+    }
+    return False;
+  }
+
+  // If we're a SSM group, make sure the source address matches:
+  // (a non-matching packet is silently dropped - note the True return)
+  if (isSSM()
+      && fromAddressAndPort.sin_addr.s_addr != sourceFilterAddress().s_addr) {
+    return True;
+  }
+
+  // We'll handle this data.
+  // Also write it (with the encapsulation trailer) to each member,
+  // unless the packet was originally sent by us to begin with.
+  bytesRead = numBytes;
+
+  int numMembers = 0;
+  if (!wasLoopedBackFromUs(env(), fromAddressAndPort)) {
+    statsIncoming.countPacket(numBytes);
+    statsGroupIncoming.countPacket(numBytes);
+    numMembers =
+      outputToAllMembersExcept(NULL, ttl(),
+                               buffer, bytesRead,
+                               fromAddressAndPort.sin_addr.s_addr);
+    if (numMembers > 0) {
+      statsRelayedIncoming.countPacket(numBytes);
+      statsGroupRelayedIncoming.countPacket(numBytes);
+    }
+  }
+  if (DebugLevel >= 3) {
+    env() << *this << ": read " << bytesRead << " bytes from " << AddressString(fromAddressAndPort).val() << ", port " << ntohs(fromAddressAndPort.sin_port);
+    if (numMembers > 0) {
+      env() << "; relayed to " << numMembers << " members";
+    }
+    env() << "\n";
+  }
+
+  return True;
+}
+
+// Returns True iff a packet received from "fromAddressAndPort" appears to be
+// one of our own sends, looped back to us by the OS (same source address and
+// same source port).
+Boolean Groupsock::wasLoopedBackFromUs(UsageEnvironment& env,
+                                       struct sockaddr_in& fromAddressAndPort) {
+  // NOTE(review): "s_addr" is normally in network byte order, while the
+  // constant 0x7F000001 is 127.0.0.1 in host order - confirm this comparison
+  // behaves as intended on little-endian hosts.
+  if (fromAddressAndPort.sin_addr.s_addr == ourIPAddress(env) ||
+      fromAddressAndPort.sin_addr.s_addr == 0x7F000001/*127.0.0.1*/) {
+    if (fromAddressAndPort.sin_port == sourcePortNum()) {
+#ifdef DEBUG_LOOPBACK_CHECKING
+      if (DebugLevel >= 3) {
+        env() << *this << ": got looped-back packet\n";
+      }
+#endif
+      return True;
+    }
+  }
+
+  return False;
+}
+
+// Linear search of our destination list for a record whose address and port
+// both match "destAddrAndPort"; returns NULL if no record matches.
+destRecord* Groupsock
+::lookupDestRecordFromDestination(struct sockaddr_in const& destAddrAndPort) const {
+  destRecord* record = fDests;
+  while (record != NULL) {
+    if (record->fGroupEId.groupAddress().s_addr == destAddrAndPort.sin_addr.s_addr
+        && record->fGroupEId.portNum() == destAddrAndPort.sin_port) {
+      break;
+    }
+    record = record->fNext;
+  }
+  return record;
+}
+
+// Removes (and deletes) every 'destRecord' in the list "dests" whose session
+// id equals "sessionId", relinking the list around the removed records.
+void Groupsock::removeDestinationFrom(destRecord*& dests, unsigned sessionId) {
+  // Walk the list via a pointer-to-link, so we can unlink a record without
+  // tracking a separate 'previous' node:
+  destRecord** destsPtr = &dests;
+  while (*destsPtr != NULL) {
+    if (sessionId == (*destsPtr)->fSessionId) {
+      // Remove the record pointed to by *destsPtr :
+      destRecord* next = (*destsPtr)->fNext;
+      (*destsPtr)->fNext = NULL; // so deleting this record doesn't also delete its successors
+      delete (*destsPtr);
+      *destsPtr = next;
+    } else {
+      destsPtr = &((*destsPtr)->fNext);
+    }
+  }
+}
+
+// Relays "data" (of length "size") to every tunnel member interface except
+// "exceptInterface", first appending a tunnel-encapsulation trailer to the
+// packet (once, before the first write).  Returns the number of members
+// written to, or -1 on a fatal error.
+int Groupsock::outputToAllMembersExcept(DirectedNetInterface* exceptInterface,
+                                        u_int8_t ttlToFwd,
+                                        unsigned char* data, unsigned size,
+                                        netAddressBits sourceAddr) {
+  // Don't forward TTL-0 packets
+  if (ttlToFwd == 0) return 0;
+
+  DirectedNetInterfaceSet::Iterator iter(members());
+  unsigned numMembers = 0;
+  DirectedNetInterface* interf;
+  while ((interf = iter.next()) != NULL) {
+    // Check whether we've asked to exclude this interface:
+    if (interf == exceptInterface)
+      continue;
+
+    // Check that the packet's source address makes it OK to
+    // be relayed across this interface:
+    UsageEnvironment& saveEnv = env();
+    // because the following call may delete "this"
+    if (!interf->SourceAddrOKForRelaying(saveEnv, sourceAddr)) {
+      if (strcmp(saveEnv.getResultMsg(), "") != 0) {
+        // Treat this as a fatal error
+        return -1;
+      } else {
+        continue;
+      }
+    }
+
+    if (numMembers == 0) {
+      // We know that we're going to forward to at least one
+      // member, so fill in the tunnel encapsulation trailer.
+      // (Note: Allow for it not being 4-byte-aligned.)
+      TunnelEncapsulationTrailer* trailerInPacket
+        = (TunnelEncapsulationTrailer*)&data[size];
+      TunnelEncapsulationTrailer* trailer;
+
+      Boolean misaligned = ((uintptr_t)trailerInPacket & 3) != 0;
+      unsigned trailerOffset;
+      u_int8_t tunnelCmd;
+      if (isSSM()) {
+        // add an 'auxilliary address' before the trailer
+        trailerOffset = TunnelEncapsulationTrailerAuxSize;
+        tunnelCmd = TunnelDataAuxCmd;
+      } else {
+        trailerOffset = 0;
+        tunnelCmd = TunnelDataCmd;
+      }
+      unsigned trailerSize = TunnelEncapsulationTrailerSize + trailerOffset;
+      unsigned tmpTr[TunnelEncapsulationTrailerMaxSize];
+      if (misaligned) {
+        // Build the trailer in an aligned scratch buffer; it is copied into
+        // the packet (via memmove) below:
+        trailer = (TunnelEncapsulationTrailer*)&tmpTr;
+      } else {
+        trailer = trailerInPacket;
+      }
+      // NOTE(review): this advances by trailerOffset *TunnelEncapsulationTrailer
+      // units* (pointer arithmetic), not bytes - confirm this matches the
+      // trailer layout expected by tunnel peers.
+      trailer += trailerOffset;
+
+      if (fDests != NULL) {
+        trailer->address() = fDests->fGroupEId.groupAddress().s_addr;
+        Port destPort(ntohs(fDests->fGroupEId.portNum()));
+        trailer->port() = destPort; // structure copy
+      }
+      trailer->ttl() = ttlToFwd;
+      trailer->command() = tunnelCmd;
+
+      if (isSSM()) {
+        trailer->auxAddress() = sourceFilterAddress().s_addr;
+      }
+
+      if (misaligned) {
+        memmove(trailerInPacket, trailer-trailerOffset, trailerSize);
+      }
+
+      size += trailerSize;
+    }
+
+    interf->write(data, size);
+    ++numMembers;
+  }
+
+  return numMembers;
+}
+
+// Debug-prints a Groupsock: timestamp, socket number, group address, port,
+// and (SSM source filter | TTL).
+UsageEnvironment& operator<<(UsageEnvironment& s, const Groupsock& g) {
+  s << timestampString() << " Groupsock("
+    << g.socketNum() << ": "
+    << AddressString(g.groupAddress()).val()
+    << ", " << g.port() << ", ";
+  if (g.isSSM()) {
+    // For SSM groupsocks, print the source filter address instead of the TTL:
+    return s << "SSM source: "
+             << AddressString(g.sourceFilterAddress()).val() << ")";
+  }
+  return s << (unsigned)(g.ttl()) << ")";
+}
+
+
+////////// GroupsockLookupTable //////////
+
+
+// A hash table used to index Groupsocks by socket number.
+
+// Returns (a reference to) the per-environment socket-number -> Groupsock
+// hash table, creating the table on first use.
+static HashTable*& getSocketTable(UsageEnvironment& env) {
+  HashTable*& table = groupsockPriv(env)->socketTable;
+  if (table == NULL) { // We need to create it
+    table = HashTable::create(ONE_WORD_HASH_KEYS);
+  }
+  return table;
+}
+
+// Removes "groupsock"'s entry from the socket table - but only if it really
+// is the registered entry for its socket number.  Returns True iff an entry
+// was removed.  Also deletes the per-environment table itself once it
+// becomes empty (to reclaim space).
+static Boolean unsetGroupsockBySocket(Groupsock const* groupsock) {
+  do {
+    if (groupsock == NULL) break;
+
+    int sock = groupsock->socketNum();
+    // Make sure "sock" is in bounds:
+    if (sock < 0) break;
+
+    HashTable*& sockets = getSocketTable(groupsock->env());
+
+    Groupsock* gs = (Groupsock*)sockets->Lookup((char*)(long)sock);
+    if (gs == NULL || gs != groupsock) break;
+    sockets->Remove((char*)(long)sock);
+
+    if (sockets->IsEmpty()) {
+      // We can also delete the table (to reclaim space):
+      delete sockets; sockets = NULL;
+      reclaimGroupsockPriv(gs->env());
+    }
+
+    return True;
+  } while (0);
+
+  return False;
+}
+
+// Records "groupsock" in the socket table, keyed by socket number "sock".
+// Returns False (with an error message set in "env") if "sock" is negative,
+// or if the table already has an entry for "sock".
+static Boolean setGroupsockBySocket(UsageEnvironment& env, int sock,
+                                    Groupsock* groupsock) {
+  do {
+    // Make sure the "sock" parameter is in bounds:
+    if (sock < 0) {
+      char buf[100];
+      // Use snprintf() (not sprintf()) so the message can never overflow "buf":
+      snprintf(buf, sizeof buf, "trying to use bad socket (%d)", sock);
+      env.setResultMsg(buf);
+      break;
+    }
+
+    HashTable* sockets = getSocketTable(env);
+
+    // Make sure we're not replacing an existing Groupsock (although that shouldn't happen)
+    Boolean alreadyExists
+      = (sockets->Lookup((char*)(long)sock) != 0);
+    if (alreadyExists) {
+      char buf[100];
+      snprintf(buf, sizeof buf, "Attempting to replace an existing socket (%d)", sock);
+      env.setResultMsg(buf);
+      break;
+    }
+
+    sockets->Add((char*)(long)sock, groupsock);
+    return True;
+  } while (0);
+
+  return False;
+}
+
+// Returns the Groupsock registered for socket number "sock", or NULL.
+static Groupsock* getGroupsockBySocket(UsageEnvironment& env, int sock) {
+  // Reject out-of-bounds socket numbers up front:
+  if (sock < 0) return NULL;
+
+  HashTable* sockets = getSocketTable(env);
+  return (Groupsock*)sockets->Lookup((char*)(long)sock);
+}
+
+// Looks up the (ISM) groupsock for ("groupAddress", "port"), creating it with
+// TTL "ttl" if absent.  "isNew" reports whether a new groupsock was created.
+Groupsock*
+GroupsockLookupTable::Fetch(UsageEnvironment& env,
+                            netAddressBits groupAddress,
+                            Port port, u_int8_t ttl,
+                            Boolean& isNew) {
+  isNew = False;
+  Groupsock* groupsock = (Groupsock*) fTable.Lookup(groupAddress, (~0), port);
+  if (groupsock == NULL) { // we need to create one:
+    groupsock = AddNew(env, groupAddress, (~0), port, ttl);
+    if (groupsock != NULL) isNew = True;
+  }
+  return groupsock;
+}
+
+// SSM variant: looks up (or creates) the groupsock for
+// ("groupAddress", "sourceFilterAddr", "port").
+Groupsock*
+GroupsockLookupTable::Fetch(UsageEnvironment& env,
+                            netAddressBits groupAddress,
+                            netAddressBits sourceFilterAddr, Port port,
+                            Boolean& isNew) {
+  isNew = False;
+  Groupsock* groupsock
+    = (Groupsock*) fTable.Lookup(groupAddress, sourceFilterAddr, port);
+  if (groupsock == NULL) { // we need to create one:
+    groupsock = AddNew(env, groupAddress, sourceFilterAddr, port, 0);
+    if (groupsock != NULL) isNew = True;
+  }
+  return groupsock;
+}
+
+// ISM lookup: a source-filter address of ~0 means 'no source filter'.
+Groupsock*
+GroupsockLookupTable::Lookup(netAddressBits groupAddress, Port port) {
+  void* result = fTable.Lookup(groupAddress, (~0), port);
+  return (Groupsock*)result;
+}
+
+// SSM lookup.
+Groupsock*
+GroupsockLookupTable::Lookup(netAddressBits groupAddress,
+                             netAddressBits sourceFilterAddr, Port port) {
+  void* result = fTable.Lookup(groupAddress, sourceFilterAddr, port);
+  return (Groupsock*)result;
+}
+
+// Lookup by socket number (via the file-static socket table).
+Groupsock* GroupsockLookupTable::Lookup(UsageEnvironment& env, int sock) {
+  return getGroupsockBySocket(env, sock);
+}
+
+// Removes "groupsock" from both the socket table and our address/port table.
+// NOTE(review): "groupsock" is dereferenced unconditionally below, so callers
+// must not pass NULL (unsetGroupsockBySocket() alone would tolerate it).
+Boolean GroupsockLookupTable::Remove(Groupsock const* groupsock) {
+  unsetGroupsockBySocket(groupsock);
+  return fTable.Remove(groupsock->groupAddress().s_addr,
+                       groupsock->sourceFilterAddress().s_addr,
+                       groupsock->port());
+}
+
+// Creates a new Groupsock - ISM if "sourceFilterAddress" == ~0, else SSM -
+// registers it by socket number, and adds it to our address/port table.
+// NOTE(review): on the intermediate failure paths (the 'break's) the
+// partially set up groupsock is still returned, without being added to
+// "fTable" - confirm that callers handle this.
+Groupsock* GroupsockLookupTable::AddNew(UsageEnvironment& env,
+                                        netAddressBits groupAddress,
+                                        netAddressBits sourceFilterAddress,
+                                        Port port, u_int8_t ttl) {
+  Groupsock* groupsock;
+  do {
+    struct in_addr groupAddr; groupAddr.s_addr = groupAddress;
+    if (sourceFilterAddress == netAddressBits(~0)) {
+      // regular, ISM groupsock
+      groupsock = new Groupsock(env, groupAddr, port, ttl);
+    } else {
+      // SSM groupsock
+      struct in_addr sourceFilterAddr;
+      sourceFilterAddr.s_addr = sourceFilterAddress;
+      groupsock = new Groupsock(env, groupAddr, sourceFilterAddr, port);
+    }
+
+    // Bail out if construction failed, or the socket couldn't be opened:
+    if (groupsock == NULL || groupsock->socketNum() < 0) break;
+
+    if (!setGroupsockBySocket(env, groupsock->socketNum(), groupsock)) break;
+
+    fTable.Add(groupAddress, sourceFilterAddress, port, (void*)groupsock);
+  } while (0);
+
+  return groupsock;
+}
+
+// Iterates over all Groupsocks in "groupsocks" (in hash-table order).
+GroupsockLookupTable::Iterator::Iterator(GroupsockLookupTable& groupsocks)
+  : fIter(AddressPortLookupTable::Iterator(groupsocks.fTable)) {
+}
+
+// Returns the next Groupsock, or NULL when the iteration is exhausted.
+Groupsock* GroupsockLookupTable::Iterator::next() {
+  return (Groupsock*) fIter.next();
+} // (removed a stray ';' that followed this function body)
diff --git a/groupsock/GroupsockHelper.cpp b/groupsock/GroupsockHelper.cpp
new file mode 100644
index 0000000..1ddf91f
--- /dev/null
+++ b/groupsock/GroupsockHelper.cpp
@@ -0,0 +1,898 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Helper routines to implement 'group sockets'
+// Implementation
+
+#include "GroupsockHelper.hh"
+
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(__MINGW32__)
+#include <time.h>
+extern "C" int initializeWinsockIfNecessary();
+#else
+#include <stdarg.h>
+#include <time.h>
+#include <sys/time.h>
+#if !defined(_WIN32)
+#include <netinet/tcp.h>
+#ifdef __ANDROID_NDK__
+#include <android/ndk-version.h>
+#define ANDROID_OLD_NDK __NDK_MAJOR__ < 17
+#endif
+#endif
+#include <fcntl.h>
+#define initializeWinsockIfNecessary() 1
+#endif
+#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
+#else
+#include <signal.h>
+#define USE_SIGNALS 1
+#endif
+#include <stdio.h>
+
+// By default, use INADDR_ANY for the sending and receiving interfaces:
+netAddressBits SendingInterfaceAddr = INADDR_ANY;
+netAddressBits ReceivingInterfaceAddr = INADDR_ANY;
+
+static void socketErr(UsageEnvironment& env, char const* errorMsg) {
+ env.setResultErrMsg(errorMsg);
+}
+
+// While a "NoReuse" object is alive, newly created sockets are set up with
+// reuseFlag == 0 (i.e., SO_REUSEADDR disabled):
+NoReuse::NoReuse(UsageEnvironment& env)
+  : fEnv(env) {
+  groupsockPriv(fEnv)->reuseFlag = 0;
+}
+
+NoReuse::~NoReuse() {
+  // Restore the default (reuse allowed), and release the private state if
+  // it is no longer needed:
+  groupsockPriv(fEnv)->reuseFlag = 1;
+  reclaimGroupsockPriv(fEnv);
+}
+
+
+// Returns this environment's private groupsock state, creating it (with
+// default values) on first use.
+_groupsockPriv* groupsockPriv(UsageEnvironment& env) {
+  if (env.groupsockPriv == NULL) { // We need to create it
+    _groupsockPriv* result = new _groupsockPriv;
+    result->socketTable = NULL;
+    result->reuseFlag = 1; // default value => allow reuse of socket numbers
+    env.groupsockPriv = result;
+  }
+  return (_groupsockPriv*)(env.groupsockPriv);
+}
+
+// Deletes the private state iff it is back to its default (empty)
+// configuration.  NOTE(review): assumes groupsockPriv() was called earlier
+// for this "env" (i.e., "env.groupsockPriv" is non-NULL here).
+void reclaimGroupsockPriv(UsageEnvironment& env) {
+  _groupsockPriv* priv = (_groupsockPriv*)(env.groupsockPriv);
+  if (priv->socketTable == NULL && priv->reuseFlag == 1/*default value*/) {
+    // We can delete the structure (to save space); it will get created again, if needed:
+    delete priv;
+    env.groupsockPriv = NULL;
+  }
+}
+
+// Creates an IPv4 socket of "type" (SOCK_DGRAM or SOCK_STREAM), with the
+// 'close on exec' property set where the platform supports it.  Returns the
+// socket number, or -1 on failure.
+static int createSocket(int type) {
+  // Call "socket()" to create a (IPv4) socket of the specified type.
+  // But also set it to have the 'close on exec' property (if we can)
+  int sock;
+
+#ifdef SOCK_CLOEXEC
+  sock = socket(AF_INET, type|SOCK_CLOEXEC, 0);
+  if (sock != -1 || errno != EINVAL) return sock;
+  // An "errno" of EINVAL likely means that the system wasn't happy with the SOCK_CLOEXEC; fall through and try again without it:
+#endif
+
+  sock = socket(AF_INET, type, 0);
+#ifdef FD_CLOEXEC
+  // Fall back to setting close-on-exec after the fact:
+  if (sock != -1) fcntl(sock, F_SETFD, FD_CLOEXEC);
+#endif
+  return sock;
+}
+
+// Creates, configures, and (where needed) binds a UDP socket on "port".
+// Applies the environment's current reuse flag, enables multicast loopback,
+// and sets the outgoing multicast interface if one was configured.  Returns
+// the socket number, or -1 on failure (with an error message set in "env").
+// NOTE: several '{'/'}' pairs below are split across "#if" branches on
+// purpose - Windows always binds, other platforms bind conditionally.
+int setupDatagramSocket(UsageEnvironment& env, Port port) {
+  if (!initializeWinsockIfNecessary()) {
+    socketErr(env, "Failed to initialize 'winsock': ");
+    return -1;
+  }
+
+  int newSocket = createSocket(SOCK_DGRAM);
+  if (newSocket < 0) {
+    socketErr(env, "unable to create datagram socket: ");
+    return newSocket;
+  }
+
+  int reuseFlag = groupsockPriv(env)->reuseFlag;
+  reclaimGroupsockPriv(env);
+  if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEADDR,
+                 (const char*)&reuseFlag, sizeof reuseFlag) < 0) {
+    socketErr(env, "setsockopt(SO_REUSEADDR) error: ");
+    closeSocket(newSocket);
+    return -1;
+  }
+
+#if defined(__WIN32__) || defined(_WIN32)
+  // Windoze doesn't properly handle SO_REUSEPORT or IP_MULTICAST_LOOP
+#else
+#ifdef SO_REUSEPORT
+  if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEPORT,
+                 (const char*)&reuseFlag, sizeof reuseFlag) < 0) {
+    socketErr(env, "setsockopt(SO_REUSEPORT) error: ");
+    closeSocket(newSocket);
+    return -1;
+  }
+#endif
+
+#ifdef IP_MULTICAST_LOOP
+  // Enable loopback of our own multicast sends (we rely on detecting these
+  // elsewhere - see "wasLoopedBackFromUs()"):
+  const u_int8_t loop = 1;
+  if (setsockopt(newSocket, IPPROTO_IP, IP_MULTICAST_LOOP,
+                 (const char*)&loop, sizeof loop) < 0) {
+    socketErr(env, "setsockopt(IP_MULTICAST_LOOP) error: ");
+    closeSocket(newSocket);
+    return -1;
+  }
+#endif
+#endif
+
+  // Note: Windoze requires binding, even if the port number is 0
+  netAddressBits addr = INADDR_ANY;
+#if defined(__WIN32__) || defined(_WIN32)
+#else
+  if (port.num() != 0 || ReceivingInterfaceAddr != INADDR_ANY) {
+#endif
+    if (port.num() == 0) addr = ReceivingInterfaceAddr;
+    MAKE_SOCKADDR_IN(name, addr, port.num());
+    if (bind(newSocket, (struct sockaddr*)&name, sizeof name) != 0) {
+      char tmpBuffer[100];
+      sprintf(tmpBuffer, "bind() error (port number: %d): ",
+              ntohs(port.num()));
+      socketErr(env, tmpBuffer);
+      closeSocket(newSocket);
+      return -1;
+    }
+#if defined(__WIN32__) || defined(_WIN32)
+#else
+  }
+#endif
+
+  // Set the sending interface for multicasts, if it's not the default:
+  if (SendingInterfaceAddr != INADDR_ANY) {
+    struct in_addr addr;
+    addr.s_addr = SendingInterfaceAddr;
+
+    if (setsockopt(newSocket, IPPROTO_IP, IP_MULTICAST_IF,
+                   (const char*)&addr, sizeof addr) < 0) {
+      socketErr(env, "error setting outgoing multicast interface: ");
+      closeSocket(newSocket);
+      return -1;
+    }
+  }
+
+  return newSocket;
+}
+
+// Puts "sock" into non-blocking mode, using the platform-appropriate call.
+// Returns True on success.
+Boolean makeSocketNonBlocking(int sock) {
+#if defined(__WIN32__) || defined(_WIN32)
+  unsigned long arg = 1;
+  return ioctlsocket(sock, FIONBIO, &arg) == 0;
+#elif defined(VXWORKS)
+  int arg = 1;
+  return ioctl(sock, FIONBIO, (int)&arg) == 0;
+#else
+  // POSIX: add O_NONBLOCK to the existing file-status flags:
+  int curFlags = fcntl(sock, F_GETFL, 0);
+  return fcntl(sock, F_SETFL, curFlags|O_NONBLOCK) >= 0;
+#endif
+}
+
+// Puts "sock" back into blocking mode, and - if "writeTimeoutInMilliseconds"
+// is nonzero and SO_SNDTIMEO is available - sets a send timeout.  The return
+// value reflects only the blocking-mode change.
+Boolean makeSocketBlocking(int sock, unsigned writeTimeoutInMilliseconds) {
+  Boolean result;
+#if defined(__WIN32__) || defined(_WIN32)
+  unsigned long arg = 0;
+  result = ioctlsocket(sock, FIONBIO, &arg) == 0;
+#elif defined(VXWORKS)
+  int arg = 0;
+  result = ioctl(sock, FIONBIO, (int)&arg) == 0;
+#else
+  // POSIX: clear O_NONBLOCK from the existing file-status flags:
+  int curFlags = fcntl(sock, F_GETFL, 0);
+  result = fcntl(sock, F_SETFL, curFlags&(~O_NONBLOCK)) >= 0;
+#endif
+
+  if (writeTimeoutInMilliseconds > 0) {
+#ifdef SO_SNDTIMEO
+#if defined(__WIN32__) || defined(_WIN32)
+    // Windows takes the timeout as a DWORD of milliseconds:
+    DWORD msto = (DWORD)writeTimeoutInMilliseconds;
+    setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&msto, sizeof(msto) );
+#else
+    // POSIX takes the timeout as a "struct timeval":
+    struct timeval tv;
+    tv.tv_sec = writeTimeoutInMilliseconds/1000;
+    tv.tv_usec = (writeTimeoutInMilliseconds%1000)*1000;
+    setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, (char *)&tv, sizeof tv);
+#endif
+#endif
+  }
+
+  return result;
+}
+
+// Enables TCP keep-alive on "sock" (first probe after 180s idle, up to 5
+// probes, 20s apart - where the platform exposes those knobs).  Returns
+// False on any setsockopt() failure.  A no-op (always True) on Windows.
+Boolean setSocketKeepAlive(int sock) {
+#if defined(__WIN32__) || defined(_WIN32)
+  // How do we do this in Windows?  For now, just make this a no-op in Windows:
+#else
+  int const keepalive_enabled = 1;
+  if (setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (void*)&keepalive_enabled, sizeof keepalive_enabled) < 0) {
+    return False;
+  }
+
+#ifdef TCP_KEEPIDLE
+  // Idle seconds before the first keep-alive probe:
+  int const keepalive_time = 180;
+  if (setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, (void*)&keepalive_time, sizeof keepalive_time) < 0) {
+    return False;
+  }
+#endif
+
+#ifdef TCP_KEEPCNT
+  // Number of unanswered probes before the connection is dropped:
+  int const keepalive_count = 5;
+  if (setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT, (void*)&keepalive_count, sizeof keepalive_count) < 0) {
+    return False;
+  }
+#endif
+
+#ifdef TCP_KEEPINTVL
+  // Seconds between successive probes:
+  int const keepalive_interval = 20;
+  if (setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL, (void*)&keepalive_interval, sizeof keepalive_interval) < 0) {
+    return False;
+  }
+#endif
+#endif
+
+  return True;
+}
+
+// Creates, configures, and (where needed) binds a TCP socket on "port",
+// optionally making it non-blocking and enabling keep-alive.  Returns the
+// socket number, or -1 on failure (with an error message set in "env").
+// NOTE: as in "setupDatagramSocket()", some braces below are split across
+// "#if" branches on purpose.
+int setupStreamSocket(UsageEnvironment& env,
+                      Port port, Boolean makeNonBlocking, Boolean setKeepAlive) {
+  if (!initializeWinsockIfNecessary()) {
+    socketErr(env, "Failed to initialize 'winsock': ");
+    return -1;
+  }
+
+  int newSocket = createSocket(SOCK_STREAM);
+  if (newSocket < 0) {
+    socketErr(env, "unable to create stream socket: ");
+    return newSocket;
+  }
+
+  int reuseFlag = groupsockPriv(env)->reuseFlag;
+  reclaimGroupsockPriv(env);
+  if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEADDR,
+                 (const char*)&reuseFlag, sizeof reuseFlag) < 0) {
+    socketErr(env, "setsockopt(SO_REUSEADDR) error: ");
+    closeSocket(newSocket);
+    return -1;
+  }
+
+  // SO_REUSEPORT doesn't really make sense for TCP sockets, so we
+  // normally don't set them.  However, if you really want to do this
+  // #define REUSE_FOR_TCP
+#ifdef REUSE_FOR_TCP
+#if defined(__WIN32__) || defined(_WIN32)
+  // Windoze doesn't properly handle SO_REUSEPORT
+#else
+#ifdef SO_REUSEPORT
+  if (setsockopt(newSocket, SOL_SOCKET, SO_REUSEPORT,
+                 (const char*)&reuseFlag, sizeof reuseFlag) < 0) {
+    socketErr(env, "setsockopt(SO_REUSEPORT) error: ");
+    closeSocket(newSocket);
+    return -1;
+  }
+#endif
+#endif
+#endif
+
+  // Note: Windoze requires binding, even if the port number is 0
+#if defined(__WIN32__) || defined(_WIN32)
+#else
+  if (port.num() != 0 || ReceivingInterfaceAddr != INADDR_ANY) {
+#endif
+    MAKE_SOCKADDR_IN(name, ReceivingInterfaceAddr, port.num());
+    if (bind(newSocket, (struct sockaddr*)&name, sizeof name) != 0) {
+      char tmpBuffer[100];
+      sprintf(tmpBuffer, "bind() error (port number: %d): ",
+              ntohs(port.num()));
+      socketErr(env, tmpBuffer);
+      closeSocket(newSocket);
+      return -1;
+    }
+#if defined(__WIN32__) || defined(_WIN32)
+#else
+  }
+#endif
+
+  if (makeNonBlocking) {
+    if (!makeSocketNonBlocking(newSocket)) {
+      socketErr(env, "failed to make non-blocking: ");
+      closeSocket(newSocket);
+      return -1;
+    }
+  }
+
+  // Set the keep alive mechanism for the TCP socket, to avoid "ghost sockets"
+  //    that remain after an interrupted communication.
+  if (setKeepAlive) {
+    if (!setSocketKeepAlive(newSocket)) {
+      socketErr(env, "failed to set keep alive: ");
+      closeSocket(newSocket);
+      return -1;
+    }
+  }
+
+  return newSocket;
+}
+
+// Reads one datagram from "socket" into "buffer", filling in "fromAddress".
+// Returns the byte count; 0 for the benign would-block/refused cases handled
+// below; or a negative value for a real error (with a message set in "env").
+int readSocket(UsageEnvironment& env,
+               int socket, unsigned char* buffer, unsigned bufferSize,
+               struct sockaddr_in& fromAddress) {
+  SOCKLEN_T addressSize = sizeof fromAddress;
+  int bytesRead = recvfrom(socket, (char*)buffer, bufferSize, 0,
+                           (struct sockaddr*)&fromAddress,
+                           &addressSize);
+  if (bytesRead < 0) {
+    //##### HACK to work around bugs in Linux and Windows:
+    int err = env.getErrno();
+    if (err == 111 /*ECONNREFUSED (Linux)*/
+#if defined(__WIN32__) || defined(_WIN32)
+        // What a piece of crap Windows is.  Sometimes
+        // recvfrom() returns -1, but with an 'errno' of 0.
+        // This appears not to be a real error; just treat
+        // it as if it were a read of zero bytes, and hope
+        // we don't have to do anything else to 'reset'
+        // this alleged error:
+        || err == 0 || err == EWOULDBLOCK
+#else
+        || err == EAGAIN
+#endif
+        || err == 113 /*EHOSTUNREACH (Linux)*/) { // Why does Linux return this for datagram sock?
+      fromAddress.sin_addr.s_addr = 0;
+      return 0;
+    }
+    //##### END HACK
+    // A real error: record a message, then fall through and return the
+    // (negative) "bytesRead" to the caller:
+    socketErr(env, "recvfrom() error: ");
+  } else if (bytesRead == 0) {
+    // "recvfrom()" on a stream socket can return 0 if the remote end has closed the connection.  Treat this as an error:
+    return -1;
+  }
+
+  return bytesRead;
+}
+
+// Sets the socket's multicast TTL to "ttlArg", then sends via the TTL-less
+// "writeSocket()" overload below.  Returns False if either step fails.
+Boolean writeSocket(UsageEnvironment& env,
+                    int socket, struct in_addr address, portNumBits portNum,
+                    u_int8_t ttlArg,
+                    unsigned char* buffer, unsigned bufferSize) {
+  // Before sending, set the socket's TTL:
+#if defined(__WIN32__) || defined(_WIN32)
+#define TTL_TYPE int
+#else
+#define TTL_TYPE u_int8_t
+#endif
+  // (Windows expects an "int" option value here; other platforms a u_int8_t.)
+  TTL_TYPE ttl = (TTL_TYPE)ttlArg;
+  if (setsockopt(socket, IPPROTO_IP, IP_MULTICAST_TTL,
+                 (const char*)&ttl, sizeof ttl) < 0) {
+    socketErr(env, "setsockopt(IP_MULTICAST_TTL) error: ");
+    return False;
+  }
+
+  return writeSocket(env, socket, address, portNum, buffer, bufferSize);
+}
+
+// Sends "buffer" (of length "bufferSize") on "socket" to "address":"portNum"
+// (network byte order).  Returns False (with an error message set in "env")
+// if the datagram could not be sent in full.
+Boolean writeSocket(UsageEnvironment& env,
+                    int socket, struct in_addr address, portNumBits portNum,
+                    unsigned char* buffer, unsigned bufferSize) {
+  do {
+    MAKE_SOCKADDR_IN(dest, address.s_addr, portNum);
+    int bytesSent = sendto(socket, (char*)buffer, bufferSize, 0,
+                           (struct sockaddr*)&dest, sizeof dest);
+    if (bytesSent != (int)bufferSize) {
+      char tmpBuf[100];
+      // Use snprintf() (not sprintf()) so the message can never overflow "tmpBuf":
+      snprintf(tmpBuf, sizeof tmpBuf, "writeSocket(%d), sendTo() error: wrote %d bytes instead of %u: ", socket, bytesSent, bufferSize);
+      socketErr(env, tmpBuf);
+      break;
+    }
+
+    return True;
+  } while (0);
+
+  return False;
+}
+
+// Prevents SIGPIPE from killing the process when writing to a closed socket:
+// per-socket SO_NOSIGPIPE where available, otherwise a process-wide SIG_IGN.
+void ignoreSigPipeOnSocket(int socketNum) {
+  #ifdef USE_SIGNALS
+  #ifdef SO_NOSIGPIPE
+  int set_option = 1;
+  setsockopt(socketNum, SOL_SOCKET, SO_NOSIGPIPE, &set_option, sizeof set_option);
+  #else
+  // No per-socket option on this platform; ignore the signal process-wide:
+  signal(SIGPIPE, SIG_IGN);
+  #endif
+  #endif
+}
+
+// Queries the socket-level buffer-size option "bufOptName" (SO_SNDBUF or
+// SO_RCVBUF) for "socket".  Returns 0 (with an error message) on failure.
+static unsigned getBufferSize(UsageEnvironment& env, int bufOptName,
+                              int socket) {
+  unsigned curSize;
+  SOCKLEN_T sizeSize = sizeof curSize;
+  int ret = getsockopt(socket, SOL_SOCKET, bufOptName,
+                       (char*)&curSize, &sizeSize);
+  if (ret < 0) {
+    socketErr(env, "getBufferSize() error: ");
+    return 0;
+  }
+  return curSize;
+}
+unsigned getSendBufferSize(UsageEnvironment& env, int socket) {
+  return getBufferSize(env, SO_SNDBUF, socket);
+}
+unsigned getReceiveBufferSize(UsageEnvironment& env, int socket) {
+  return getBufferSize(env, SO_RCVBUF, socket);
+}
+
+// Sets buffer-size option "bufOptName" to "requestedSize" (best effort),
+// then reports the size the OS actually chose.
+static unsigned setBufferTo(UsageEnvironment& env, int bufOptName,
+                            int socket, unsigned requestedSize) {
+  SOCKLEN_T sizeSize = sizeof requestedSize;
+  (void)setsockopt(socket, SOL_SOCKET, bufOptName, (char*)&requestedSize, sizeSize);
+
+  // Get and return the actual, resulting buffer size:
+  return getBufferSize(env, bufOptName, socket);
+}
+unsigned setSendBufferTo(UsageEnvironment& env,
+                         int socket, unsigned requestedSize) {
+  return setBufferTo(env, SO_SNDBUF, socket, requestedSize);
+}
+unsigned setReceiveBufferTo(UsageEnvironment& env,
+                            int socket, unsigned requestedSize) {
+  return setBufferTo(env, SO_RCVBUF, socket, requestedSize);
+}
+
+// Tries to grow socket buffer "bufOptName" to "requestedSize"; if the OS
+// refuses, retries with successively smaller sizes (halfway toward the
+// current size each time).  Returns the resulting buffer size.
+static unsigned increaseBufferTo(UsageEnvironment& env, int bufOptName,
+                                 int socket, unsigned requestedSize) {
+  // First, get the current buffer size.  If it's already at least
+  // as big as what we're requesting, do nothing.
+  unsigned curSize = getBufferSize(env, bufOptName, socket);
+
+  // Next, try to increase the buffer to the requested size,
+  // or to some smaller size, if that's not possible:
+  while (requestedSize > curSize) {
+    SOCKLEN_T sizeSize = sizeof requestedSize;
+    if (setsockopt(socket, SOL_SOCKET, bufOptName,
+                   (char*)&requestedSize, sizeSize) >= 0) {
+      // success
+      return requestedSize;
+    }
+    requestedSize = (requestedSize+curSize)/2;
+  }
+
+  // Nothing was changed; report the current size:
+  return getBufferSize(env, bufOptName, socket);
+}
+unsigned increaseSendBufferTo(UsageEnvironment& env,
+                              int socket, unsigned requestedSize) {
+  return increaseBufferTo(env, SO_SNDBUF, socket, requestedSize);
+}
+unsigned increaseReceiveBufferTo(UsageEnvironment& env,
+                                 int socket, unsigned requestedSize) {
+  return increaseBufferTo(env, SO_RCVBUF, socket, requestedSize);
+}
+
+// Clears IP_MULTICAST_ALL (where defined) so we receive only packets sent to
+// the group(s) that this socket actually joined.  Best effort; see below.
+static void clearMulticastAllSocketOption(int socket) {
+#ifdef IP_MULTICAST_ALL
+  // This option is defined in modern versions of Linux to overcome a bug in the Linux kernel's default behavior.
+  // When set to 0, it ensures that we receive only packets that were sent to the specified IP multicast address,
+  // even if some other process on the same system has joined a different multicast group with the same port number.
+  int multicastAll = 0;
+  (void)setsockopt(socket, IPPROTO_IP, IP_MULTICAST_ALL, (void*)&multicastAll, sizeof multicastAll);
+  // Ignore the call's result.  Should it fail, we'll still receive packets (just perhaps more than intended)
+#endif
+}
+
+// Joins the multicast group "groupAddress" on "socket" (a no-op, returning
+// True, for non-multicast addresses).  Returns False on a real setsockopt()
+// failure.  NOTE: the '{'/'}' pairs below are split across "#if" branches on
+// purpose, to implement the Windows errno==0 workaround.
+Boolean socketJoinGroup(UsageEnvironment& env, int socket,
+                        netAddressBits groupAddress){
+  if (!IsMulticastAddress(groupAddress)) return True; // ignore this case
+
+  struct ip_mreq imr;
+  imr.imr_multiaddr.s_addr = groupAddress;
+  imr.imr_interface.s_addr = ReceivingInterfaceAddr;
+  if (setsockopt(socket, IPPROTO_IP, IP_ADD_MEMBERSHIP,
+                 (const char*)&imr, sizeof (struct ip_mreq)) < 0) {
+#if defined(__WIN32__) || defined(_WIN32)
+    if (env.getErrno() != 0) {
+      // That piece-of-shit toy operating system (Windows) sometimes lies
+      // about setsockopt() failing!
+#endif
+      socketErr(env, "setsockopt(IP_ADD_MEMBERSHIP) error: ");
+      return False;
+#if defined(__WIN32__) || defined(_WIN32)
+    }
+#endif
+  }
+
+  clearMulticastAllSocketOption(socket);
+
+  return True;
+}
+
+// Leaves the multicast group "groupAddress" on "socket".  Non-multicast
+// addresses are silently ignored (returning True).
+Boolean socketLeaveGroup(UsageEnvironment&, int socket,
+                         netAddressBits groupAddress) {
+  if (!IsMulticastAddress(groupAddress)) return True; // ignore this case
+
+  struct ip_mreq imr;
+  imr.imr_multiaddr.s_addr = groupAddress;
+  imr.imr_interface.s_addr = ReceivingInterfaceAddr;
+  int ret = setsockopt(socket, IPPROTO_IP, IP_DROP_MEMBERSHIP,
+                       (const char*)&imr, sizeof (struct ip_mreq));
+  return ret >= 0;
+}
+
+// The source-specific join/leave operations require special setsockopt()
+// commands, and a special structure (ip_mreq_source). If the include files
+// didn't define these, we do so here:
+#if !defined(IP_ADD_SOURCE_MEMBERSHIP)
+// Compatibility shim: define "ip_mreq_source" ourselves when the system
+// headers don't provide it:
+struct ip_mreq_source {
+  struct in_addr imr_multiaddr;  /* IP multicast address of group */
+  struct in_addr imr_sourceaddr; /* IP address of source */
+  struct in_addr imr_interface;  /* local IP address of interface */
+};
+#endif
+
+#ifndef IP_ADD_SOURCE_MEMBERSHIP
+
+// Fallback option values - presumably matching the platform's native values
+// (TODO(review): verify 39/40 for Linux and 25/26 elsewhere):
+#ifdef LINUX
+#define IP_ADD_SOURCE_MEMBERSHIP 39
+#define IP_DROP_SOURCE_MEMBERSHIP 40
+#else
+#define IP_ADD_SOURCE_MEMBERSHIP 25
+#define IP_DROP_SOURCE_MEMBERSHIP 26
+#endif
+
+#endif
+
+// Performs a source-specific multicast (SSM) join of "groupAddress", filtered
+// to sender "sourceFilterAddr".  A no-op (returning True) for non-multicast
+// addresses.  Returns False on a setsockopt() failure.
+Boolean socketJoinGroupSSM(UsageEnvironment& env, int socket,
+                           netAddressBits groupAddress,
+                           netAddressBits sourceFilterAddr) {
+  if (!IsMulticastAddress(groupAddress)) return True; // ignore this case
+
+  struct ip_mreq_source imr;
+#if ANDROID_OLD_NDK
+  // Old Android NDKs declare these fields as plain integers, not in_addr:
+  imr.imr_multiaddr = groupAddress;
+  imr.imr_sourceaddr = sourceFilterAddr;
+  imr.imr_interface = ReceivingInterfaceAddr;
+#else
+  imr.imr_multiaddr.s_addr = groupAddress;
+  imr.imr_sourceaddr.s_addr = sourceFilterAddr;
+  imr.imr_interface.s_addr = ReceivingInterfaceAddr;
+#endif
+  if (setsockopt(socket, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
+                 (const char*)&imr, sizeof (struct ip_mreq_source)) < 0) {
+    socketErr(env, "setsockopt(IP_ADD_SOURCE_MEMBERSHIP) error: ");
+    return False;
+  }
+
+  clearMulticastAllSocketOption(socket);
+
+  return True;
+}
+
+// Undoes a source-specific multicast join (see "socketJoinGroupSSM()").
+// A no-op (returning True) for non-multicast addresses; no error message is
+// recorded on failure.
+Boolean socketLeaveGroupSSM(UsageEnvironment& /*env*/, int socket,
+                            netAddressBits groupAddress,
+                            netAddressBits sourceFilterAddr) {
+  if (!IsMulticastAddress(groupAddress)) return True; // ignore this case
+
+  struct ip_mreq_source imr;
+#if ANDROID_OLD_NDK
+  // Old Android NDKs declare these fields as plain integers, not in_addr:
+  imr.imr_multiaddr = groupAddress;
+  imr.imr_sourceaddr = sourceFilterAddr;
+  imr.imr_interface = ReceivingInterfaceAddr;
+#else
+  imr.imr_multiaddr.s_addr = groupAddress;
+  imr.imr_sourceaddr.s_addr = sourceFilterAddr;
+  imr.imr_interface.s_addr = ReceivingInterfaceAddr;
+#endif
+  if (setsockopt(socket, IPPROTO_IP, IP_DROP_SOURCE_MEMBERSHIP,
+                 (const char*)&imr, sizeof (struct ip_mreq_source)) < 0) {
+    return False;
+  }
+
+  return True;
+}
+
+// Helper: queries the local port that "socket" is bound to, via
+// getsockname(), returning it (host byte order) in "resultPortNum".
+// Returns False if getsockname() fails; may yield 0 if the socket
+// is not yet bound (the caller checks for that case).
+static Boolean getSourcePort0(int socket, portNumBits& resultPortNum/*host order*/) {
+  sockaddr_in test; test.sin_port = 0;
+  SOCKLEN_T len = sizeof test;
+  if (getsockname(socket, (struct sockaddr*)&test, &len) < 0) return False;
+
+  resultPortNum = ntohs(test.sin_port);
+  return True;
+}
+
+// Returns (in "port") the local port number that "socket" is bound to.
+// If the socket is not yet bound, it is first bound to an ephemeral port
+// (INADDR_ANY, port 0) so that a real port number can be obtained.
+// Returns False (reporting the error to "env") if this fails.
+Boolean getSourcePort(UsageEnvironment& env, int socket, Port& port) {
+  portNumBits portNum = 0;
+  if (!getSourcePort0(socket, portNum) || portNum == 0) {
+    // Hack - call bind(), then try again:
+    MAKE_SOCKADDR_IN(name, INADDR_ANY, 0);
+    bind(socket, (struct sockaddr*)&name, sizeof name);
+
+    if (!getSourcePort0(socket, portNum) || portNum == 0) {
+      socketErr(env, "getsockname() error: ");
+      return False;
+    }
+  }
+
+  port = Port(portNum);
+  return True;
+}
+
+// Returns True for addresses that cannot plausibly be 'our' IP address:
+// the loopback address (127.0.0.1), all-zeros, or all-ones.
+static Boolean badAddressForUs(netAddressBits addr) {
+  // Check for some possible erroneous addresses:
+  netAddressBits nAddr = htonl(addr);
+  return (nAddr == 0x7F000001 /* 127.0.0.1 */
+	  || nAddr == 0
+	  || nAddr == (netAddressBits)(~0));
+}
+
+// Set to 0 by "ourIPAddress()" if its multicast-loopback probe fails:
+Boolean loopbackWorks = 1;
+
+// Determines (and caches, in a function-level static) this host's IP address,
+// in network byte order.  Strategy, in order:
+//   1. If "ReceivingInterfaceAddr" was explicitly set, use that.
+//   2. Otherwise, probe via multicast loopback: send a packet to an arbitrary
+//      multicast group, receive it back, and use the source address seen.
+//   3. If loopback fails, fall back to gethostname() + name resolution.
+// On first successful determination, also seeds the random number generator
+// from the address and the current time.  Returns 0 on total failure
+// (after setting an error message in "env").
+netAddressBits ourIPAddress(UsageEnvironment& env) {
+  static netAddressBits ourAddress = 0;
+  int sock = -1;
+  struct in_addr testAddr;
+
+  if (ReceivingInterfaceAddr != INADDR_ANY) {
+    // Hack: If we were told to receive on a specific interface address, then 
+    // define this to be our ip address:
+    ourAddress = ReceivingInterfaceAddr;
+  }
+
+  if (ourAddress == 0) {
+    // We need to find our source address
+    struct sockaddr_in fromAddr;
+    fromAddr.sin_addr.s_addr = 0;
+
+    // Get our address by sending a (0-TTL) multicast packet,
+    // receiving it, and looking at the source address used.
+    // (This is kinda bogus, but it provides the best guarantee
+    // that other nodes will think our address is the same as we do.)
+    do {
+      loopbackWorks = 0; // until we learn otherwise
+
+#ifndef DISABLE_LOOPBACK_IP_ADDRESS_CHECK
+      testAddr.s_addr = our_inet_addr("228.67.43.91"); // arbitrary
+      Port testPort(15947); // ditto
+
+      sock = setupDatagramSocket(env, testPort);
+      if (sock < 0) break;
+
+      if (!socketJoinGroup(env, sock, testAddr.s_addr)) break;
+
+      unsigned char testString[] = "hostIdTest";
+      unsigned testStringLength = sizeof testString;
+
+      // Send with TTL 0, so the packet never leaves this host:
+      if (!writeSocket(env, sock, testAddr, testPort.num(), 0,
+		       testString, testStringLength)) break;
+
+      // Block until the socket is readable (with a 5-second timeout):
+      fd_set rd_set;
+      FD_ZERO(&rd_set);
+      FD_SET((unsigned)sock, &rd_set);
+      const unsigned numFds = sock+1;
+      struct timeval timeout;
+      timeout.tv_sec = 5;
+      timeout.tv_usec = 0;
+      int result = select(numFds, &rd_set, NULL, NULL, &timeout);
+      if (result <= 0) break;
+
+      unsigned char readBuffer[20];
+      int bytesRead = readSocket(env, sock,
+				 readBuffer, sizeof readBuffer,
+				 fromAddr);
+      // The packet must echo back exactly what we sent:
+      if (bytesRead != (int)testStringLength
+	  || strncmp((char*)readBuffer, (char*)testString, testStringLength) != 0) {
+	break;
+      }
+
+      // We use this packet's source address, if it's good:
+      loopbackWorks = !badAddressForUs(fromAddr.sin_addr.s_addr);
+#endif
+    } while (0);
+
+    // Clean up the probe socket (testAddr was set iff the socket was created):
+    if (sock >= 0) {
+      socketLeaveGroup(env, sock, testAddr.s_addr);
+      closeSocket(sock);
+    }
+
+    if (!loopbackWorks) do {
+      // We couldn't find our address using multicast loopback,
+      // so try instead to look it up directly - by first getting our host name, and then resolving this host name
+      char hostname[100];
+      hostname[0] = '\0';
+      int result = gethostname(hostname, sizeof hostname);
+      if (result != 0 || hostname[0] == '\0') {
+	env.setResultErrMsg("initial gethostname() failed");
+	break;
+      }
+
+      // Try to resolve "hostname" to an IP address:
+      NetAddressList addresses(hostname);
+      NetAddressList::Iterator iter(addresses);
+      NetAddress const* address;
+
+      // Take the first address that's not bad:
+      netAddressBits addr = 0;
+      while ((address = iter.nextAddress()) != NULL) {
+	netAddressBits a = *(netAddressBits*)(address->data());
+	if (!badAddressForUs(a)) {
+	  addr = a;
+	  break;
+	}
+      }
+
+      // Assign the address that we found to "fromAddr" (as if the 'loopback' method had worked), to simplify the code below:
+      fromAddr.sin_addr.s_addr = addr;
+    } while (0);
+
+    // Make sure we have a good address:
+    netAddressBits from = fromAddr.sin_addr.s_addr;
+    if (badAddressForUs(from)) {
+      char tmp[100];
+      sprintf(tmp, "This computer has an invalid IP address: %s", AddressString(from).val());
+      env.setResultMsg(tmp);
+      from = 0;
+    }
+
+    ourAddress = from;
+
+    // Use our newly-discovered IP address, and the current time,
+    // to initialize the random number generator's seed:
+    // (NOTE(review): this seeding is skipped when "ReceivingInterfaceAddr"
+    //  was set above — confirm that callers seed the RNG some other way.)
+    struct timeval timeNow;
+    gettimeofday(&timeNow, NULL);
+    unsigned seed = ourAddress^timeNow.tv_sec^timeNow.tv_usec;
+    our_srandom(seed);
+  }
+  return ourAddress;
+}
+
+// Returns a pseudo-random IPv4 source-specific-multicast (SSM) address in
+// the range [232.0.1.0, 232.255.255.255), in network byte order.
+netAddressBits chooseRandomIPv4SSMAddress(UsageEnvironment& env) {
+  // First, a hack to ensure that our random number generator is seeded:
+  (void) ourIPAddress(env);
+
+  // Choose a random address in the range [232.0.1.0, 232.255.255.255)
+  // i.e., [0xE8000100, 0xE8FFFFFF)
+  netAddressBits const first = 0xE8000100, lastPlus1 = 0xE8FFFFFF;
+  netAddressBits const range = lastPlus1 - first;
+
+  // NOTE(review): "ntohl" is used here to convert the host-order constant to
+  // network order; on all supported platforms ntohl and htonl perform the
+  // same byte swap, but "htonl" would express the intent more clearly.
+  return ntohl(first + ((netAddressBits)our_random())%range);
+}
+
+// Returns a pointer to a static "hh:mm:ss" string for the current time
+// (extracted from ctime()'s fixed-format output).  On WinCE — which lacks
+// ctime() — returns "seconds.microseconds" instead.  Not thread-safe:
+// the returned pointer refers to a shared static buffer.
+char const* timestampString() {
+  struct timeval tvNow;
+  gettimeofday(&tvNow, NULL);
+
+#if !defined(_WIN32_WCE)
+  static char timeString[9]; // holds hh:mm:ss plus trailing '\0'
+
+  time_t tvNow_t = tvNow.tv_sec;
+  char const* ctimeResult = ctime(&tvNow_t);
+  if (ctimeResult == NULL) {
+    sprintf(timeString, "??:??:??");
+  } else {
+    // ctime() output is "Www Mmm dd hh:mm:ss yyyy\n"; the time-of-day
+    // field starts at a fixed offset of 11 characters:
+    char const* from = &ctimeResult[11];
+    int i;
+    for (i = 0; i < 8; ++i) {
+      timeString[i] = from[i];
+    }
+    timeString[i] = '\0';
+  }
+#else
+  // WinCE apparently doesn't have "ctime()", so instead, construct
+  // a timestamp string just using the integer and fractional parts
+  // of "tvNow":
+  static char timeString[50];
+  sprintf(timeString, "%lu.%06ld", tvNow.tv_sec, tvNow.tv_usec);
+#endif
+
+  return (char const*)&timeString;
+}
+
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(__MINGW32__)
+// For Windoze, we need to implement our own gettimeofday()
+
+// used to make sure that static variables in gettimeofday() aren't initialized simultaneously by multiple threads
+static LONG initializeLock_gettimeofday = 0;
+
+#if !defined(_WIN32_WCE)
+#include <sys/timeb.h>
+#endif
+
+// Windows replacement for POSIX gettimeofday().
+// First call: captures a wall-clock epoch (via ftime() on desktop Windows,
+// or GetSystemTimeAsFileTime() on WinCE) and the performance-counter
+// frequency, protected against concurrent initialization by a spin on
+// "initializeLock_gettimeofday".  Subsequent calls: converts the current
+// performance-counter (or GetTickCount()) value, offset by the captured
+// epoch, into seconds/microseconds.  Always returns 0.
+int gettimeofday(struct timeval* tp, int* /*tz*/) {
+  static LARGE_INTEGER tickFrequency, epochOffset;
+
+  static Boolean isInitialized = False;
+
+  LARGE_INTEGER tickNow;
+
+#if !defined(_WIN32_WCE)
+  QueryPerformanceCounter(&tickNow);
+#else
+  tickNow.QuadPart = GetTickCount();
+#endif
+ 
+  if (!isInitialized) {
+    // Only the first thread to increment the lock performs initialization;
+    // any others back off and wait for "isInitialized" below:
+    if(1 == InterlockedIncrement(&initializeLock_gettimeofday)) {
+#if !defined(_WIN32_WCE)
+      // For our first call, use "ftime()", so that we get a time with a proper epoch.
+      // For subsequent calls, use "QueryPerformanceCount()", because it's more fine-grain.
+      struct timeb tb;
+      ftime(&tb);
+      tp->tv_sec = tb.time;
+      tp->tv_usec = 1000*tb.millitm;
+
+      // Also get our counter frequency:
+      QueryPerformanceFrequency(&tickFrequency);
+#else
+      /* FILETIME of Jan 1 1970 00:00:00. */
+      const LONGLONG epoch = 116444736000000000LL;
+      FILETIME fileTime;
+      LARGE_INTEGER time;
+      GetSystemTimeAsFileTime(&fileTime);
+
+      time.HighPart = fileTime.dwHighDateTime;
+      time.LowPart = fileTime.dwLowDateTime;
+
+      // convert to from 100ns time to unix timestamp in seconds, 1000*1000*10
+      tp->tv_sec = (long)((time.QuadPart - epoch) / 10000000L);
+
+      /*
+        GetSystemTimeAsFileTime has just a seconds resolution,
+        thats why wince-version of gettimeofday is not 100% accurate, usec accuracy would be calculated like this:
+        // convert 100 nanoseconds to usec
+        tp->tv_usec= (long)((time.QuadPart - epoch)%10000000L) / 10L;
+      */
+      tp->tv_usec = 0;
+
+      // resolution of GetTickCounter() is always milliseconds
+      tickFrequency.QuadPart = 1000;
+#endif     
+      // compute an offset to add to subsequent counter times, so we get a proper epoch:
+      epochOffset.QuadPart
+        = tp->tv_sec * tickFrequency.QuadPart + (tp->tv_usec * tickFrequency.QuadPart) / 1000000L - tickNow.QuadPart;
+
+      // next caller can use ticks for time calculation
+      isInitialized = True; 
+      return 0;
+    } else {
+      InterlockedDecrement(&initializeLock_gettimeofday);
+      // wait until first caller has initialized static values
+      while(!isInitialized){
+        Sleep(1);
+      }
+    }
+  }
+
+  // adjust our tick count so that we get a proper epoch:
+  tickNow.QuadPart += epochOffset.QuadPart;
+
+  tp->tv_sec =  (long)(tickNow.QuadPart / tickFrequency.QuadPart);
+  tp->tv_usec = (long)(((tickNow.QuadPart % tickFrequency.QuadPart) * 1000000L) / tickFrequency.QuadPart);
+
+  return 0;
+}
+#endif
+#undef ANDROID_OLD_NDK
diff --git a/groupsock/IOHandlers.cpp b/groupsock/IOHandlers.cpp
new file mode 100644
index 0000000..e69c43e
--- /dev/null
+++ b/groupsock/IOHandlers.cpp
@@ -0,0 +1,46 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// IO event handlers
+// Implementation
+
+#include "IOHandlers.hh"
+#include "TunnelEncaps.hh"
+
+//##### TEMP: Use a single buffer, sized for UDP tunnels:
+//##### This assumes that the I/O handlers are non-reentrant
+static unsigned const maxPacketLength = 50*1024; // bytes
+  // This is usually overkill, because UDP packets are usually no larger
+  // than the typical Ethernet MTU (1500 bytes).  However, I've seen
+  // reports of Windows Media Servers sending UDP packets as large as
+  // 27 kBytes.  These will probably undego lots of IP-level
+  // fragmentation, but that occurs below us.  We just have to hope that
+  // fragments don't get lost.
+static unsigned const ioBufferSize
+  = maxPacketLength + TunnelEncapsulationTrailerMaxSize;
+static unsigned char ioBuffer[ioBufferSize];
+
+
+// Read-event handler: reads one packet from "sock" into the shared
+// "ioBuffer", reporting any read failure via the socket's environment.
+void socketReadHandler(Socket* sock, int /*mask*/) {
+  unsigned bytesRead;
+  struct sockaddr_in fromAddress;
+  // Save the environment reference up front,
+  // because handleRead(), if it fails, may delete "sock":
+  UsageEnvironment& saveEnv = sock->env();
+  if (!sock->handleRead(ioBuffer, ioBufferSize, bytesRead, fromAddress)) {
+    saveEnv.reportBackgroundError();
+  }
+}
diff --git a/groupsock/Makefile.head b/groupsock/Makefile.head
new file mode 100644
index 0000000..219f685
--- /dev/null
+++ b/groupsock/Makefile.head
@@ -0,0 +1,4 @@
+INCLUDES = -Iinclude -I../UsageEnvironment/include
+PREFIX = /usr/local
+LIBDIR = $(PREFIX)/lib
+##### Change the following for your environment:
diff --git a/groupsock/Makefile.tail b/groupsock/Makefile.tail
new file mode 100644
index 0000000..89a8593
--- /dev/null
+++ b/groupsock/Makefile.tail
@@ -0,0 +1,45 @@
+##### End of variables to change
+
+NAME = libgroupsock
+ALL = $(NAME).$(LIB_SUFFIX)
+all: $(ALL)
+
+# Suffix rules: compile C and C++ sources with the configured compilers.
+.$(C).$(OBJ):
+	$(C_COMPILER) -c $(C_FLAGS) $<
+.$(CPP).$(OBJ):
+	$(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+GROUPSOCK_LIB_OBJS = GroupsockHelper.$(OBJ) GroupEId.$(OBJ) inet.$(OBJ) Groupsock.$(OBJ) NetInterface.$(OBJ) NetAddress.$(OBJ) IOHandlers.$(OBJ)
+
+# Header dependencies (hand-maintained):
+GroupsockHelper.$(CPP):	include/GroupsockHelper.hh
+include/GroupsockHelper.hh:	include/NetAddress.hh
+include/NetAddress.hh:	include/NetCommon.h
+GroupEId.$(CPP):	include/GroupEId.hh
+include/GroupEId.hh:	include/NetAddress.hh
+inet.$(C):		include/NetCommon.h
+Groupsock.$(CPP):	include/Groupsock.hh include/GroupsockHelper.hh include/TunnelEncaps.hh
+include/Groupsock.hh:	include/groupsock_version.hh include/NetInterface.hh include/GroupEId.hh
+include/NetInterface.hh:	include/NetAddress.hh
+include/TunnelEncaps.hh:	include/NetAddress.hh
+NetInterface.$(CPP):	include/NetInterface.hh include/GroupsockHelper.hh
+NetAddress.$(CPP):	include/NetAddress.hh include/GroupsockHelper.hh
+IOHandlers.$(CPP):	include/IOHandlers.hh include/TunnelEncaps.hh
+
+libgroupsock.$(LIB_SUFFIX): $(GROUPSOCK_LIB_OBJS) \
+    $(PLATFORM_SPECIFIC_LIB_OBJS)
+	$(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \
+		$(GROUPSOCK_LIB_OBJS)
+
+clean:
+	-rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+# "install1" copies headers and the library; "install_shared_libraries"
+# (selected via $(INSTALL2) on shared-library platforms) adds soname links.
+install: install1 $(INSTALL2)
+install1: libgroupsock.$(LIB_SUFFIX)
+	  install -d $(DESTDIR)$(PREFIX)/include/groupsock $(DESTDIR)$(LIBDIR)
+	  install -m 644 include/*.hh include/*.h $(DESTDIR)$(PREFIX)/include/groupsock
+	  install -m 644 libgroupsock.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)
+install_shared_libraries: libgroupsock.$(LIB_SUFFIX)
+	  ln -fs libgroupsock.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/libgroupsock.$(SHORT_LIB_SUFFIX)
+	  ln -fs libgroupsock.$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/libgroupsock.so
+
+##### Any additional, platform-specific rules come here:
diff --git a/groupsock/NetAddress.cpp b/groupsock/NetAddress.cpp
new file mode 100644
index 0000000..d99e29c
--- /dev/null
+++ b/groupsock/NetAddress.cpp
@@ -0,0 +1,312 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Network Addresses
+// Implementation
+
+#include "NetAddress.hh"
+#include "GroupsockHelper.hh"
+
+#include <stddef.h>
+#include <stdio.h>
+#if defined(__WIN32__) || defined(_WIN32)
+#define USE_GETHOSTBYNAME 1 /*because at least some Windows don't have getaddrinfo()*/
+#else
+#ifndef INADDR_NONE
+#define INADDR_NONE 0xFFFFFFFF
+#endif
+#endif
+
+////////// NetAddress //////////
+
+// Constructs a NetAddress from a copy of "length" bytes at "data".
+NetAddress::NetAddress(u_int8_t const* data, unsigned length) {
+  assign(data, length);
+}
+
+// Constructs a zero-filled NetAddress of "length" bytes.
+// On allocation failure, the address is left empty (length 0).
+NetAddress::NetAddress(unsigned length) {
+  fData = new u_int8_t[length];
+  if (fData == NULL) {
+    fLength = 0;
+    return;
+  }
+
+  for (unsigned i = 0; i < length; ++i) fData[i] = 0;
+  fLength = length;
+}
+
+// Deep-copying copy constructor.
+NetAddress::NetAddress(NetAddress const& orig) {
+  assign(orig.data(), orig.length());
+}
+
+// Deep-copying assignment; self-assignment safe.
+NetAddress& NetAddress::operator=(NetAddress const& rightSide) {
+  if (&rightSide != this) {
+    clean();
+    assign(rightSide.data(), rightSide.length());
+  }
+  return *this;
+}
+
+NetAddress::~NetAddress() {
+  clean();
+}
+
+// Replaces our contents with a copy of "length" bytes at "data".
+// Precondition: any previous buffer has already been released (see clean()).
+void NetAddress::assign(u_int8_t const* data, unsigned length) {
+  fData = new u_int8_t[length];
+  if (fData == NULL) {
+    fLength = 0;
+    return;
+  }
+
+  for (unsigned i = 0; i < length; ++i) fData[i] = data[i];
+  fLength = length;
+}
+
+// Releases our buffer and resets to the empty state.
+void NetAddress::clean() {
+  delete[] fData; fData = NULL;
+  fLength = 0;
+}
+
+
+////////// NetAddressList //////////
+
+// Constructs the list of IPv4 addresses for "hostname".
+// If "hostname" is already an IP address string, the list holds just that
+// address; otherwise the name is resolved — via gethostbyname() (or the
+// VxWorks resolver) on platforms that need it, else via getaddrinfo().
+// On any failure the list is simply left empty (fNumAddresses == 0).
+NetAddressList::NetAddressList(char const* hostname)
+  : fNumAddresses(0), fAddressArray(NULL) {
+  // First, check whether "hostname" is an IP address string:
+  netAddressBits addr = our_inet_addr((char*)hostname);
+  if (addr != INADDR_NONE) {
+    // Yes, it was an IP address string.  Return a 1-element list with this address:
+    fNumAddresses = 1;
+    fAddressArray = new NetAddress*[fNumAddresses];
+    if (fAddressArray == NULL) return;
+
+    fAddressArray[0] = new NetAddress((u_int8_t*)&addr, sizeof (netAddressBits));
+    return;
+  }
+
+  // "hostname" is not an IP address string; try resolving it as a real host name instead:
+#if defined(USE_GETHOSTBYNAME) || defined(VXWORKS)
+  struct hostent* host;
+#if defined(VXWORKS)
+  char hostentBuf[512];
+
+  host = (struct hostent*)resolvGetHostByName((char*)hostname, (char*)&hostentBuf, sizeof hostentBuf);
+#else
+  host = gethostbyname((char*)hostname);
+#endif
+  if (host == NULL || host->h_length != 4 || host->h_addr_list == NULL) return; // no luck
+
+  u_int8_t const** const hAddrPtr = (u_int8_t const**)host->h_addr_list;
+  // First, count the number of addresses:
+  u_int8_t const** hAddrPtr1 = hAddrPtr;
+  while (*hAddrPtr1 != NULL) {
+    ++fNumAddresses;
+    ++hAddrPtr1;
+  }
+
+  // Next, set up the list:
+  fAddressArray = new NetAddress*[fNumAddresses];
+  if (fAddressArray == NULL) return;
+
+  for (unsigned i = 0; i < fNumAddresses; ++i) {
+    fAddressArray[i] = new NetAddress(hAddrPtr[i], host->h_length);
+  }
+#else
+  // Use "getaddrinfo()" (rather than the older, deprecated "gethostbyname()"):
+  struct addrinfo addrinfoHints;
+  memset(&addrinfoHints, 0, sizeof addrinfoHints);
+  addrinfoHints.ai_family = AF_INET; // For now, we're interested in IPv4 addresses only
+  struct addrinfo* addrinfoResultPtr = NULL;
+  int result = getaddrinfo(hostname, NULL, &addrinfoHints, &addrinfoResultPtr);
+  if (result != 0 || addrinfoResultPtr == NULL) return; // no luck
+
+  // First, count the number of addresses:
+  const struct addrinfo* p = addrinfoResultPtr;
+  while (p != NULL) {
+    // Sanity check: skip over addresses that are too small.  "p" must be
+    // advanced unconditionally — a bare "continue" here would loop forever:
+    if (p->ai_addrlen >= 4) ++fNumAddresses;
+    p = p->ai_next;
+  }
+
+  // Next, set up the list:
+  fAddressArray = new NetAddress*[fNumAddresses];
+  if (fAddressArray == NULL) return;
+
+  unsigned i = 0;
+  p = addrinfoResultPtr;
+  while (p != NULL) {
+    if (p->ai_addrlen >= 4) { // same sanity check as in the counting loop
+      fAddressArray[i++] = new NetAddress((u_int8_t const*)&(((struct sockaddr_in*)p->ai_addr)->sin_addr.s_addr), 4);
+    }
+    p = p->ai_next;
+  }
+
+  // Finally, free the data that we had allocated by calling "getaddrinfo()":
+  freeaddrinfo(addrinfoResultPtr);
+#endif
+}
+
+// Deep-copying copy constructor.
+NetAddressList::NetAddressList(NetAddressList const& orig) {
+  assign(orig.numAddresses(), orig.fAddressArray);
+}
+
+// Deep-copying assignment; self-assignment safe.
+NetAddressList& NetAddressList::operator=(NetAddressList const& rightSide) {
+  if (&rightSide != this) {
+    clean();
+    assign(rightSide.numAddresses(), rightSide.fAddressArray);
+  }
+  return *this;
+}
+
+NetAddressList::~NetAddressList() {
+  clean();
+}
+
+// Replaces our contents with deep copies of the "numAddresses" entries in
+// "addressArray".  On allocation failure, the list is left empty.
+void NetAddressList::assign(unsigned numAddresses, NetAddress** addressArray) {
+  fAddressArray = new NetAddress*[numAddresses];
+  if (fAddressArray == NULL) {
+    fNumAddresses = 0;
+    return;
+  }
+
+  for (unsigned i = 0; i < numAddresses; ++i) {
+    fAddressArray[i] = new NetAddress(*addressArray[i]);
+  }
+  fNumAddresses = numAddresses;
+}
+
+// Deletes each owned address, then the array itself.
+void NetAddressList::clean() {
+  while (fNumAddresses-- > 0) {
+    delete fAddressArray[fNumAddresses];
+  }
+  delete[] fAddressArray; fAddressArray = NULL;
+}
+
+// Returns the first address in the list, or NULL if the list is empty.
+NetAddress const* NetAddressList::firstAddress() const {
+  if (fNumAddresses == 0) return NULL;
+
+  return fAddressArray[0];
+}
+
+////////// NetAddressList::Iterator //////////
+NetAddressList::Iterator::Iterator(NetAddressList const& addressList)
+  : fAddressList(addressList), fNextIndex(0) {}
+
+// Returns the next address, or NULL once the list is exhausted.
+NetAddress const* NetAddressList::Iterator::nextAddress() {
+  if (fNextIndex >= fAddressList.numAddresses()) return NULL; // no more
+  return fAddressList.fAddressArray[fNextIndex++];
+}
+
+
+////////// Port //////////
+
+// Stores the port number internally in network byte order.
+Port::Port(portNumBits num /* in host byte order */) {
+  fPortNum = htons(num);
+}
+
+// Prints the port number in host byte order (human-readable).
+UsageEnvironment& operator<<(UsageEnvironment& s, const Port& p) {
+  return s << ntohs(p.num());
+}
+
+
+////////// AddressPortLookupTable //////////
+
+AddressPortLookupTable::AddressPortLookupTable()
+  : fTable(HashTable::create(3)) { // three-word keys are used
+}
+
+AddressPortLookupTable::~AddressPortLookupTable() {
+  delete fTable;
+}
+
+// Adds "value" under the (address1, address2, port) triple, which is packed
+// into a three-word key.  Returns whatever the underlying HashTable::Add()
+// returns (the previous value for that key, if any).
+void* AddressPortLookupTable::Add(netAddressBits address1,
+				  netAddressBits address2,
+				  Port port, void* value) {
+  int key[3];
+  key[0] = (int)address1;
+  key[1] = (int)address2;
+  key[2] = (int)port.num();
+  return fTable->Add((char*)key, value);
+}
+
+// Looks up the value stored under the (address1, address2, port) triple,
+// or NULL if absent.
+void* AddressPortLookupTable::Lookup(netAddressBits address1,
+				     netAddressBits address2,
+				     Port port) {
+  int key[3];
+  key[0] = (int)address1;
+  key[1] = (int)address2;
+  key[2] = (int)port.num();
+  return fTable->Lookup((char*)key);
+}
+
+// Removes the entry for the (address1, address2, port) triple, returning
+// True iff an entry was present.
+Boolean AddressPortLookupTable::Remove(netAddressBits address1,
+				       netAddressBits address2,
+				       Port port) {
+  int key[3];
+  key[0] = (int)address1;
+  key[1] = (int)address2;
+  key[2] = (int)port.num();
+  return fTable->Remove((char*)key);
+}
+
+AddressPortLookupTable::Iterator::Iterator(AddressPortLookupTable& table)
+  : fIter(HashTable::Iterator::create(*(table.fTable))) {
+}
+
+AddressPortLookupTable::Iterator::~Iterator() {
+  delete fIter;
+}
+
+// Returns the next stored value, or NULL when iteration is complete.
+void* AddressPortLookupTable::Iterator::next() {
+  char const* key; // dummy
+  return fIter->next(key);
+}
+
+
+////////// isMulticastAddress() implementation //////////
+
+// Returns True iff "address" (network byte order) is a routable IPv4
+// multicast address, i.e. within (224.0.0.255, 239.255.255.255].
+Boolean IsMulticastAddress(netAddressBits address) {
+  // Note: We return False for addresses in the range 224.0.0.0
+  // through 224.0.0.255, because these are non-routable
+  // Note: IPv4-specific #####
+  netAddressBits addressInNetworkOrder = htonl(address);
+  return addressInNetworkOrder >  0xE00000FF &&
+         addressInNetworkOrder <= 0xEFFFFFFF;
+}
+
+
+////////// AddressString implementation //////////
+
+// Each constructor formats the given IPv4 address (network byte order)
+// into a newly-allocated dotted-quad string, owned by this object.
+AddressString::AddressString(struct sockaddr_in const& addr) {
+  init(addr.sin_addr.s_addr);
+}
+
+AddressString::AddressString(struct in_addr const& addr) {
+  init(addr.s_addr);
+}
+
+AddressString::AddressString(netAddressBits addr) {
+  init(addr);
+}
+
+// Formats "addr" as "a.b.c.d" into a 16-byte buffer (the maximum,
+// "255.255.255.255", is exactly 15 characters plus the trailing '\0').
+void AddressString::init(netAddressBits addr) {
+  fVal = new char[16]; // large enough for "abc.def.ghi.jkl"
+  netAddressBits addrNBO = htonl(addr); // make sure we have a value in a known byte order: big endian
+  sprintf(fVal, "%u.%u.%u.%u", (addrNBO>>24)&0xFF, (addrNBO>>16)&0xFF, (addrNBO>>8)&0xFF, addrNBO&0xFF);
+}
+
+AddressString::~AddressString() {
+  delete[] fVal;
+}
diff --git a/groupsock/NetInterface.cpp b/groupsock/NetInterface.cpp
new file mode 100644
index 0000000..2fc0af1
--- /dev/null
+++ b/groupsock/NetInterface.cpp
@@ -0,0 +1,174 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Network Interfaces
+// Implementation
+
+#include "NetInterface.hh"
+#include "GroupsockHelper.hh"
+
+#ifndef NO_SSTREAM
+#include <sstream>
+#endif
+#include <stdio.h>
+
+////////// NetInterface //////////
+
+UsageEnvironment* NetInterface::DefaultUsageEnvironment = NULL;
+
+// Trivial constructors/destructors; these abstract base classes hold no
+// state of their own.
+NetInterface::NetInterface() {
+}
+
+NetInterface::~NetInterface() {
+}
+
+
+////////// NetInterface //////////
+
+DirectedNetInterface::DirectedNetInterface() {
+}
+
+DirectedNetInterface::~DirectedNetInterface() {
+}
+
+
+////////// DirectedNetInterfaceSet //////////
+
+// A set of DirectedNetInterfaces, keyed by the interface pointer itself
+// (one-word hash keys).
+DirectedNetInterfaceSet::DirectedNetInterfaceSet()
+  : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
+}
+
+DirectedNetInterfaceSet::~DirectedNetInterfaceSet() {
+  delete fTable;
+}
+
+// Adds "interf" to the set; returns the previously-stored entry for the
+// same pointer (if any), per HashTable::Add() semantics.
+DirectedNetInterface*
+DirectedNetInterfaceSet::Add(DirectedNetInterface const* interf) {
+  return (DirectedNetInterface*) fTable->Add((char*)interf, (void*)interf);
+}
+
+// Removes "interf"; returns True iff it was present.
+Boolean
+DirectedNetInterfaceSet::Remove(DirectedNetInterface const* interf) {
+  return fTable->Remove((char*)interf);
+}
+
+DirectedNetInterfaceSet::Iterator::
+Iterator(DirectedNetInterfaceSet& interfaces)
+  : fIter(HashTable::Iterator::create(*(interfaces.fTable))) {
+}
+
+DirectedNetInterfaceSet::Iterator::~Iterator() {
+  delete fIter;
+}
+
+// Returns the next interface in the set, or NULL when done.
+// (Fixed: removed a stray ';' after the function body, which formed an
+// empty declaration at namespace scope.)
+DirectedNetInterface* DirectedNetInterfaceSet::Iterator::next() {
+  char const* key; // dummy
+  return (DirectedNetInterface*) fIter->next(key);
+}
+
+
+////////// Socket //////////
+
+int Socket::DebugLevel = 1; // default value
+
+// Opens a datagram socket on "port".  If "DefaultUsageEnvironment" has been
+// set, it overrides the passed-in environment.
+Socket::Socket(UsageEnvironment& env, Port port)
+  : fEnv(DefaultUsageEnvironment != NULL ? *DefaultUsageEnvironment : env), fPort(port) {
+  fSocketNum = setupDatagramSocket(fEnv, port);
+}
+
+// Closes the underlying socket (if open) and marks this object as closed.
+void Socket::reset() {
+  if (fSocketNum >= 0) closeSocket(fSocketNum);
+  fSocketNum = -1;
+}
+
+Socket::~Socket() {
+  reset();
+}
+
+// Rebinds this object to "newPort": closes the old socket, opens a new one,
+// and carries over the old socket's buffer sizes and any registered event
+// handling.  Returns False (after unregistering the old socket's read
+// handler) if the new socket could not be created.
+Boolean Socket::changePort(Port newPort) {
+  int oldSocketNum = fSocketNum;
+  // Preserve the old socket's buffer sizes before closing it:
+  unsigned oldReceiveBufferSize = getReceiveBufferSize(fEnv, fSocketNum);
+  unsigned oldSendBufferSize = getSendBufferSize(fEnv, fSocketNum);
+  closeSocket(fSocketNum);
+
+  fSocketNum = setupDatagramSocket(fEnv, newPort);
+  if (fSocketNum < 0) {
+    fEnv.taskScheduler().turnOffBackgroundReadHandling(oldSocketNum);
+    return False;
+  }
+
+  setReceiveBufferTo(fEnv, fSocketNum, oldReceiveBufferSize);
+  setSendBufferTo(fEnv, fSocketNum, oldSendBufferSize);
+  if (fSocketNum != oldSocketNum) { // the socket number has changed, so move any event handling for it:
+    fEnv.taskScheduler().moveSocketHandling(oldSocketNum, fSocketNum);
+  }
+  return True;
+}
+
+// Prints a timestamped description of the socket, for debugging output.
+UsageEnvironment& operator<<(UsageEnvironment& s, const Socket& sock) {
+  return s << timestampString() << " Socket(" << sock.socketNum() << ")";
+}
+
+////////// SocketLookupTable //////////
+
+SocketLookupTable::SocketLookupTable()
+  : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
+}
+
+SocketLookupTable::~SocketLookupTable() {
+  delete fTable;
+}
+
+// Returns the Socket for "port", creating one (via the subclass's
+// CreateNew()) and adding it to the table if none exists yet; "isNew" tells
+// the caller which happened.  Returns NULL — deleting any partially-created
+// socket — if creation fails.
+Socket* SocketLookupTable::Fetch(UsageEnvironment& env, Port port,
+				 Boolean& isNew) {
+  isNew = False;
+  Socket* sock;
+  do {
+    sock = (Socket*) fTable->Lookup((char*)(long)(port.num()));
+    if (sock == NULL) { // we need to create one:
+      sock = CreateNew(env, port);
+      if (sock == NULL || sock->socketNum() < 0) break;
+
+      fTable->Add((char*)(long)(port.num()), (void*)sock);
+      isNew = True;
+    }
+
+    return sock;
+  } while (0);
+
+  // Creation failed ("delete NULL" is a safe no-op):
+  delete sock;
+  return NULL;
+}
+
+// Removes the table entry for "sock"'s port; True iff it was present.
+Boolean SocketLookupTable::Remove(Socket const* sock) {
+  return fTable->Remove( (char*)(long)(sock->port().num()) );
+}
+
+////////// NetInterfaceTrafficStats //////////
+
+// Simple packet/byte counters (kept as doubles to avoid overflow).
+NetInterfaceTrafficStats::NetInterfaceTrafficStats() {
+  fTotNumPackets = fTotNumBytes = 0.0;
+}
+
+// Records one packet of "packetSize" bytes.
+void NetInterfaceTrafficStats::countPacket(unsigned packetSize) {
+  fTotNumPackets += 1.0;
+  fTotNumBytes += packetSize;
+}
+
+// True iff at least one packet has been counted.
+Boolean NetInterfaceTrafficStats::haveSeenTraffic() const {
+  return fTotNumPackets != 0.0;
+}
diff --git a/groupsock/include/GroupEId.hh b/groupsock/include/GroupEId.hh
new file mode 100644
index 0000000..e7fc6be
--- /dev/null
+++ b/groupsock/include/GroupEId.hh
@@ -0,0 +1,64 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "multikit" Multicast Application Shell
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// "Group Endpoint Id"
+// C++ header
+
+#ifndef _GROUPEID_HH
+#define _GROUPEID_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+#ifndef _NET_ADDRESS_HH
+#include "NetAddress.hh"
+#endif
+
+// A "Group Endpoint Id": identifies a multicast group by its group address,
+// an optional source-filter address (for source-specific multicast), a port
+// number, and a TTL.
+class GroupEId {
+public:
+  GroupEId(struct in_addr const& groupAddr,
+	   portNumBits portNum, u_int8_t ttl);
+      // used for a 'source-independent multicast' group
+  GroupEId(struct in_addr const& groupAddr,
+	   struct in_addr const& sourceFilterAddr,
+	   portNumBits portNum);
+      // used for a 'source-specific multicast' group
+
+  struct in_addr const& groupAddress() const { return fGroupAddress; }
+  struct in_addr const& sourceFilterAddress() const { return fSourceFilterAddress; }
+
+  // True iff this id was constructed with a source filter (SSM):
+  Boolean isSSM() const;
+
+  portNumBits portNum() const { return fPortNum; }
+
+  u_int8_t ttl() const { return fTTL; }
+
+private:
+  // Common initialization shared by both public constructors:
+  void init(struct in_addr const& groupAddr,
+	    struct in_addr const& sourceFilterAddr,
+	    portNumBits portNum,
+	    u_int8_t ttl);
+
+private:
+  struct in_addr fGroupAddress;
+  struct in_addr fSourceFilterAddress;
+  portNumBits fPortNum; // in network byte order
+  u_int8_t fTTL;
+};
+
+#endif
diff --git a/groupsock/include/Groupsock.hh b/groupsock/include/Groupsock.hh
new file mode 100644
index 0000000..b3a7604
--- /dev/null
+++ b/groupsock/include/Groupsock.hh
@@ -0,0 +1,218 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// 'Group sockets'
+// C++ header
+
+#ifndef _GROUPSOCK_HH
+#define _GROUPSOCK_HH
+
+#ifndef _GROUPSOCK_VERSION_HH
+#include "groupsock_version.hh"
+#endif
+
+#ifndef _NET_INTERFACE_HH
+#include "NetInterface.hh"
+#endif
+
+#ifndef _GROUPEID_HH
+#include "GroupEId.hh"
+#endif
+
+// An "OutputSocket" is (by default) used only to send packets.
+// No packets are received on it (unless a subclass arranges this)
+
+class OutputSocket: public Socket {
+public:
+ OutputSocket(UsageEnvironment& env);
+ virtual ~OutputSocket();
+
+ virtual Boolean write(netAddressBits address, portNumBits portNum/*in network order*/, u_int8_t ttl,
+ unsigned char* buffer, unsigned bufferSize);
+ Boolean write(struct sockaddr_in& addressAndPort, u_int8_t ttl,
+ unsigned char* buffer, unsigned bufferSize) {
+ return write(addressAndPort.sin_addr.s_addr, addressAndPort.sin_port, ttl, buffer, bufferSize);
+ }
+
+protected:
+ OutputSocket(UsageEnvironment& env, Port port);
+
+ portNumBits sourcePortNum() const {return fSourcePort.num();}
+
+private: // redefined virtual function
+ virtual Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize,
+ unsigned& bytesRead,
+ struct sockaddr_in& fromAddressAndPort);
+
+private:
+ Port fSourcePort;
+ unsigned fLastSentTTL;
+};
+
+class destRecord {
+public:
+ destRecord(struct in_addr const& addr, Port const& port, u_int8_t ttl, unsigned sessionId,
+ destRecord* next);
+ virtual ~destRecord();
+
+public:
+ destRecord* fNext;
+ GroupEId fGroupEId;
+ unsigned fSessionId;
+};
+
+// A "Groupsock" is used to both send and receive packets.
+// As the name suggests, it was originally designed to send/receive
+// multicast, but it can send/receive unicast as well.
+
+class Groupsock: public OutputSocket {
+public:
+ Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr,
+ Port port, u_int8_t ttl);
+ // used for a 'source-independent multicast' group
+ Groupsock(UsageEnvironment& env, struct in_addr const& groupAddr,
+ struct in_addr const& sourceFilterAddr,
+ Port port);
+ // used for a 'source-specific multicast' group
+ virtual ~Groupsock();
+
+ virtual destRecord* createNewDestRecord(struct in_addr const& addr, Port const& port, u_int8_t ttl, unsigned sessionId, destRecord* next);
+ // Can be redefined by subclasses that also subclass "destRecord"
+
+ void changeDestinationParameters(struct in_addr const& newDestAddr,
+ Port newDestPort, int newDestTTL,
+ unsigned sessionId = 0);
+ // By default, the destination address, port and ttl for
+ // outgoing packets are those that were specified in
+ // the constructor. This works OK for multicast sockets,
+ // but for unicast we usually want the destination port
+ // number, at least, to be different from the source port.
+ // (If a parameter is 0 (or ~0 for ttl), then no change is made to that parameter.)
+ // (If no existing "destRecord" exists with this "sessionId", then we add a new "destRecord".)
+ unsigned lookupSessionIdFromDestination(struct sockaddr_in const& destAddrAndPort) const;
+ // returns 0 if not found
+
+ // As a special case, we also allow multiple destinations (addresses & ports)
+ // (This can be used to implement multi-unicast.)
+ virtual void addDestination(struct in_addr const& addr, Port const& port, unsigned sessionId);
+ virtual void removeDestination(unsigned sessionId);
+ void removeAllDestinations();
+ Boolean hasMultipleDestinations() const { return fDests != NULL && fDests->fNext != NULL; }
+
+ struct in_addr const& groupAddress() const {
+ return fIncomingGroupEId.groupAddress();
+ }
+ struct in_addr const& sourceFilterAddress() const {
+ return fIncomingGroupEId.sourceFilterAddress();
+ }
+
+ Boolean isSSM() const {
+ return fIncomingGroupEId.isSSM();
+ }
+
+ u_int8_t ttl() const { return fIncomingGroupEId.ttl(); }
+
+ void multicastSendOnly(); // send, but don't receive any multicast packets
+
+ virtual Boolean output(UsageEnvironment& env, unsigned char* buffer, unsigned bufferSize,
+ DirectedNetInterface* interfaceNotToFwdBackTo = NULL);
+
+ DirectedNetInterfaceSet& members() { return fMembers; }
+
+ Boolean deleteIfNoMembers;
+ Boolean isSlave; // for tunneling
+
+ static NetInterfaceTrafficStats statsIncoming;
+ static NetInterfaceTrafficStats statsOutgoing;
+ static NetInterfaceTrafficStats statsRelayedIncoming;
+ static NetInterfaceTrafficStats statsRelayedOutgoing;
+ NetInterfaceTrafficStats statsGroupIncoming; // *not* static
+ NetInterfaceTrafficStats statsGroupOutgoing; // *not* static
+ NetInterfaceTrafficStats statsGroupRelayedIncoming; // *not* static
+ NetInterfaceTrafficStats statsGroupRelayedOutgoing; // *not* static
+
+ Boolean wasLoopedBackFromUs(UsageEnvironment& env, struct sockaddr_in& fromAddressAndPort);
+
+public: // redefined virtual functions
+ virtual Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize,
+ unsigned& bytesRead,
+ struct sockaddr_in& fromAddressAndPort);
+
+protected:
+ destRecord* lookupDestRecordFromDestination(struct sockaddr_in const& destAddrAndPort) const;
+
+private:
+ void removeDestinationFrom(destRecord*& dests, unsigned sessionId);
+ // used to implement (the public) "removeDestination()", and "changeDestinationParameters()"
+ int outputToAllMembersExcept(DirectedNetInterface* exceptInterface,
+ u_int8_t ttlToFwd,
+ unsigned char* data, unsigned size,
+ netAddressBits sourceAddr);
+
+protected:
+ destRecord* fDests;
+private:
+ GroupEId fIncomingGroupEId;
+ DirectedNetInterfaceSet fMembers;
+};
+
+UsageEnvironment& operator<<(UsageEnvironment& s, const Groupsock& g);
+
+// A data structure for looking up a 'groupsock'
+// by (multicast address, port), or by socket number
+class GroupsockLookupTable {
+public:
+ Groupsock* Fetch(UsageEnvironment& env, netAddressBits groupAddress,
+ Port port, u_int8_t ttl, Boolean& isNew);
+ // Creates a new Groupsock if none already exists
+ Groupsock* Fetch(UsageEnvironment& env, netAddressBits groupAddress,
+ netAddressBits sourceFilterAddr,
+ Port port, Boolean& isNew);
+ // Creates a new Groupsock if none already exists
+ Groupsock* Lookup(netAddressBits groupAddress, Port port);
+ // Returns NULL if none already exists
+ Groupsock* Lookup(netAddressBits groupAddress,
+ netAddressBits sourceFilterAddr,
+ Port port);
+ // Returns NULL if none already exists
+ Groupsock* Lookup(UsageEnvironment& env, int sock);
+ // Returns NULL if none already exists
+ Boolean Remove(Groupsock const* groupsock);
+
+ // Used to iterate through the groupsocks in the table
+ class Iterator {
+ public:
+ Iterator(GroupsockLookupTable& groupsocks);
+
+ Groupsock* next(); // NULL iff none
+
+ private:
+ AddressPortLookupTable::Iterator fIter;
+ };
+
+private:
+ Groupsock* AddNew(UsageEnvironment& env,
+ netAddressBits groupAddress,
+ netAddressBits sourceFilterAddress,
+ Port port, u_int8_t ttl);
+
+private:
+ friend class Iterator;
+ AddressPortLookupTable fTable;
+};
+
+#endif
diff --git a/groupsock/include/GroupsockHelper.hh b/groupsock/include/GroupsockHelper.hh
new file mode 100644
index 0000000..503ab26
--- /dev/null
+++ b/groupsock/include/GroupsockHelper.hh
@@ -0,0 +1,147 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Helper routines to implement 'group sockets'
+// C++ header
+
+#ifndef _GROUPSOCK_HELPER_HH
+#define _GROUPSOCK_HELPER_HH
+
+#ifndef _NET_ADDRESS_HH
+#include "NetAddress.hh"
+#endif
+
+int setupDatagramSocket(UsageEnvironment& env, Port port);
+int setupStreamSocket(UsageEnvironment& env,
+ Port port, Boolean makeNonBlocking = True, Boolean setKeepAlive = False);
+
+int readSocket(UsageEnvironment& env,
+ int socket, unsigned char* buffer, unsigned bufferSize,
+ struct sockaddr_in& fromAddress);
+
+Boolean writeSocket(UsageEnvironment& env,
+ int socket, struct in_addr address, portNumBits portNum/*network byte order*/,
+ u_int8_t ttlArg,
+ unsigned char* buffer, unsigned bufferSize);
+
+Boolean writeSocket(UsageEnvironment& env,
+ int socket, struct in_addr address, portNumBits portNum/*network byte order*/,
+ unsigned char* buffer, unsigned bufferSize);
+ // An optimized version of "writeSocket" that omits the "setsockopt()" call to set the TTL.
+
+void ignoreSigPipeOnSocket(int socketNum);
+
+unsigned getSendBufferSize(UsageEnvironment& env, int socket);
+unsigned getReceiveBufferSize(UsageEnvironment& env, int socket);
+unsigned setSendBufferTo(UsageEnvironment& env,
+ int socket, unsigned requestedSize);
+unsigned setReceiveBufferTo(UsageEnvironment& env,
+ int socket, unsigned requestedSize);
+unsigned increaseSendBufferTo(UsageEnvironment& env,
+ int socket, unsigned requestedSize);
+unsigned increaseReceiveBufferTo(UsageEnvironment& env,
+ int socket, unsigned requestedSize);
+
+Boolean makeSocketNonBlocking(int sock);
+Boolean makeSocketBlocking(int sock, unsigned writeTimeoutInMilliseconds = 0);
+ // A "writeTimeoutInMilliseconds" value of 0 means: Don't timeout
+Boolean setSocketKeepAlive(int sock);
+
+Boolean socketJoinGroup(UsageEnvironment& env, int socket,
+ netAddressBits groupAddress);
+Boolean socketLeaveGroup(UsageEnvironment&, int socket,
+ netAddressBits groupAddress);
+
+// source-specific multicast join/leave
+Boolean socketJoinGroupSSM(UsageEnvironment& env, int socket,
+ netAddressBits groupAddress,
+ netAddressBits sourceFilterAddr);
+Boolean socketLeaveGroupSSM(UsageEnvironment&, int socket,
+ netAddressBits groupAddress,
+ netAddressBits sourceFilterAddr);
+
+Boolean getSourcePort(UsageEnvironment& env, int socket, Port& port);
+
+netAddressBits ourIPAddress(UsageEnvironment& env); // in network order
+
+// IP addresses of our sending and receiving interfaces. (By default, these
+// are INADDR_ANY (i.e., 0), specifying the default interface.)
+extern netAddressBits SendingInterfaceAddr;
+extern netAddressBits ReceivingInterfaceAddr;
+
+// Allocates a randomly-chosen IPv4 SSM (multicast) address:
+netAddressBits chooseRandomIPv4SSMAddress(UsageEnvironment& env);
+
+// Returns a simple "hh:mm:ss" string, for use in debugging output (e.g.)
+char const* timestampString();
+
+
+#ifdef HAVE_SOCKADDR_LEN
+#define SET_SOCKADDR_SIN_LEN(var) var.sin_len = sizeof var
+#else
+#define SET_SOCKADDR_SIN_LEN(var)
+#endif
+
+#define MAKE_SOCKADDR_IN(var,adr,prt) /*adr,prt must be in network order*/\
+ struct sockaddr_in var;\
+ var.sin_family = AF_INET;\
+ var.sin_addr.s_addr = (adr);\
+ var.sin_port = (prt);\
+ SET_SOCKADDR_SIN_LEN(var);
+
+
+// By default, we create sockets with the SO_REUSE_* flag set.
+// If, instead, you want to create sockets without the SO_REUSE_* flags,
+// Then enclose the creation code with:
+// {
+// NoReuse dummy;
+// ...
+// }
+class NoReuse {
+public:
+ NoReuse(UsageEnvironment& env);
+ ~NoReuse();
+
+private:
+ UsageEnvironment& fEnv;
+};
+
+
+// Define the "UsageEnvironment"-specific "groupsockPriv" structure:
+
+struct _groupsockPriv { // There should be only one of these allocated
+ HashTable* socketTable;
+ int reuseFlag;
+};
+_groupsockPriv* groupsockPriv(UsageEnvironment& env); // allocates it if necessary
+void reclaimGroupsockPriv(UsageEnvironment& env);
+
+
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(__MINGW32__)
+// For Windoze, we need to implement our own gettimeofday()
+extern int gettimeofday(struct timeval*, int*);
+#else
+#include <sys/time.h>
+#endif
+
+// The following are implemented in inet.c:
+extern "C" netAddressBits our_inet_addr(char const*);
+extern "C" void our_srandom(int x);
+extern "C" long our_random();
+extern "C" u_int32_t our_random32(); // because "our_random()" returns a 31-bit number
+
+#endif
diff --git a/groupsock/include/IOHandlers.hh b/groupsock/include/IOHandlers.hh
new file mode 100644
index 0000000..23375b0
--- /dev/null
+++ b/groupsock/include/IOHandlers.hh
@@ -0,0 +1,31 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// IO event handlers
+// C++ header
+
+#ifndef _IO_HANDLERS_HH
+#define _IO_HANDLERS_HH
+
+#ifndef _NET_INTERFACE_HH
+#include "NetInterface.hh"
+#endif
+
+// Handles incoming data on sockets:
+void socketReadHandler(Socket* sock, int mask);
+
+#endif
diff --git a/groupsock/include/NetAddress.hh b/groupsock/include/NetAddress.hh
new file mode 100644
index 0000000..909bba2
--- /dev/null
+++ b/groupsock/include/NetAddress.hh
@@ -0,0 +1,162 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Network Addresses
+// C++ header
+
+#ifndef _NET_ADDRESS_HH
+#define _NET_ADDRESS_HH
+
+#ifndef _HASH_TABLE_HH
+#include "HashTable.hh"
+#endif
+
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+
+#ifndef _USAGE_ENVIRONMENT_HH
+#include "UsageEnvironment.hh"
+#endif
+
+// Definition of a type representing a low-level network address.
+// At present, this is 32-bits, for IPv4. Later, generalize it,
+// to allow for IPv6.
+typedef u_int32_t netAddressBits;
+
+class NetAddress {
+public:
+ NetAddress(u_int8_t const* data,
+ unsigned length = 4 /* default: 32 bits */);
+ NetAddress(unsigned length = 4); // sets address data to all-zeros
+ NetAddress(NetAddress const& orig);
+ NetAddress& operator=(NetAddress const& rightSide);
+ virtual ~NetAddress();
+
+ unsigned length() const { return fLength; }
+ u_int8_t const* data() const // always in network byte order
+ { return fData; }
+
+private:
+ void assign(u_int8_t const* data, unsigned length);
+ void clean();
+
+ unsigned fLength;
+ u_int8_t* fData;
+};
+
+class NetAddressList {
+public:
+ NetAddressList(char const* hostname);
+ NetAddressList(NetAddressList const& orig);
+ NetAddressList& operator=(NetAddressList const& rightSide);
+ virtual ~NetAddressList();
+
+ unsigned numAddresses() const { return fNumAddresses; }
+
+ NetAddress const* firstAddress() const;
+
+ // Used to iterate through the addresses in a list:
+ class Iterator {
+ public:
+ Iterator(NetAddressList const& addressList);
+ NetAddress const* nextAddress(); // NULL iff none
+ private:
+ NetAddressList const& fAddressList;
+ unsigned fNextIndex;
+ };
+
+private:
+ void assign(netAddressBits numAddresses, NetAddress** addressArray);
+ void clean();
+
+ friend class Iterator;
+ unsigned fNumAddresses;
+ NetAddress** fAddressArray;
+};
+
+typedef u_int16_t portNumBits;
+
+class Port {
+public:
+ Port(portNumBits num /* in host byte order */);
+
+ portNumBits num() const { return fPortNum; } // in network byte order
+
+private:
+ portNumBits fPortNum; // stored in network byte order
+#ifdef IRIX
+ portNumBits filler; // hack to overcome a bug in IRIX C++ compiler
+#endif
+};
+
+UsageEnvironment& operator<<(UsageEnvironment& s, const Port& p);
+
+
+// A generic table for looking up objects by (address1, address2, port)
+class AddressPortLookupTable {
+public:
+ AddressPortLookupTable();
+ virtual ~AddressPortLookupTable();
+
+ void* Add(netAddressBits address1, netAddressBits address2, Port port, void* value);
+ // Returns the old value if different, otherwise 0
+ Boolean Remove(netAddressBits address1, netAddressBits address2, Port port);
+ void* Lookup(netAddressBits address1, netAddressBits address2, Port port);
+ // Returns 0 if not found
+ void* RemoveNext() { return fTable->RemoveNext(); }
+
+ // Used to iterate through the entries in the table
+ class Iterator {
+ public:
+ Iterator(AddressPortLookupTable& table);
+ virtual ~Iterator();
+
+ void* next(); // NULL iff none
+
+ private:
+ HashTable::Iterator* fIter;
+ };
+
+private:
+ friend class Iterator;
+ HashTable* fTable;
+};
+
+
+Boolean IsMulticastAddress(netAddressBits address);
+
+
+// A mechanism for displaying an IPv4 address in ASCII. This is intended to replace "inet_ntoa()", which is not thread-safe.
+class AddressString {
+public:
+ AddressString(struct sockaddr_in const& addr);
+ AddressString(struct in_addr const& addr);
+ AddressString(netAddressBits addr); // "addr" is assumed to be in host byte order here
+
+ virtual ~AddressString();
+
+ char const* val() const { return fVal; }
+
+private:
+ void init(netAddressBits addr); // used to implement each of the constructors
+
+private:
+ char* fVal; // The result ASCII string: allocated by the constructor; deleted by the destructor
+};
+
+#endif
diff --git a/groupsock/include/NetCommon.h b/groupsock/include/NetCommon.h
new file mode 100644
index 0000000..eab1abb
--- /dev/null
+++ b/groupsock/include/NetCommon.h
@@ -0,0 +1,126 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+/* "groupsock" interface
+ * Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+ * Common include files, typically used for networking
+ */
+
+#ifndef _NET_COMMON_H
+#define _NET_COMMON_H
+
+#if defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE)
+/* Windows */
+#if defined(WINNT) || defined(_WINNT) || defined(__BORLANDC__) || defined(__MINGW32__) || defined(_WIN32_WCE) || defined (_MSC_VER)
+#define _MSWSOCK_
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#endif
+#include <windows.h>
+#include <errno.h>
+#include <string.h>
+
+#define closeSocket closesocket
+#ifdef EWOULDBLOCK
+#undef EWOULDBLOCK
+#endif
+#ifdef EINPROGRESS
+#undef EINPROGRESS
+#endif
+#ifdef EAGAIN
+#undef EAGAIN
+#endif
+#ifdef EINTR
+#undef EINTR
+#endif
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#define EINPROGRESS WSAEWOULDBLOCK
+#define EAGAIN WSAEWOULDBLOCK
+#define EINTR WSAEINTR
+
+#if defined(_WIN32_WCE)
+#define NO_STRSTREAM 1
+#endif
+
+/* Definitions of size-specific types: */
+typedef __int64 int64_t;
+typedef unsigned __int64 u_int64_t;
+
+typedef int int32_t;
+typedef unsigned u_int32_t;
+
+typedef short int16_t;
+typedef unsigned short u_int16_t;
+
+typedef unsigned char u_int8_t;
+
+// For "uintptr_t" and "intptr_t", we assume that if they're not already defined, then this must be
+// an old, 32-bit version of Windows:
+#if !defined(_MSC_STDINT_H_) && !defined(_UINTPTR_T_DEFINED) && !defined(_UINTPTR_T_DECLARED) && !defined(_UINTPTR_T)
+typedef unsigned uintptr_t;
+#endif
+#if !defined(_MSC_STDINT_H_) && !defined(_INTPTR_T_DEFINED) && !defined(_INTPTR_T_DECLARED) && !defined(_INTPTR_T)
+typedef int intptr_t;
+#endif
+
+#elif defined(VXWORKS)
+/* VxWorks */
+#include <time.h>
+#include <timers.h>
+#include <sys/times.h>
+#include <sockLib.h>
+#include <hostLib.h>
+#include <resolvLib.h>
+#include <ioLib.h>
+
+typedef unsigned int u_int32_t;
+typedef unsigned short u_int16_t;
+typedef unsigned char u_int8_t;
+
+#else
+/* Unix */
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <strings.h>
+#include <ctype.h>
+#include <stdint.h>
+#if defined(_QNX4)
+#include <sys/select.h>
+#include <unix.h>
+#endif
+
+#define closeSocket close
+
+#ifdef SOLARIS
+#define u_int64_t uint64_t
+#define u_int32_t uint32_t
+#define u_int16_t uint16_t
+#define u_int8_t uint8_t
+#endif
+#endif
+
+#ifndef SOCKLEN_T
+#define SOCKLEN_T int
+#endif
+
+#endif
diff --git a/groupsock/include/NetInterface.hh b/groupsock/include/NetInterface.hh
new file mode 100644
index 0000000..3489b32
--- /dev/null
+++ b/groupsock/include/NetInterface.hh
@@ -0,0 +1,149 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Network Interfaces
+// C++ header
+
+#ifndef _NET_INTERFACE_HH
+#define _NET_INTERFACE_HH
+
+#ifndef _NET_ADDRESS_HH
+#include "NetAddress.hh"
+#endif
+
+class NetInterface {
+public:
+ virtual ~NetInterface();
+
+ static UsageEnvironment* DefaultUsageEnvironment;
+ // if non-NULL, used for each new interface
+
+protected:
+ NetInterface(); // virtual base class
+};
+
+class DirectedNetInterface: public NetInterface {
+public:
+ virtual ~DirectedNetInterface();
+
+ virtual Boolean write(unsigned char* data, unsigned numBytes) = 0;
+
+ virtual Boolean SourceAddrOKForRelaying(UsageEnvironment& env,
+ unsigned addr) = 0;
+
+protected:
+ DirectedNetInterface(); // virtual base class
+};
+
+class DirectedNetInterfaceSet {
+public:
+ DirectedNetInterfaceSet();
+ virtual ~DirectedNetInterfaceSet();
+
+ DirectedNetInterface* Add(DirectedNetInterface const* interf);
+ // Returns the old value if different, otherwise 0
+ Boolean Remove(DirectedNetInterface const* interf);
+
+ Boolean IsEmpty() { return fTable->IsEmpty(); }
+
+ // Used to iterate through the interfaces in the set
+ class Iterator {
+ public:
+ Iterator(DirectedNetInterfaceSet& interfaces);
+ virtual ~Iterator();
+
+ DirectedNetInterface* next(); // NULL iff none
+
+ private:
+ HashTable::Iterator* fIter;
+ };
+
+private:
+ friend class Iterator;
+ HashTable* fTable;
+};
+
+class Socket: public NetInterface {
+public:
+ virtual ~Socket();
+ void reset(); // closes the socket, and sets "fSocketNum" to -1
+
+ virtual Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize,
+ unsigned& bytesRead,
+ struct sockaddr_in& fromAddress) = 0;
+ // Returns False on error; resultData == NULL if data ignored
+
+ int socketNum() const { return fSocketNum; }
+
+ Port port() const {
+ return fPort;
+ }
+
+ UsageEnvironment& env() const { return fEnv; }
+
+ static int DebugLevel;
+
+protected:
+ Socket(UsageEnvironment& env, Port port); // virtual base class
+
+ Boolean changePort(Port newPort); // will also cause socketNum() to change
+
+private:
+ int fSocketNum;
+ UsageEnvironment& fEnv;
+ Port fPort;
+};
+
+UsageEnvironment& operator<<(UsageEnvironment& s, const Socket& sock);
+
+// A data structure for looking up a Socket by port:
+
+class SocketLookupTable {
+public:
+ virtual ~SocketLookupTable();
+
+ Socket* Fetch(UsageEnvironment& env, Port port, Boolean& isNew);
+ // Creates a new Socket if none already exists
+ Boolean Remove(Socket const* sock);
+
+protected:
+ SocketLookupTable(); // abstract base class
+ virtual Socket* CreateNew(UsageEnvironment& env, Port port) = 0;
+
+private:
+ HashTable* fTable;
+};
+
+// A data structure for counting traffic:
+
+class NetInterfaceTrafficStats {
+public:
+ NetInterfaceTrafficStats();
+
+ void countPacket(unsigned packetSize);
+
+ float totNumPackets() const {return fTotNumPackets;}
+ float totNumBytes() const {return fTotNumBytes;}
+
+ Boolean haveSeenTraffic() const;
+
+private:
+ float fTotNumPackets;
+ float fTotNumBytes;
+};
+
+#endif
diff --git a/groupsock/include/TunnelEncaps.hh b/groupsock/include/TunnelEncaps.hh
new file mode 100644
index 0000000..1b21d07
--- /dev/null
+++ b/groupsock/include/TunnelEncaps.hh
@@ -0,0 +1,101 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "mTunnel" multicast access service
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Encapsulation trailer for tunnels
+// C++ header
+
+#ifndef _TUNNEL_ENCAPS_HH
+#define _TUNNEL_ENCAPS_HH
+
+#ifndef _NET_ADDRESS_HH
+#include "NetAddress.hh"
+#endif
+
+typedef u_int16_t Cookie;
+
+class TunnelEncapsulationTrailer {
+ // The trailer is laid out as follows:
+ // bytes 0-1: source 'cookie'
+ // bytes 2-3: destination 'cookie'
+ // bytes 4-7: address
+ // bytes 8-9: port
+ // byte 10: ttl
+ // byte 11: command
+
+ // Optionally, there may also be a 4-byte 'auxiliary address'
+ // (e.g., for 'source-specific multicast' preceding this)
+ // bytes -4 through -1: auxiliary address
+
+ public:
+ Cookie& srcCookie()
+ { return *(Cookie*)byteOffset(0); }
+ Cookie& dstCookie()
+ { return *(Cookie*)byteOffset(2); }
+ u_int32_t& address()
+ { return *(u_int32_t*)byteOffset(4); }
+ Port& port()
+ { return *(Port*)byteOffset(8); }
+ u_int8_t& ttl()
+ { return *(u_int8_t*)byteOffset(10); }
+ u_int8_t& command()
+ { return *(u_int8_t*)byteOffset(11); }
+
+ u_int32_t& auxAddress()
+ { return *(u_int32_t*)byteOffset(-4); }
+
+ private:
+ inline char* byteOffset(int charIndex)
+ { return ((char*)this) + charIndex; }
+};
+
+const unsigned TunnelEncapsulationTrailerSize = 12; // bytes
+const unsigned TunnelEncapsulationTrailerAuxSize = 4; // bytes
+const unsigned TunnelEncapsulationTrailerMaxSize
+ = TunnelEncapsulationTrailerSize + TunnelEncapsulationTrailerAuxSize;
+
+// Command codes:
+// 0: unused
+const u_int8_t TunnelDataCmd = 1;
+const u_int8_t TunnelJoinGroupCmd = 2;
+const u_int8_t TunnelLeaveGroupCmd = 3;
+const u_int8_t TunnelTearDownCmd = 4;
+const u_int8_t TunnelProbeCmd = 5;
+const u_int8_t TunnelProbeAckCmd = 6;
+const u_int8_t TunnelProbeNackCmd = 7;
+const u_int8_t TunnelJoinRTPGroupCmd = 8;
+const u_int8_t TunnelLeaveRTPGroupCmd = 9;
+// 0x0A through 0x10: currently unused.
+const u_int8_t TunnelExtensionFlag = 0x80; // a flag, not a cmd code
+const u_int8_t TunnelDataAuxCmd
+ = (TunnelExtensionFlag|TunnelDataCmd);
+const u_int8_t TunnelJoinGroupAuxCmd
+ = (TunnelExtensionFlag|TunnelJoinGroupCmd);
+const u_int8_t TunnelLeaveGroupAuxCmd
+ = (TunnelExtensionFlag|TunnelLeaveGroupCmd);
+// Note: the TearDown, Probe, ProbeAck, ProbeNack cmds have no Aux version
+// 0x84 through 0x87: currently unused.
+const u_int8_t TunnelJoinRTPGroupAuxCmd
+ = (TunnelExtensionFlag|TunnelJoinRTPGroupCmd);
+const u_int8_t TunnelLeaveRTPGroupAuxCmd
+ = (TunnelExtensionFlag|TunnelLeaveRTPGroupCmd);
+// 0x8A through 0xFF: currently unused
+
+inline Boolean TunnelIsAuxCmd(u_int8_t cmd) {
+ return (cmd&TunnelExtensionFlag) != 0;
+}
+
+#endif
diff --git a/groupsock/include/groupsock_version.hh b/groupsock/include/groupsock_version.hh
new file mode 100644
index 0000000..a80445e
--- /dev/null
+++ b/groupsock/include/groupsock_version.hh
@@ -0,0 +1,10 @@
+// Version information for the "groupsock" library
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+#ifndef _GROUPSOCK_VERSION_HH
+#define _GROUPSOCK_VERSION_HH
+
+#define GROUPSOCK_LIBRARY_VERSION_STRING "2020.03.06"
+#define GROUPSOCK_LIBRARY_VERSION_INT 1583452800
+
+#endif
diff --git a/groupsock/inet.c b/groupsock/inet.c
new file mode 100644
index 0000000..2050347
--- /dev/null
+++ b/groupsock/inet.c
@@ -0,0 +1,451 @@
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+
+#include <stdio.h>
+
+#ifdef VXWORKS
+#include <inetLib.h>
+#endif
+
+/* Some systems (e.g., SunOS) have header files that erroneously declare inet_addr() as taking no arguments.
+ * This confuses C++. To overcome this, we use our own routine, implemented in C.
+ */
+
+unsigned our_inet_addr(cp)
+	char const* cp; /* K&R-style parameter declaration, for maximum C-compiler portability */
+{
+	return inet_addr(cp); /* a plain C call, sidestepping the broken C++ prototype described above */
+}
+
+#if defined(__WIN32__) || defined(_WIN32)
+#ifndef IMN_PIM
+#define WS_VERSION_CHOICE1 0x202/*MAKEWORD(2,2)*/
+#define WS_VERSION_CHOICE2 0x101/*MAKEWORD(1,1)*/
+int initializeWinsockIfNecessary(void) {
+	/* We need to call an initialization routine before
+	 * we can do anything with winsock. (An unfortunate API requirement.)
+	 */
+	static int _haveInitializedWinsock = 0;
+	WSADATA wsadata;
+
+	if (!_haveInitializedWinsock) {
+		if ((WSAStartup(WS_VERSION_CHOICE1, &wsadata) != 0) /* try Winsock 2.2 first... */
+		    && ((WSAStartup(WS_VERSION_CHOICE2, &wsadata)) != 0)) { /* ...then fall back to 1.1 */
+			return 0; /* error in initialization */
+		}
+		if ((wsadata.wVersion != WS_VERSION_CHOICE1)
+		    && (wsadata.wVersion != WS_VERSION_CHOICE2)) {
+			WSACleanup();
+			return 0; /* desired Winsock version was not available */
+		}
+		_haveInitializedWinsock = 1; /* remember, so subsequent calls are no-ops */
+	}
+
+	return 1;
+}
+#else
+int initializeWinsockIfNecessary(void) { return 1; }
+#endif
+#else
+#define initializeWinsockIfNecessary() 1
+#endif
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#ifdef USE_SYSTEM_RANDOM
+/* Use the system-supplied "random()" and "srandom()" functions */
+#include <stdlib.h>
+long our_random() {
+#if defined(__WIN32__) || defined(_WIN32)
+	return rand(); /* Windows lacks random(); rand() is the closest system equivalent */
+#else
+	return random();
+#endif
+}
+void our_srandom(unsigned int x) {
+#if defined(__WIN32__) || defined(_WIN32)
+	srand(x); /* seeds the rand() used by our_random() above */
+#else
+	srandom(x);
+#endif
+}
+
+#else
+
+/* Use our own implementation of the "random()" and "srandom()" functions */
+/*
+ * random.c:
+ *
+ * An improved random number generation package. In addition to the standard
+ * rand()/srand() like interface, this package also has a special state info
+ * interface. The our_initstate() routine is called with a seed, an array of
+ * bytes, and a count of how many bytes are being passed in; this array is
+ * then initialized to contain information for random number generation with
+ * that much state information. Good sizes for the amount of state
+ * information are 32, 64, 128, and 256 bytes. The state can be switched by
+ * calling the our_setstate() routine with the same array as was initialized
+ * with our_initstate(). By default, the package runs with 128 bytes of state
+ * information and generates far better random numbers than a linear
+ * congruential generator. If the amount of state information is less than
+ * 32 bytes, a simple linear congruential R.N.G. is used.
+ *
+ * Internally, the state information is treated as an array of longs; the
+ * zeroeth element of the array is the type of R.N.G. being used (small
+ * integer); the remainder of the array is the state information for the
+ * R.N.G. Thus, 32 bytes of state information will give 7 longs worth of
+ * state information, which will allow a degree seven polynomial. (Note:
+ * the zeroeth word of state information also has some other information
+ * stored in it -- see our_setstate() for details).
+ *
+ * The random number generation technique is a linear feedback shift register
+ * approach, employing trinomials (since there are fewer terms to sum up that
+ * way). In this approach, the least significant bit of all the numbers in
+ * the state table will act as a linear feedback shift register, and will
+ * have period 2^deg - 1 (where deg is the degree of the polynomial being
+ * used, assuming that the polynomial is irreducible and primitive). The
+ * higher order bits will have longer periods, since their values are also
+ * influenced by pseudo-random carries out of the lower bits. The total
+ * period of the generator is approximately deg*(2**deg - 1); thus doubling
+ * the amount of state information has a vast influence on the period of the
+ * generator. Note: the deg*(2**deg - 1) is an approximation only good for
+ * large deg, when the period of the shift register is the dominant factor.
+ * With deg equal to seven, the period is actually much longer than the
+ * 7*(2**7 - 1) predicted by this formula.
+ */
+
+/*
+ * For each of the currently supported random number generators, we have a
+ * break value on the amount of state information (you need at least this
+ * many bytes of state info to support this random number generator), a degree
+ * for the polynomial (actually a trinomial) that the R.N.G. is based on, and
+ * the separation between the two lower order coefficients of the trinomial.
+ */
+#define TYPE_0 0 /* linear congruential */
+#define BREAK_0 8
+#define DEG_0 0
+#define SEP_0 0
+
+#define TYPE_1 1 /* x**7 + x**3 + 1 */
+#define BREAK_1 32
+#define DEG_1 7
+#define SEP_1 3
+
+#define TYPE_2 2 /* x**15 + x + 1 */
+#define BREAK_2 64
+#define DEG_2 15
+#define SEP_2 1
+
+#define TYPE_3 3 /* x**31 + x**3 + 1 */
+#define BREAK_3 128
+#define DEG_3 31
+#define SEP_3 3
+
+#define TYPE_4 4 /* x**63 + x + 1 */
+#define BREAK_4 256
+#define DEG_4 63
+#define SEP_4 1
+
+/*
+ * Array versions of the above information to make code run faster --
+ * relies on fact that TYPE_i == i.
+ */
+#define MAX_TYPES 5 /* max number of types above */
+
+static int const degrees[MAX_TYPES] = { DEG_0, DEG_1, DEG_2, DEG_3, DEG_4 };
+static int const seps [MAX_TYPES] = { SEP_0, SEP_1, SEP_2, SEP_3, SEP_4 };
+
+/*
+ * Initially, everything is set up as if from:
+ *
+ * our_initstate(1, &randtbl, 128);
+ *
+ * Note that this initialization takes advantage of the fact that srandom()
+ * advances the front and rear pointers 10*rand_deg times, and hence the
+ * rear pointer which starts at 0 will also end up at zero; thus the zeroeth
+ * element of the state information, which contains info about the current
+ * position of the rear pointer is just
+ *
+ * MAX_TYPES * (rptr - state) + TYPE_3 == TYPE_3.
+ */
+
+static long randtbl[DEG_3 + 1] = {
+ TYPE_3,
+ 0x9a319039, 0x32d9c024, 0x9b663182, 0x5da1f342, 0xde3b81e0, 0xdf0a6fb5,
+ 0xf103bc02, 0x48f340fb, 0x7449e56b, 0xbeb1dbb0, 0xab5c5918, 0x946554fd,
+ 0x8c2e680f, 0xeb3d799f, 0xb11ee0b7, 0x2d436b86, 0xda672e2a, 0x1588ca88,
+ 0xe369735d, 0x904f35f7, 0xd7158fd6, 0x6fa6f051, 0x616e6b96, 0xac94efdc,
+ 0x36413f93, 0xc622c298, 0xf5a42ab8, 0x8a88d77b, 0xf5ad9d0e, 0x8999220b,
+ 0x27fb47b9,
+};
+
+/*
+ * fptr and rptr are two pointers into the state info, a front and a rear
+ * pointer. These two pointers are always rand_sep places apart, as they
+ * cycle cyclically through the state information. (Yes, this does mean we
+ * could get away with just one pointer, but the code for random() is more
+ * efficient this way). The pointers are left positioned as they would be
+ * from the call
+ *
+ * our_initstate(1, randtbl, 128);
+ *
+ * (The position of the rear pointer, rptr, is really 0 (as explained above
+ * in the initialization of randtbl) because the state table pointer is set
+ * to point to randtbl[1] (as explained below).
+ */
+static long* fptr = &randtbl[SEP_3 + 1];
+static long* rptr = &randtbl[1];
+
+/*
+ * The following things are the pointer to the state information table, the
+ * type of the current generator, the degree of the current polynomial being
+ * used, and the separation between the two pointers. Note that for efficiency
+ * of random(), we remember the first location of the state information, not
+ * the zeroeth. Hence it is valid to access state[-1], which is used to
+ * store the type of the R.N.G. Also, we remember the last location, since
+ * this is more efficient than indexing every time to find the address of
+ * the last element to see if the front and rear pointers have wrapped.
+ */
+static long *state = &randtbl[1]; /* first state word; state[-1] holds the multiplexed type (see comment above) */
+static int rand_type = TYPE_3;
+static int rand_deg = DEG_3;
+static int rand_sep = SEP_3;
+static long* end_ptr = &randtbl[DEG_3 + 1]; /* one past the last state word, cached for fast wrap tests */
+
+/*
+ * srandom:
+ *
+ * Initialize the random number generator based on the given seed. If the
+ * type is the trivial no-state-information type, just remember the seed.
+ * Otherwise, initializes state[] based on the given "seed" via a linear
+ * congruential generator. Then, the pointers are set to known locations
+ * that are exactly rand_sep places apart. Lastly, it cycles the state
+ * information a given number of times to get rid of any initial dependencies
+ * introduced by the L.C.R.N.G. Note that the initialization of randtbl[]
+ * for default usage relies on values produced by this routine.
+ */
+long our_random(void); /*forward*/
+void
+our_srandom(unsigned int x)
+{
+	register int i;
+
+	if (rand_type == TYPE_0)
+		state[0] = x; /* trivial generator: the state is just the seed */
+	else {
+		state[0] = x;
+		for (i = 1; i < rand_deg; i++) /* seed the state table via an L.C.R.N.G. */
+			state[i] = 1103515245 * state[i - 1] + 12345;
+		fptr = &state[rand_sep]; /* reset both pointers to their canonical positions */
+		rptr = &state[0];
+		for (i = 0; i < 10 * rand_deg; i++) /* cycle away the initial L.C.R.N.G. dependencies */
+			(void)our_random();
+	}
+}
+
+/*
+ * our_initstate:
+ *
+ * Initialize the state information in the given array of n bytes for future
+ * random number generation. Based on the number of bytes we are given, and
+ * the break values for the different R.N.G.'s, we choose the best (largest)
+ * one we can and set things up for it. srandom() is then called to
+ * initialize the state information.
+ *
+ * Note that on return from srandom(), we set state[-1] to be the type
+ * multiplexed with the current value of the rear pointer; this is so
+ * successive calls to our_initstate() won't lose this information and will be
+ * able to restart with our_setstate().
+ *
+ * Note: the first thing we do is save the current state, if any, just like
+ * our_setstate() so that it doesn't matter when our_initstate is called.
+ *
+ * Returns a pointer to the old state.
+ */
+char *
+our_initstate(seed, arg_state, n)
+	unsigned int seed;		/* seed for R.N.G. */
+	char *arg_state;		/* pointer to state array */
+	int n;				/* # bytes of state info */
+{
+	register char *ostate = (char *)(&state[-1]); /* the old state (including its type word), returned to the caller */
+
+	if (rand_type == TYPE_0)
+		state[-1] = rand_type;
+	else
+		state[-1] = MAX_TYPES * (rptr - state) + rand_type; /* save rear-pointer position, multiplexed with the type */
+	if (n < BREAK_0) {
+#ifdef DEBUG
+		(void)fprintf(stderr,
+		    "random: not enough state (%d bytes); ignored.\n", n);
+#endif
+		return(0);
+	}
+	if (n < BREAK_1) { /* choose the largest R.N.G. that this much state supports: */
+		rand_type = TYPE_0;
+		rand_deg = DEG_0;
+		rand_sep = SEP_0;
+	} else if (n < BREAK_2) {
+		rand_type = TYPE_1;
+		rand_deg = DEG_1;
+		rand_sep = SEP_1;
+	} else if (n < BREAK_3) {
+		rand_type = TYPE_2;
+		rand_deg = DEG_2;
+		rand_sep = SEP_2;
+	} else if (n < BREAK_4) {
+		rand_type = TYPE_3;
+		rand_deg = DEG_3;
+		rand_sep = SEP_3;
+	} else {
+		rand_type = TYPE_4;
+		rand_deg = DEG_4;
+		rand_sep = SEP_4;
+	}
+	state = &(((long *)arg_state)[1]); /* first location */
+	end_ptr = &state[rand_deg]; /* must set end_ptr before srandom */
+	our_srandom(seed);
+	if (rand_type == TYPE_0)
+		state[-1] = rand_type;
+	else
+		state[-1] = MAX_TYPES*(rptr - state) + rand_type; /* re-save: srandom() moved the pointers */
+	return(ostate);
+}
+
+/*
+ * our_setstate:
+ *
+ * Restore the state from the given state array.
+ *
+ * Note: it is important that we also remember the locations of the pointers
+ * in the current state information, and restore the locations of the pointers
+ * from the old state information. This is done by multiplexing the pointer
+ * location into the zeroeth word of the state information.
+ *
+ * Note that due to the order in which things are done, it is OK to call
+ * our_setstate() with the same state as the current state.
+ *
+ * Returns a pointer to the old state information.
+ */
+char *
+our_setstate(arg_state)
+	char *arg_state;
+{
+	register long *new_state = (long *)arg_state;
+	register int type = new_state[0] % MAX_TYPES; /* demultiplex the R.N.G. type... */
+	register int rear = new_state[0] / MAX_TYPES; /* ...and the saved rear-pointer position */
+	char *ostate = (char *)(&state[-1]);
+
+	if (rand_type == TYPE_0)
+		state[-1] = rand_type;
+	else
+		state[-1] = MAX_TYPES * (rptr - state) + rand_type; /* save our current position into the old state first */
+	switch(type) {
+	case TYPE_0:
+	case TYPE_1:
+	case TYPE_2:
+	case TYPE_3:
+	case TYPE_4:
+		rand_type = type;
+		rand_deg = degrees[type];
+		rand_sep = seps[type];
+		break;
+	default: /* unrecognized type: keep the current generator unchanged */
+#ifdef DEBUG
+		(void)fprintf(stderr,
+		    "random: state info corrupted; not changed.\n");
+#endif
+		break;
+	}
+	state = &new_state[1];
+	if (rand_type != TYPE_0) {
+		rptr = &state[rear]; /* restore both pointers from the saved rear position */
+		fptr = &state[(rear + rand_sep) % rand_deg];
+	}
+	end_ptr = &state[rand_deg];	/* set end_ptr too */
+	return(ostate);
+}
+
+/*
+ * random:
+ *
+ * If we are using the trivial TYPE_0 R.N.G., just do the old linear
+ * congruential bit. Otherwise, we do our fancy trinomial stuff, which is
+ * the same in all the other cases due to all the global variables that have
+ * been set up. The basic operation is to add the number at the rear pointer
+ * into the one at the front pointer. Then both pointers are advanced to
+ * the next location cyclically in the table. The value returned is the sum
+ * generated, reduced to 31 bits by throwing away the "least random" low bit.
+ *
+ * Note: the code takes advantage of the fact that both the front and
+ * rear pointers can't wrap on the same call by not testing the rear
+ * pointer if the front one has wrapped.
+ *
+ * Returns a 31-bit random number.
+ */
+long our_random() {
+  long i;
+
+  if (rand_type == TYPE_0) { /* trivial linear-congruential generator */
+    i = state[0] = (state[0] * 1103515245 + 12345) & 0x7fffffff;
+  } else {
+    /* Make copies of "rptr" and "fptr" before working with them, in case we're being called concurrently by multiple threads: */
+    long* rp = rptr;
+    long* fp = fptr;
+
+    /* Make sure "rp" and "fp" are separated by the correct distance (again, allowing for concurrent access): */
+    if (!(fp == rp+SEP_3 || fp+DEG_3 == rp+SEP_3)) {
+      /* A rare case that should occur only if we're being called concurrently by multiple threads. */
+      /* Restore the proper separation between the pointers: */
+      if (rp <= fp) rp = fp-SEP_3; else rp = fp+DEG_3-SEP_3;
+    }
+
+    *fp += *rp; /* add the rear entry into the front entry... */
+    i = (*fp >> 1) & 0x7fffffff; /* chucking least random bit */
+    if (++fp >= end_ptr) { /* ...then advance both pointers cyclically */
+      fp = state;
+      ++rp;
+    } else if (++rp >= end_ptr) {
+      rp = state;
+    }
+
+    /* Restore "rptr" and "fptr" from our working copies: */
+    rptr = rp;
+    fptr = fp;
+  }
+
+  return i;
+}
+#endif
+
+u_int32_t our_random32() {
+  /* Return a 32-bit random number.
+     Because "our_random()" returns a 31-bit random number, we call it a second
+     time, to generate the high bit.
+     (Actually, to increase the likelihood of randomness, we take the middle 16 bits of two successive calls to "our_random()")
+  */
+  long random_1 = our_random();
+  u_int32_t random16_1 = (u_int32_t)(random_1&0x00FFFF00); /* bits 8..23 of the first result */
+
+  long random_2 = our_random();
+  u_int32_t random16_2 = (u_int32_t)(random_2&0x00FFFF00); /* bits 8..23 of the second result */
+
+  return (random16_1<<8) | (random16_2>>8); /* -> bits 16..31 and 0..15, respectively */
+}
+
+#ifdef USE_OUR_BZERO
+#ifndef __bzero
+void
+__bzero (to, count)
+     char *to;
+     int count;
+{
+  while (count-- > 0) /* zero "count" bytes starting at "to" */
+    {
+      *to++ = 0;
+    }
+}
+#endif
+#endif
diff --git a/hlsProxy/COPYING b/hlsProxy/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/hlsProxy/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/hlsProxy/COPYING.LESSER b/hlsProxy/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/hlsProxy/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/hlsProxy/Makefile.head b/hlsProxy/Makefile.head
new file mode 100644
index 0000000..e81e1ba
--- /dev/null
+++ b/hlsProxy/Makefile.head
@@ -0,0 +1,7 @@
+INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include
+# Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later.
+libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX)
+libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
+libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
+libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX)
+##### Change the following for your environment:
diff --git a/hlsProxy/Makefile.tail b/hlsProxy/Makefile.tail
new file mode 100644
index 0000000..51f4701
--- /dev/null
+++ b/hlsProxy/Makefile.tail
@@ -0,0 +1,38 @@
+##### End of variables to change
+
+HLS_PROXY = live555HLSProxy$(EXE)
+
+PREFIX = /usr/local
+ALL = $(HLS_PROXY)
+all: $(ALL)
+
+.$(C).$(OBJ):
+ $(C_COMPILER) -c $(C_FLAGS) $<
+.$(CPP).$(OBJ):
+ $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+HLS_PROXY_OBJS = live555HLSProxy.$(OBJ)
+
+USAGE_ENVIRONMENT_DIR = ../UsageEnvironment
+USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX)
+BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment
+BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX)
+LIVEMEDIA_DIR = ../liveMedia
+LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX)
+GROUPSOCK_DIR = ../groupsock
+GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX)
+LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
+ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB)
+LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION)
+
+live555HLSProxy$(EXE): $(HLS_PROXY_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(HLS_PROXY_OBJS) $(LIBS)
+
+clean:
+ -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+install: $(HLS_PROXY)
+ install -d $(DESTDIR)$(PREFIX)/bin
+ install -m 755 $(HLS_PROXY) $(DESTDIR)$(PREFIX)/bin
+
+##### Any additional, platform-specific rules come here:
diff --git a/hlsProxy/live555HLSProxy.cpp b/hlsProxy/live555HLSProxy.cpp
new file mode 100644
index 0000000..cc23409
--- /dev/null
+++ b/hlsProxy/live555HLSProxy.cpp
@@ -0,0 +1,439 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that acts as a proxy for a RTSP stream, converting it into a sequence of
+// HLS (HTTP Live Streaming) segments, plus a ".m3u8" file that can be accessed via a web browser.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+#define RTSP_CLIENT_VERBOSITY_LEVEL 0 // set to 1 for more verbose output from the "RTSPClient"
+#define OUR_HLS_SEGMENTATION_DURATION 6 /*seconds*/
+#define OUR_HLS_REWIND_DURATION 60 /*seconds: How far back in time a browser can seek*/
+
+UsageEnvironment* env;
+char const* programName;
+char* username = NULL;
+char* password = NULL;
+Authenticator* ourAuthenticator = NULL; // non-NULL iff "-u <username> <password>" was given
+Boolean streamUsingTCP = False; // set by "-t", or by an incoming REGISTER's preference
+portNumBits tunnelOverHTTPPortNum = 0; // set by "-T <http-port>"
+char const* hlsPrefix; // filename prefix for the HLS segments and the ".m3u8" index file
+MediaSession* session;
+MediaSubsession* subsession; // the (single) H.264 video subsession that we proxy
+double duration = 0.0; // the stream's play duration (seconds), if known
+Boolean createHandlerServerForREGISTERCommand = False; // set by "-R"
+portNumBits handlerServerForREGISTERCommandPortNum = 0; // optional port argument to "-R"
+HandlerServerForREGISTERCommand* handlerServerForREGISTERCommand;
+char* usernameForREGISTER = NULL;
+char* passwordForREGISTER = NULL;
+UserAuthenticationDatabase* authDBForREGISTER = NULL; // populated by "-U" (used only with "-R")
+
+void usage() {
+  *env << "usage:\t" << programName << " [-u <username> <password>] [-t|-T <http-port>] <input-RTSP-url> <HLS-prefix>\n";
+  *env << " or:\t" << programName << " -R [<port-num>] [-U <username-for-REGISTER> <password-for-REGISTER>] <HLS-prefix>\n";
+  exit(1); // never returns
+}
+
+// Forward function definitions:
+void continueAfterClientCreation0(RTSPClient* rtspClient, Boolean requestStreamingOverTCP);
+void continueAfterClientCreation1(RTSPClient* rtspClient);
+void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
+
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Parse the command line:
+  programName = argv[0];
+  while (argc > 1) {
+    char* const opt = argv[1];
+    if (opt[0] != '-') {
+      if (argc <= 3) break; // only the URL + prefix is left
+      usage();
+    }
+
+    switch (opt[1]) {
+      case 'u': { // specify a username and password
+        if (argc < 4) usage(); // there's no argv[3] (for the "password")
+        username = argv[2];
+        password = argv[3];
+        argv+=2; argc-=2; // consume the two extra arguments
+        ourAuthenticator = new Authenticator(username, password);
+        break;
+      }
+
+      case 't': { // stream RTP and RTCP over the TCP 'control' connection
+        streamUsingTCP = True;
+        break;
+      }
+
+      case 'T': { // stream RTP and RTCP over a HTTP connection, on the specified port
+        if (argc > 3 && argv[2][0] != '-') {
+          // The next argument is the HTTP server port number:
+          if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1
+              && tunnelOverHTTPPortNum > 0) {
+            ++argv; --argc;
+            break;
+          }
+        }
+
+        // If we get here, the option was specified incorrectly:
+        usage();
+        break;
+      }
+
+      case 'R': {
+        // set up a handler server for incoming "REGISTER" commands
+        createHandlerServerForREGISTERCommand = True;
+        if (argc > 2 && argv[2][0] != '-') {
+          // The next argument is the REGISTER handler server port number:
+          if (sscanf(argv[2], "%hu", &handlerServerForREGISTERCommandPortNum) == 1 && handlerServerForREGISTERCommandPortNum > 0) {
+            ++argv; --argc;
+            break;
+          }
+        }
+        break; // (the port-number argument is optional; no argument is not an error)
+      }
+
+      case 'U': { // specify a username and password to be used to authenticate an incoming "REGISTER" command (for use with -R)
+        if (argc < 4) usage(); // there's no argv[3] (for the "password")
+        usernameForREGISTER = argv[2];
+        passwordForREGISTER = argv[3];
+        argv+=2; argc-=2; // consume the two extra arguments
+
+        if (authDBForREGISTER == NULL) authDBForREGISTER = new UserAuthenticationDatabase;
+        authDBForREGISTER->addUserRecord(usernameForREGISTER, passwordForREGISTER);
+        break;
+      }
+
+      default: {
+        *env << "Invalid option: " << opt << "\n";
+        usage();
+        break;
+      }
+    }
+
+    ++argv; --argc; // move on to the next option
+  }
+
+  // Create (or arrange to create) our RTSP client object:
+  if (createHandlerServerForREGISTERCommand) {
+    if (argc != 2) usage(); // only the <HLS-prefix> argument remains
+    hlsPrefix = argv[1];
+
+    handlerServerForREGISTERCommand
+      = HandlerServerForREGISTERCommand::createNew(*env, continueAfterClientCreation0,
+						   handlerServerForREGISTERCommandPortNum, authDBForREGISTER,
+						   RTSP_CLIENT_VERBOSITY_LEVEL, programName);
+    if (handlerServerForREGISTERCommand == NULL) {
+      *env << "Failed to create a server for handling incoming \"REGISTER\" commands: " << env->getResultMsg() << "\n";
+      exit(1);
+    } else {
+      *env << "Awaiting an incoming \"REGISTER\" command on port " << handlerServerForREGISTERCommand->serverPortNum() << "\n";
+    }
+  } else { // Normal case
+    if (argc != 3) usage(); // expect <input-RTSP-url> and <HLS-prefix>
+    if (usernameForREGISTER != NULL) {
+      *env << "The '-U <username-for-REGISTER> <password-for-REGISTER>' option can be used only with -R\n";
+      usage();
+    }
+    char const* rtspURL = argv[1];
+    hlsPrefix = argv[2];
+
+    RTSPClient* rtspClient
+      = RTSPClient::createNew(*env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, programName, tunnelOverHTTPPortNum);
+    if (rtspClient == NULL) {
+      *env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env->getResultMsg() << "\n";
+      exit(1);
+    }
+
+    continueAfterClientCreation1(rtspClient);
+  }
+
+  // All further processing will be done from within the event loop:
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+void continueAfterClientCreation0(RTSPClient* newRTSPClient, Boolean requestStreamingOverTCP) {
+  if (newRTSPClient == NULL) return; // client creation from the "REGISTER" command failed
+
+  streamUsingTCP = requestStreamingOverTCP; // honor the REGISTERing client's transport preference
+
+  // Having handled one "REGISTER" command (giving us a "rtsp://" URL to stream from), we don't handle any more:
+  Medium::close(handlerServerForREGISTERCommand); handlerServerForREGISTERCommand = NULL;
+
+  continueAfterClientCreation1(newRTSPClient);
+}
+
+void continueAfterClientCreation1(RTSPClient* rtspClient) {
+  // Having created a "RTSPClient" object, send a RTSP "DESCRIBE" command for the URL:
+  rtspClient->sendDescribeCommand(continueAfterDESCRIBE, ourAuthenticator); // response handled in "continueAfterDESCRIBE()"
+}
+
+// A function that outputs a string that identifies each stream (for debugging output).
+UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
+  return env << "[URL:\"" << rtspClient.url() << "\"]: "; // tag debugging output with the stream's URL
+}
+
+// A function that outputs a string that identifies each subsession (for debugging output).
+UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
+  return env << subsession.mediumName() << "/" << subsession.codecName(); // e.g. "video/H264"
+}
+
+void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString); // forward
+
+void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) {
+  do { // a 'do {...} while (0)', so that "break" jumps to the error handler below
+    if (resultCode != 0) {
+      *env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
+      delete[] resultString;
+      break;
+    }
+
+    // Create a media session object from the SDP description.
+    // Then iterate over it, to look for subsession(s) that we can handle:
+    session = MediaSession::createNew(*env, resultString);
+    delete[] resultString;
+    if (session == NULL) {
+      *env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n";
+      break;
+    } else if (!session->hasSubsessions()) {
+      *env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
+      break;
+    }
+
+    MediaSubsessionIterator* iter = new MediaSubsessionIterator(*session);
+    while ((subsession = iter->next()) != NULL) { // look for the first H.264 video subsession:
+      if (strcmp(subsession->mediumName(), "video") == 0 &&
+	  strcmp(subsession->codecName(), "H264") == 0) break; // use this subsession
+    }
+    delete iter;
+
+    if (subsession == NULL) {
+      *env << *rtspClient << "This stream has no usable subsessions\n";
+      break;
+    }
+
+    if (!subsession->initiate()) {
+      *env << *rtspClient << "Failed to initiate the \"" << *subsession << "\" subsession: " << env->getResultMsg() << "\n";
+      break;
+    } else {
+      *env << *rtspClient << "Initiated the \"" << *subsession << "\" subsession\n";
+    }
+
+    // Continue setting up this subsession, by sending a RTSP "SETUP" command:
+    rtspClient->sendSetupCommand(*subsession, continueAfterSETUP, False, streamUsingTCP,
+				 False, ourAuthenticator);
+    return;
+  } while (0);
+
+  // An error occurred:
+  exit(1);
+}
+
+void segmentationCallback(void* clientData, char const* segmentFileName, double segmentDuration); // forward
+void afterPlaying(void* clientData); // forward
+void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString); // forward
+
+void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
+  do { // a 'do {...} while (0)', so that "break" jumps to the error handler below
+    if (resultCode != 0) {
+      *env << *rtspClient << "Failed to set up the \"" << *subsession << "\" subsession: " << resultString << "\n";
+      break; // NOTE(review): "resultString" is not freed on this path (harmless, because we exit(1) below)
+    }
+    delete[] resultString;
+
+    *env << *rtspClient << "Set up the \"" << *subsession << "\" subsession\n";
+
+    // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
+    // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
+    // after we've sent a RTSP "PLAY" command.)
+
+    subsession->sink
+      = HLSSegmenter::createNew(*env, OUR_HLS_SEGMENTATION_DURATION, hlsPrefix, segmentationCallback);
+
+    // Create a 'framer' filter for the input source, to put the stream of NAL units into a
+    // form that's usable in output Transport Streams.
+    // (Note that we use a *DiscreteFramer*, because the input source is a stream of discrete
+    // NAL units - i.e., one at a time.)
+    H264VideoStreamDiscreteFramer* framer
+      = H264VideoStreamDiscreteFramer::createNew(*env, subsession->readSource(),
+						 True/*includeStartCodeInOutput*/,
+						 True/*insertAccessUnitDelimiters*/);
+
+    // Then create a filter that packs the H.264 video data into a Transport Stream:
+    MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env);
+    tsFrames->addNewVideoSource(framer, 5/*mpegVersion: H.264*/);
+
+    // Start playing the sink object:
+    *env << "Beginning to read...\n";
+    subsession->sink->startPlaying(*tsFrames, afterPlaying, NULL);
+
+    // Also set up BYE handler//#####@@@@@ (TODO: not yet implemented)
+
+    // Finally, send a RTSP "PLAY" command to tell the server to start streaming:
+    if (session->absStartTime() != NULL) {
+      // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
+      rtspClient->sendPlayCommand(*session, continueAfterPLAY, session->absStartTime(), session->absEndTime(), 1.0f, ourAuthenticator);
+    } else {
+      duration = session->playEndTime() - session->playStartTime();
+      rtspClient->sendPlayCommand(*session, continueAfterPLAY, 0.0f, -1.0f, 1.0f, ourAuthenticator);
+    }
+
+    return;
+  } while (0);
+
+  // An error occurred:
+  exit(1);
+}
+
+// A record that defines a segment that has been written. These records are kept in a list:
+class SegmentRecord {
+public:
+  SegmentRecord(char const* segmentFileName, double segmentDuration)
+    : fNext(NULL), fSegmentFileName(strDup(segmentFileName)), fSegmentDuration(segmentDuration) {
+  }
+  virtual ~SegmentRecord() {
+    delete[] fSegmentFileName;
+    delete fNext; // note: also deletes all successors; callers first NULL-out "fNext" to delete just one record
+  }
+
+  SegmentRecord*& next() { return fNext; } // returns a *reference*, so callers can relink the list
+  char const* fileName() const { return fSegmentFileName; }
+  double duration() const { return fSegmentDuration; }
+
+private:
+  SegmentRecord* fNext;
+  char* fSegmentFileName; // our own copy (made with "strDup()"); freed in the destructor
+  double fSegmentDuration;
+};
+
+SegmentRecord* head = NULL; // oldest segment in our list of written-but-not-yet-deleted segments
+SegmentRecord* tail = NULL; // newest segment in that list
+double totalDuration = 0.0; // sum of the durations of all listed segments
+char* ourM3U8FileName = NULL; // computed lazily from "hlsPrefix" (see "segmentationCallback()")
+
+void segmentationCallback(void* /*clientData*/,
+			  char const* segmentFileName, double segmentDuration) {
+  // Begin by updating our list of segments:
+  SegmentRecord* newSegment = new SegmentRecord(segmentFileName, segmentDuration);
+  if (tail != NULL) {
+    tail->next() = newSegment;
+  } else {
+    head = newSegment;
+  }
+  tail = newSegment;
+  totalDuration += segmentDuration;
+
+  fprintf(stderr, "Wrote segment \"%s\" (duration: %f seconds) -> %f seconds of data stored\n",
+	  segmentFileName, segmentDuration, totalDuration);
+
+  static unsigned firstSegmentCounter = 1; // media-sequence number of the oldest listed segment
+  while (totalDuration > OUR_HLS_REWIND_DURATION) { // enforce the 'rewind' window:
+    // Remove segments from the head of the list:
+    SegmentRecord* segmentToRemove = head;
+    if (segmentToRemove == NULL) exit(1); // should not happen
+
+    head = segmentToRemove->next();
+    if (tail == segmentToRemove) { // should not happen
+      tail = NULL;
+    }
+    segmentToRemove->next() = NULL; // unlink, so deleting it doesn't cascade down the whole list
+
+    totalDuration -= segmentToRemove->duration();
+    fprintf(stderr, "\tDeleting segment \"%s\" (duration: %f seconds) -> %f seconds of data stored\n",
+	    segmentToRemove->fileName(), segmentToRemove->duration(), totalDuration);
+    if (unlink(segmentToRemove->fileName()) != 0) {
+      *env << "\t\tunlink(\"" << segmentToRemove->fileName() << "\") failed: " << env->getResultMsg() << "\n";
+    }
+    delete segmentToRemove;
+    ++firstSegmentCounter;
+  }
+
+  // Then, rewrite our ".m3u8" file with the new list of segments:
+  if (ourM3U8FileName == NULL) {
+    ourM3U8FileName = new char[strlen(hlsPrefix) + 5/*strlen(".m3u8")*/ + 1];
+    if (ourM3U8FileName == NULL) exit(1); // (defensive only; plain "new" throws rather than returning NULL)
+    sprintf(ourM3U8FileName, "%s.m3u8", hlsPrefix);
+  }
+
+  // Open our ".m3u8" file for output, and write the prefix:
+  FILE* ourM3U8Fid = fopen(ourM3U8FileName, "wb");
+  if (ourM3U8Fid == NULL) {
+    *env << "Failed to open file \"" << ourM3U8FileName << "\": " << env->getResultMsg();
+    exit(1);
+  }
+
+  fprintf(ourM3U8Fid,
+	  "#EXTM3U\n"
+	  "#EXT-X-VERSION:3\n"
+	  "#EXT-X-INDEPENDENT-SEGMENTS\n"
+	  "#EXT-X-TARGETDURATION:%u\n"
+	  "#EXT-X-MEDIA-SEQUENCE:%u\n",
+	  OUR_HLS_SEGMENTATION_DURATION,
+	  firstSegmentCounter);
+
+  // Write the list of segments:
+  for (SegmentRecord* segment = head; segment != NULL; segment = segment->next()) {
+    fprintf(ourM3U8Fid,
+	    "#EXTINF:%f,\n"
+	    "%s\n",
+	    segment->duration(),
+	    segment->fileName());
+  }
+
+  // Close our ".m3u8" file:
+  fclose(ourM3U8Fid);
+
+  static Boolean isFirstTime = True;
+  if (isFirstTime) { // announce the index file once, the first time it's written:
+    fprintf(stderr, "Wrote index file \"%s\"; the stream can now be played from a URL pointing to this file.\007\n", ourM3U8FileName);
+    isFirstTime = False;
+  }
+}
+
+void afterPlaying(void* /*clientData*/) {
+  *env << "...Done reading\n"; // the input stream has ended
+  exit(0); // normal termination
+}
+
+void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
+  do { // a 'do {...} while (0)', so that "break" jumps to the error handler below
+    if (resultCode != 0) {
+      *env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
+      break; // NOTE(review): "resultString" is not freed on this path (harmless, because we exit(1) below)
+    }
+    delete[] resultString;
+
+    // Set timer based on duration #####@@@@@ (TODO: not yet implemented)
+
+    *env << *rtspClient << "Started playing session";
+    if (duration > 0) {
+      *env << " (for up to " << duration << " seconds)";
+    }
+    *env << "...\n";
+
+    return;
+  } while (0);
+
+  // An error occurred:
+  exit(1);
+}
diff --git a/liveMedia/0_2n55ezgp_0_9w6043oh-0_2n55ezgp_0_9w6043oh.mp4 b/liveMedia/0_2n55ezgp_0_9w6043oh-0_2n55ezgp_0_9w6043oh.mp4
new file mode 100644
index 0000000..b576361
--- /dev/null
+++ b/liveMedia/0_2n55ezgp_0_9w6043oh-0_2n55ezgp_0_9w6043oh.mp4
Binary files differ
diff --git a/liveMedia/AC3AudioFileServerMediaSubsession.cpp b/liveMedia/AC3AudioFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..b8c8b5a
--- /dev/null
+++ b/liveMedia/AC3AudioFileServerMediaSubsession.cpp
@@ -0,0 +1,61 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an AC3 audio file.
+// Implementation
+
+#include "AC3AudioFileServerMediaSubsession.hh"
+#include "ByteStreamFileSource.hh"
+#include "AC3AudioStreamFramer.hh"
+#include "AC3AudioRTPSink.hh"
+
+// Public factory: returns a new subsession that streams the named AC3 file.
+AC3AudioFileServerMediaSubsession*
+AC3AudioFileServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* fileName,
+ Boolean reuseFirstSource) {
+ return new AC3AudioFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+AC3AudioFileServerMediaSubsession
+::AC3AudioFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource)
+ : FileServerMediaSubsession(env, fileName, reuseFirstSource) {
+}
+
+AC3AudioFileServerMediaSubsession::~AC3AudioFileServerMediaSubsession() {
+}
+
+// Builds the per-client source chain: a byte-stream reader over the file,
+// wrapped in a framer that splits the raw stream into AC3 frames.
+FramedSource* AC3AudioFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+ estBitrate = 48; // kbps, estimate
+
+ ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
+ if (fileSource == NULL) return NULL;
+
+ return AC3AudioStreamFramer::createNew(envir(), fileSource);
+}
+
+// Builds the RTP sink.  The framer (our input source) supplies the stream's
+// sampling rate, which is used as the RTP timestamp frequency.
+RTPSink* AC3AudioFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource) {
+ AC3AudioStreamFramer* audioSource = (AC3AudioStreamFramer*)inputSource;
+ return AC3AudioRTPSink::createNew(envir(), rtpGroupsock,
+ rtpPayloadTypeIfDynamic,
+ audioSource->samplingRate());
+}
diff --git a/liveMedia/AC3AudioRTPSink.cpp b/liveMedia/AC3AudioRTPSink.cpp
new file mode 100644
index 0000000..ef4f041
--- /dev/null
+++ b/liveMedia/AC3AudioRTPSink.cpp
@@ -0,0 +1,97 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for AC3 audio
+// Implementation
+
+#include "AC3AudioRTPSink.hh"
+
+// fTotNumFragmentsUsed records, once a frame is fragmented across packets,
+// how many fragments make up the complete frame; it is reused for the
+// payload header of each subsequent fragment (see doSpecialFrameHandling()).
+AC3AudioRTPSink::AC3AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency)
+ : AudioRTPSink(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency, "AC3"),
+ fTotNumFragmentsUsed(0) {
+}
+
+AC3AudioRTPSink::~AC3AudioRTPSink() {
+}
+
+// Public factory.
+AC3AudioRTPSink*
+AC3AudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency) {
+ return new AC3AudioRTPSink(env, RTPgs,
+ rtpPayloadFormat, rtpTimestampFrequency);
+}
+
+// Tells the base class never to pack a second frame into a packet that
+// already contains one; each packet carries one frame (or one fragment).
+Boolean AC3AudioRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+ // (For now) allow at most 1 frame in a single packet:
+ return False;
+}
+
+// Writes the 2-byte RFC 4184 payload header for each outgoing packet.
+// headers[0] is the frame type (FT): 0 = complete frame(s); 1 = initial
+// fragment carrying at least 5/8 of the frame; 2 = initial fragment carrying
+// less than 5/8; 3 = any non-initial fragment.  headers[1] is the number of
+// frames (or total fragments) associated with the frame in this packet.
+void AC3AudioRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+ // Set the 2-byte "payload header", as defined in RFC 4184.
+ unsigned char headers[2];
+
+ Boolean isFragment = numRemainingBytes > 0 || fragmentationOffset > 0;
+ if (!isFragment) {
+ headers[0] = 0; // One or more complete frames
+ headers[1] = 1; // because we (for now) allow at most 1 frame per packet
+ } else {
+ if (fragmentationOffset > 0) {
+ headers[0] = 3; // Fragment of frame other than initial fragment
+ } else {
+ // An initial fragment of the frame
+ unsigned const totalFrameSize = fragmentationOffset + numBytesInFrame + numRemainingBytes;
+ unsigned const fiveEighthsPoint = totalFrameSize/2 + totalFrameSize/8;
+ headers[0] = numBytesInFrame >= fiveEighthsPoint ? 1 : 2;
+
+ // Because this outgoing packet will be full (because it's an initial fragment), we can compute how many total
+ // fragments (and thus packets) will make up the complete AC-3 frame:
+ fTotNumFragmentsUsed = (totalFrameSize + (numBytesInFrame-1))/numBytesInFrame;
+ }
+
+ headers[1] = fTotNumFragmentsUsed;
+ }
+
+ setSpecialHeaderBytes(headers, sizeof headers);
+
+ if (numRemainingBytes == 0) {
+ // This packet contains the last (or only) fragment of the frame.
+ // Set the RTP 'M' ('marker') bit:
+ setMarkerBit();
+ }
+
+ // Important: Also call our base class's doSpecialFrameHandling(),
+ // to set the packet's timestamp:
+ MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+ frameStart, numBytesInFrame,
+ framePresentationTime,
+ numRemainingBytes);
+}
+
+// Every outgoing packet carries the 2-byte payload header set above.
+unsigned AC3AudioRTPSink::specialHeaderSize() const {
+ return 2;
+}
diff --git a/liveMedia/AC3AudioRTPSource.cpp b/liveMedia/AC3AudioRTPSource.cpp
new file mode 100644
index 0000000..e5b12b0
--- /dev/null
+++ b/liveMedia/AC3AudioRTPSource.cpp
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// AC3 Audio RTP Sources
+// Implementation
+
+#include "AC3AudioRTPSource.hh"
+
+// Public factory.
+AC3AudioRTPSource*
+AC3AudioRTPSource::createNew(UsageEnvironment& env,
+ Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+ return new AC3AudioRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency);
+}
+
+AC3AudioRTPSource::AC3AudioRTPSource(UsageEnvironment& env,
+ Groupsock* rtpGS,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency)
+ : MultiFramedRTPSource(env, rtpGS,
+ rtpPayloadFormat, rtpTimestampFrequency) {
+}
+
+AC3AudioRTPSource::~AC3AudioRTPSource() {
+}
+
+// Parses the 2-byte RFC 4184 payload header at the start of each packet.
+// The low 2 bits of the first byte are the frame type (FT): 3 means a
+// non-initial fragment (so the packet does not begin a frame); 0 means the
+// packet holds one or more complete frames.
+Boolean AC3AudioRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+ unsigned char* headerStart = packet->data();
+ unsigned packetSize = packet->dataSize();
+
+ // There's a 2-byte payload header at the beginning:
+ if (packetSize < 2) return False;
+ resultSpecialHeaderSize = 2;
+
+ unsigned char FT = headerStart[0]&0x03;
+ fCurrentPacketBeginsFrame = FT != 3;
+
+ // The RTP "M" (marker) bit indicates the last fragment of a frame.
+ // In case the sender did not set the "M" bit correctly, we also test for FT == 0:
+ fCurrentPacketCompletesFrame = packet->rtpMarkerBit() || FT == 0;
+
+ return True;
+}
+
+char const* AC3AudioRTPSource::MIMEtype() const {
+ return "audio/AC3";
+}
+
diff --git a/liveMedia/AC3AudioStreamFramer.cpp b/liveMedia/AC3AudioStreamFramer.cpp
new file mode 100644
index 0000000..9ccd0ff
--- /dev/null
+++ b/liveMedia/AC3AudioStreamFramer.cpp
@@ -0,0 +1,340 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an AC3 audio elementary stream into frames
+// Implementation
+
+#include "AC3AudioStreamFramer.hh"
+#include "StreamParser.hh"
+#include <GroupsockHelper.hh>
+
+////////// AC3AudioStreamParser definition //////////
+
+// Holds the first 8 bytes of an AC3 syncframe header, plus the stream
+// parameters (bit rate, sampling frequency, frame size) derived from them
+// by setParamsFromHeader().
+class AC3FrameParams {
+public:
+ AC3FrameParams() : samplingFreq(0) {}
+ // 8-byte header at the start of each frame:
+ // u_int32_t hdr0, hdr1;
+ unsigned hdr0, hdr1;
+
+ // parameters derived from the headers
+ unsigned kbps, samplingFreq, frameSize;
+
+ void setParamsFromHeader();
+};
+
+// Scans the input byte stream for AC3 syncframes and delivers them, one at
+// a time, into a buffer registered by the framer via registerReadInterest().
+class AC3AudioStreamParser: public StreamParser {
+public:
+ AC3AudioStreamParser(AC3AudioStreamFramer* usingSource,
+ FramedSource* inputSource);
+ virtual ~AC3AudioStreamParser();
+
+public:
+ void testStreamCode(unsigned char ourStreamCode,
+ unsigned char* ptr, unsigned size);
+ unsigned parseFrame(unsigned& numTruncatedBytes);
+ // returns the size of the frame that was acquired, or 0 if none was
+
+ void registerReadInterest(unsigned char* to, unsigned maxSize);
+
+ AC3FrameParams const& currentFrame() const { return fCurrentFrame; }
+
+ Boolean haveParsedAFrame() const { return fHaveParsedAFrame; }
+ void readAndSaveAFrame();
+
+private:
+ // Callbacks used by readAndSaveAFrame() (normal completion and closure):
+ static void afterGettingSavedFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingSavedFrame1(unsigned frameSize);
+ static void onSavedFrameClosure(void* clientData);
+ void onSavedFrameClosure1();
+
+private:
+ AC3AudioStreamFramer* fUsingSource;
+ // Destination registered by registerReadInterest():
+ unsigned char* fTo;
+ unsigned fMaxSize;
+
+ Boolean fHaveParsedAFrame;
+ // Frame pre-read by readAndSaveAFrame(), consumed by the next parseFrame():
+ unsigned char* fSavedFrame;
+ unsigned fSavedFrameSize;
+ // Watch variable used to exit the nested event loop in readAndSaveAFrame():
+ char fSavedFrameFlag;
+
+ // Parameters of the most recently read frame:
+ AC3FrameParams fCurrentFrame;
+};
+
+
+////////// AC3AudioStreamFramer implementation //////////
+
+AC3AudioStreamFramer::AC3AudioStreamFramer(UsageEnvironment& env,
+ FramedSource* inputSource,
+ unsigned char streamCode)
+ : FramedFilter(env, inputSource), fOurStreamCode(streamCode) {
+ // Use the current wallclock time as the initial 'presentation time':
+ gettimeofday(&fNextFramePresentationTime, NULL);
+
+ fParser = new AC3AudioStreamParser(this, inputSource);
+}
+
+AC3AudioStreamFramer::~AC3AudioStreamFramer() {
+ delete fParser;
+}
+
+// Public factory.  "streamCode", when nonzero, identifies a 4-byte stream-code
+// prefix that the parser strips from incoming data (see testStreamCode()).
+AC3AudioStreamFramer*
+AC3AudioStreamFramer::createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ unsigned char streamCode) {
+ // Need to add source type checking here??? #####
+ return new AC3AudioStreamFramer(env, inputSource, streamCode);
+}
+
+// Returns the stream's sampling frequency, reading (and saving) a first
+// frame if necessary to discover it.  Note: this may block in a nested
+// event loop inside readAndSaveAFrame().
+unsigned AC3AudioStreamFramer::samplingRate() {
+ if (!fParser->haveParsedAFrame()) {
+ // Because we haven't yet parsed a frame, we don't yet know the input
+ // stream's sampling rate. So, we first need to read a frame
+ // (into a special buffer that we keep around for later use).
+ fParser->readAndSaveAFrame();
+ }
+
+ return fParser->currentFrame().samplingFreq;
+}
+
+void AC3AudioStreamFramer::flushInput() {
+ fParser->flushInput();
+}
+
+// Records the reader's destination buffer, then tries to deliver one frame.
+void AC3AudioStreamFramer::doGetNextFrame() {
+ fParser->registerReadInterest(fTo, fMaxSize);
+ parseNextFrame();
+}
+
+#define MILLION 1000000
+
+// Returns the play time of the current frame (numSamples/samplingFreq).
+// 1536 is the number of audio samples per AC3 syncframe (per the AC-3
+// specification, ATSC A/52).  Returns 0 if the sampling frequency is unknown.
+struct timeval AC3AudioStreamFramer::currentFramePlayTime() const {
+ AC3FrameParams const& fr = fParser->currentFrame();
+ unsigned const numSamples = 1536;
+ unsigned const freq = fr.samplingFreq;
+
+ // result is numSamples/freq
+ unsigned const uSeconds = (freq == 0) ? 0
+ : ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer
+
+ struct timeval result;
+ result.tv_sec = uSeconds/MILLION;
+ result.tv_usec = uSeconds%MILLION;
+ return result;
+}
+
+// Static trampoline: forwards newly-arrived data to the member function below.
+void AC3AudioStreamFramer
+::handleNewData(void* clientData, unsigned char* ptr, unsigned size,
+ struct timeval /*presentationTime*/) {
+ AC3AudioStreamFramer* framer = (AC3AudioStreamFramer*)clientData;
+ framer->handleNewData(ptr, size);
+}
+
+// Strips any stream-code prefix from the new data, then resumes parsing.
+void AC3AudioStreamFramer
+::handleNewData(unsigned char* ptr, unsigned size) {
+ fParser->testStreamCode(fOurStreamCode, ptr, size);
+
+ parseNextFrame();
+}
+
+// Attempts to acquire one complete frame from the parser.  On success,
+// records the frame size, stamps it with the running presentation time
+// (advanced by the frame's play time), and hands it to the reader.
+void AC3AudioStreamFramer::parseNextFrame() {
+ unsigned acquiredFrameSize = fParser->parseFrame(fNumTruncatedBytes);
+ if (acquiredFrameSize > 0) {
+ // We were able to acquire a frame from the input.
+ // It has already been copied to the reader's space.
+ fFrameSize = acquiredFrameSize;
+
+ // Also set the presentation time, and increment it for next time,
+ // based on the length of this frame:
+ fPresentationTime = fNextFramePresentationTime;
+
+ struct timeval framePlayTime = currentFramePlayTime();
+ fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec;
+ fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec;
+ fNextFramePresentationTime.tv_sec
+ += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION;
+ fNextFramePresentationTime.tv_usec %= MILLION;
+
+ // Call our own 'after getting' function. Because we're not a 'leaf'
+ // source, we can call this directly, without risking infinite recursion.
+ afterGetting(this);
+ } else {
+ // We were unable to parse a complete frame from the input, because:
+ // - we had to read more data from the source stream, or
+ // - the source stream has ended.
+ }
+}
+
+
+////////// AC3AudioStreamParser implementation //////////
+
+// Bit rates (kbps) indexed by frmsizecod>>1; 19 entries, indices 0..18.
+static int const kbpsTable[] = {32, 40, 48, 56, 64, 80, 96, 112,
+ 128, 160, 192, 224, 256, 320, 384, 448,
+ 512, 576, 640};
+
+// Derives kbps, samplingFreq, and frameSize (in bytes) from the raw header.
+// "byte4" is the 5th byte of the syncframe, which holds the fscod (top 2
+// bits) and frmsizecod (low 6 bits) fields — presumably per ATSC A/52;
+// verify against the spec if modifying.
+void AC3FrameParams::setParamsFromHeader() {
+ unsigned char byte4 = hdr1 >> 24;
+
+ // frmsizecod>>1 selects the bit rate; out-of-range values are clamped
+ // to the last table entry:
+ unsigned char kbpsIndex = (byte4&0x3E) >> 1;
+ if (kbpsIndex > 18) kbpsIndex = 18;
+ kbps = kbpsTable[kbpsIndex];
+
+ unsigned char samplingFreqIndex = (byte4&0xC0) >> 6;
+ switch (samplingFreqIndex) {
+ case 0:
+ samplingFreq = 48000;
+ frameSize = 4*kbps;
+ break;
+ case 1:
+ samplingFreq = 44100;
+ // At 44.1 kHz the frame size alternates; the low header bit selects
+ // the longer of the two sizes:
+ frameSize = 2*(320*kbps/147 + (byte4&1));
+ break;
+ case 2:
+ case 3: // not legal?
+ samplingFreq = 32000;
+ frameSize = 6*kbps;
+ }
+}
+
+AC3AudioStreamParser
+::AC3AudioStreamParser(AC3AudioStreamFramer* usingSource,
+ FramedSource* inputSource)
+ : StreamParser(inputSource, FramedSource::handleClosure, usingSource,
+ &AC3AudioStreamFramer::handleNewData, usingSource),
+ fUsingSource(usingSource), fHaveParsedAFrame(False),
+ fSavedFrame(NULL), fSavedFrameSize(0) {
+}
+
+AC3AudioStreamParser::~AC3AudioStreamParser() {
+}
+
+// Records where (and how much) the reader wants the next frame delivered.
+void AC3AudioStreamParser::registerReadInterest(unsigned char* to,
+ unsigned maxSize) {
+ fTo = to;
+ fMaxSize = maxSize;
+}
+
+// If a stream code is in use, checks the first byte of newly-read data:
+// a match means the data carries a 4-byte stream-code prefix, which is
+// removed; a mismatch means the data belongs to some other stream, and the
+// whole read is discarded (by rolling back totNumValidBytes()).
+void AC3AudioStreamParser
+::testStreamCode(unsigned char ourStreamCode,
+ unsigned char* ptr, unsigned size) {
+ if (ourStreamCode == 0) return; // we assume that there's no stream code at the beginning of the data
+
+ if (size < 4) return;
+ unsigned char streamCode = *ptr;
+
+ if (streamCode == ourStreamCode) {
+ // Remove the first 4 bytes from the stream:
+ memmove(ptr, ptr + 4, size - 4);
+ totNumValidBytes() = totNumValidBytes() - 4;
+ } else {
+ // Discard all of the data that was just read:
+ totNumValidBytes() = totNumValidBytes() - size;
+ }
+}
+
+// Acquires one AC3 frame into the registered destination (fTo), returning
+// its size, or 0 if parsing was interrupted pending more input.  Any frame
+// previously saved by readAndSaveAFrame() is delivered first.  Frames larger
+// than fMaxSize are truncated, with the overflow reported via
+// "numTruncatedBytes" and skipped in the input.
+unsigned AC3AudioStreamParser::parseFrame(unsigned& numTruncatedBytes) {
+ if (fSavedFrameSize > 0) {
+ // We've already read and parsed a frame. Use it instead:
+ memmove(fTo, fSavedFrame, fSavedFrameSize);
+ delete[] fSavedFrame; fSavedFrame = NULL;
+ unsigned frameSize = fSavedFrameSize;
+ fSavedFrameSize = 0;
+ return frameSize;
+ }
+
+ try {
+ saveParserState();
+
+ // We expect an AC3 audio header (first 2 bytes == 0x0B77) at the start:
+ while (1) {
+ unsigned next4Bytes = test4Bytes();
+ if (next4Bytes>>16 == 0x0B77) break;
+ skipBytes(1);
+ saveParserState();
+ }
+ // hdr0 is consumed from the stream (get4Bytes); hdr1 is only peeked
+ // (test4Bytes), so those bytes are re-read by getBytes() below:
+ fCurrentFrame.hdr0 = get4Bytes();
+ fCurrentFrame.hdr1 = test4Bytes();
+
+ fCurrentFrame.setParamsFromHeader();
+ fHaveParsedAFrame = True;
+
+ // Copy the frame to the requested destination:
+ unsigned frameSize = fCurrentFrame.frameSize;
+ if (frameSize > fMaxSize) {
+ numTruncatedBytes = frameSize - fMaxSize;
+ frameSize = fMaxSize;
+ } else {
+ numTruncatedBytes = 0;
+ }
+
+ // Rewrite the 4 already-consumed header bytes by hand (big-endian):
+ fTo[0] = fCurrentFrame.hdr0 >> 24;
+ fTo[1] = fCurrentFrame.hdr0 >> 16;
+ fTo[2] = fCurrentFrame.hdr0 >> 8;
+ fTo[3] = fCurrentFrame.hdr0;
+ getBytes(&fTo[4], frameSize-4);
+ skipBytes(numTruncatedBytes);
+
+ return frameSize;
+ } catch (int /*e*/) {
+#ifdef DEBUG
+ fUsingSource->envir() << "AC3AudioStreamParser::parseFrame() EXCEPTION (This is normal behavior - *not* an error)\n";
+#endif
+ return 0; // the parsing got interrupted
+ }
+}
+
+// Synchronously reads one frame into fSavedFrame (for later delivery by
+// parseFrame()), by running a nested event loop until the read completes
+// or the source closes.  fSavedFrameFlag is the loop's watch variable.
+void AC3AudioStreamParser::readAndSaveAFrame() {
+ unsigned const maxAC3FrameSize = 4000;
+ fSavedFrame = new unsigned char[maxAC3FrameSize];
+ fSavedFrameSize = 0;
+
+ fSavedFrameFlag = 0;
+ fUsingSource->getNextFrame(fSavedFrame, maxAC3FrameSize,
+ afterGettingSavedFrame, this,
+ onSavedFrameClosure, this);
+ fUsingSource->envir().taskScheduler().doEventLoop(&fSavedFrameFlag);
+}
+
+// Static trampoline for the 'after getting' callback above.
+void AC3AudioStreamParser
+::afterGettingSavedFrame(void* clientData, unsigned frameSize,
+ unsigned /*numTruncatedBytes*/,
+ struct timeval /*presentationTime*/,
+ unsigned /*durationInMicroseconds*/) {
+ AC3AudioStreamParser* parser = (AC3AudioStreamParser*)clientData;
+ parser->afterGettingSavedFrame1(frameSize);
+}
+
+// Records the saved frame's size, and signals the nested event loop to exit.
+void AC3AudioStreamParser
+::afterGettingSavedFrame1(unsigned frameSize) {
+ fSavedFrameSize = frameSize;
+ fSavedFrameFlag = ~0;
+}
+
+// Static trampoline for the closure (end-of-stream) callback.
+void AC3AudioStreamParser::onSavedFrameClosure(void* clientData) {
+ AC3AudioStreamParser* parser = (AC3AudioStreamParser*)clientData;
+ parser->onSavedFrameClosure1();
+}
+
+// The source closed before a frame was read: discard the buffer and
+// signal the nested event loop to exit.
+void AC3AudioStreamParser::onSavedFrameClosure1() {
+ delete[] fSavedFrame; fSavedFrame = NULL;
+ fSavedFrameSize = 0;
+ fSavedFrameFlag = ~0;
+}
diff --git a/liveMedia/ADTSAudioFileServerMediaSubsession.cpp b/liveMedia/ADTSAudioFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..a30a94b
--- /dev/null
+++ b/liveMedia/ADTSAudioFileServerMediaSubsession.cpp
@@ -0,0 +1,60 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an AAC audio file in ADTS format
+// Implementation
+
+#include "ADTSAudioFileServerMediaSubsession.hh"
+#include "ADTSAudioFileSource.hh"
+#include "MPEG4GenericRTPSink.hh"
+
+// Public factory: returns a new subsession that streams the named ADTS file.
+ADTSAudioFileServerMediaSubsession*
+ADTSAudioFileServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* fileName,
+ Boolean reuseFirstSource) {
+ return new ADTSAudioFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+ADTSAudioFileServerMediaSubsession
+::ADTSAudioFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource)
+ : FileServerMediaSubsession(env, fileName, reuseFirstSource) {
+}
+
+ADTSAudioFileServerMediaSubsession
+::~ADTSAudioFileServerMediaSubsession() {
+}
+
+// The ADTS file source itself delivers framed AAC data, so no extra
+// framer filter is needed here.
+FramedSource* ADTSAudioFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+ estBitrate = 96; // kbps, estimate
+
+ return ADTSAudioFileSource::createNew(envir(), fFileName);
+}
+
+// Builds an "MPEG4-generic"/AAC-hbr RTP sink, using the stream parameters
+// (sampling frequency, config string, channel count) that the source parsed
+// from the file's first ADTS header.
+RTPSink* ADTSAudioFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource) {
+ ADTSAudioFileSource* adtsSource = (ADTSAudioFileSource*)inputSource;
+ return MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock,
+ rtpPayloadTypeIfDynamic,
+ adtsSource->samplingFrequency(),
+ "audio", "AAC-hbr", adtsSource->configStr(),
+ adtsSource->numChannels());
+}
diff --git a/liveMedia/ADTSAudioFileSource.cpp b/liveMedia/ADTSAudioFileSource.cpp
new file mode 100644
index 0000000..85cb6e4
--- /dev/null
+++ b/liveMedia/ADTSAudioFileSource.cpp
@@ -0,0 +1,171 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source object for AAC audio files in ADTS format
+// Implementation
+
+#include "ADTSAudioFileSource.hh"
+#include "InputFile.hh"
+#include <GroupsockHelper.hh>
+
+////////// ADTSAudioFileSource //////////
+
+static unsigned const samplingFrequencyTable[16] = {
+ 96000, 88200, 64000, 48000,
+ 44100, 32000, 24000, 22050,
+ 16000, 12000, 11025, 8000,
+ 7350, 0, 0, 0
+};
+
+// Public factory: opens the file, validates the first ADTS fixed header
+// (syncword, profile, sampling_frequency_index), extracts the stream
+// parameters, rewinds, and constructs the source.  Returns NULL (with a
+// result message set in "env") on any failure.
+ADTSAudioFileSource*
+ADTSAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
+ FILE* fid = NULL;
+ do {
+ fid = OpenInputFile(env, fileName);
+ if (fid == NULL) break;
+
+ // Now, having opened the input file, read the fixed header of the first frame,
+ // to get the audio stream's parameters:
+ unsigned char fixedHeader[4]; // it's actually 3.5 bytes long
+ if (fread(fixedHeader, 1, sizeof fixedHeader, fid) < sizeof fixedHeader) break;
+
+ // Check the 'syncword':
+ if (!(fixedHeader[0] == 0xFF && (fixedHeader[1]&0xF0) == 0xF0)) {
+ env.setResultMsg("Bad 'syncword' at start of ADTS file");
+ break;
+ }
+
+ // Get and check the 'profile':
+ u_int8_t profile = (fixedHeader[2]&0xC0)>>6; // 2 bits
+ if (profile == 3) {
+ env.setResultMsg("Bad (reserved) 'profile': 3 in first frame of ADTS file");
+ break;
+ }
+
+ // Get and check the 'sampling_frequency_index':
+ u_int8_t sampling_frequency_index = (fixedHeader[2]&0x3C)>>2; // 4 bits
+ if (samplingFrequencyTable[sampling_frequency_index] == 0) {
+ env.setResultMsg("Bad 'sampling_frequency_index' in first frame of ADTS file");
+ break;
+ }
+
+ // Get and check the 'channel_configuration':
+ u_int8_t channel_configuration
+ = ((fixedHeader[2]&0x01)<<2)|((fixedHeader[3]&0xC0)>>6); // 3 bits
+
+ // If we get here, the frame header was OK.
+ // Reset the fid to the beginning of the file:
+#ifndef _WIN32_WCE
+ rewind(fid);
+#else
+ SeekFile64(fid, SEEK_SET,0);
+#endif
+#ifdef DEBUG
+ fprintf(stderr, "Read first frame: profile %d, "
+ "sampling_frequency_index %d => samplingFrequency %d, "
+ "channel_configuration %d\n",
+ profile,
+ sampling_frequency_index, samplingFrequencyTable[sampling_frequency_index],
+ channel_configuration);
+#endif
+ return new ADTSAudioFileSource(env, fid, profile,
+ sampling_frequency_index, channel_configuration);
+ } while (0);
+
+ // An error occurred:
+ // (CloseInputFile is assumed to tolerate a NULL fid — TODO confirm)
+ CloseInputFile(fid);
+ return NULL;
+}
+
+// Derives the per-frame parameters from the validated header fields.
+// Division by fSamplingFrequency is safe because createNew() rejected any
+// sampling_frequency_index whose table entry is 0.
+ADTSAudioFileSource
+::ADTSAudioFileSource(UsageEnvironment& env, FILE* fid, u_int8_t profile,
+ u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration)
+ : FramedFileSource(env, fid) {
+ fSamplingFrequency = samplingFrequencyTable[samplingFrequencyIndex];
+ // channel_configuration 0 means the channel layout is signaled elsewhere;
+ // assume stereo in that case:
+ fNumChannels = channelConfiguration == 0 ? 2 : channelConfiguration;
+ fuSecsPerFrame
+ = (1024/*samples-per-frame*/*1000000) / fSamplingFrequency/*samples-per-second*/;
+
+ // Construct the 'AudioSpecificConfig', and from it, the corresponding ASCII string:
+ // NOTE(review): the format mixes "%02X" and "%02x", so the two hex bytes
+ // differ in letter case; probably harmless (hex is case-insensitive in SDP),
+ // but "%02X%02X" would be consistent.
+ unsigned char audioSpecificConfig[2];
+ u_int8_t const audioObjectType = profile + 1;
+ audioSpecificConfig[0] = (audioObjectType<<3) | (samplingFrequencyIndex>>1);
+ audioSpecificConfig[1] = (samplingFrequencyIndex<<7) | (channelConfiguration<<3);
+ sprintf(fConfigStr, "%02X%02x", audioSpecificConfig[0], audioSpecificConfig[1]);
+}
+
+ADTSAudioFileSource::~ADTSAudioFileSource() {
+ CloseInputFile(fFid);
+}
+
+// Note: We should change the following to use asynchronous file reading, #####
+// as we now do with ByteStreamFileSource. #####
+// Reads one ADTS frame: parses the 7-byte header, skips any CRC field,
+// copies the raw AAC payload into the reader's buffer (truncating if it
+// doesn't fit), advances the presentation time by one frame duration, and
+// schedules delivery.  Signals closure at EOF or on a read error.
+void ADTSAudioFileSource::doGetNextFrame() {
+ // Begin by reading the 7-byte fixed_variable headers:
+ unsigned char headers[7];
+ if (fread(headers, 1, sizeof headers, fFid) < sizeof headers
+ || feof(fFid) || ferror(fFid)) {
+ // The input source has ended:
+ handleClosure();
+ return;
+ }
+
+ // Extract important fields from the headers:
+ Boolean protection_absent = headers[1]&0x01;
+ // frame_length counts the whole frame, headers included:
+ u_int16_t frame_length
+ = ((headers[3]&0x03)<<11) | (headers[4]<<3) | ((headers[5]&0xE0)>>5);
+#ifdef DEBUG
+ u_int16_t syncword = (headers[0]<<4) | (headers[1]>>4);
+ fprintf(stderr, "Read frame: syncword 0x%x, protection_absent %d, frame_length %d\n", syncword, protection_absent, frame_length);
+ if (syncword != 0xFFF) fprintf(stderr, "WARNING: Bad syncword!\n");
+#endif
+ unsigned numBytesToRead
+ = frame_length > sizeof headers ? frame_length - sizeof headers : 0;
+
+ // If there's a 'crc_check' field, skip it:
+ if (!protection_absent) {
+ SeekFile64(fFid, 2, SEEK_CUR);
+ numBytesToRead = numBytesToRead > 2 ? numBytesToRead - 2 : 0;
+ }
+
+ // Next, read the raw frame data into the buffer provided:
+ if (numBytesToRead > fMaxSize) {
+ fNumTruncatedBytes = numBytesToRead - fMaxSize;
+ numBytesToRead = fMaxSize;
+ }
+ // NOTE(review): fread() returns size_t and can't be negative, so the
+ // "< 0" check below is vacuous; it's kept as defensive clamping.
+ int numBytesRead = fread(fTo, 1, numBytesToRead, fFid);
+ if (numBytesRead < 0) numBytesRead = 0;
+ fFrameSize = numBytesRead;
+ fNumTruncatedBytes += numBytesToRead - numBytesRead;
+
+ // Set the 'presentation time':
+ if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
+ // This is the first frame, so use the current time:
+ gettimeofday(&fPresentationTime, NULL);
+ } else {
+ // Increment by the play time of the previous frame:
+ unsigned uSeconds = fPresentationTime.tv_usec + fuSecsPerFrame;
+ fPresentationTime.tv_sec += uSeconds/1000000;
+ fPresentationTime.tv_usec = uSeconds%1000000;
+ }
+
+ fDurationInMicroseconds = fuSecsPerFrame;
+
+ // Switch to another task, and inform the reader that he has data:
+ nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
+ (TaskFunc*)FramedSource::afterGetting, this);
+}
diff --git a/liveMedia/AMRAudioFileServerMediaSubsession.cpp b/liveMedia/AMRAudioFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..feab340
--- /dev/null
+++ b/liveMedia/AMRAudioFileServerMediaSubsession.cpp
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an AMR audio file.
+// Implementation
+
+#include "AMRAudioFileServerMediaSubsession.hh"
+#include "AMRAudioRTPSink.hh"
+#include "AMRAudioFileSource.hh"
+
+// Public factory: returns a new subsession that streams the named AMR file.
+AMRAudioFileServerMediaSubsession*
+AMRAudioFileServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* fileName,
+ Boolean reuseFirstSource) {
+ return new AMRAudioFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+AMRAudioFileServerMediaSubsession
+::AMRAudioFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource)
+ : FileServerMediaSubsession(env, fileName, reuseFirstSource) {
+}
+
+AMRAudioFileServerMediaSubsession
+::~AMRAudioFileServerMediaSubsession() {
+}
+
+// The AMR file source itself delivers framed data; no extra filter needed.
+FramedSource* AMRAudioFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+ estBitrate = 10; // kbps, estimate
+
+ return AMRAudioFileSource::createNew(envir(), fFileName);
+}
+
+// Builds the RTP sink, using the wideband flag and channel count that the
+// source read from the file's header.
+RTPSink* AMRAudioFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource) {
+ AMRAudioFileSource* amrSource = (AMRAudioFileSource*)inputSource;
+ return AMRAudioRTPSink::createNew(envir(), rtpGroupsock,
+ rtpPayloadTypeIfDynamic,
+ amrSource->isWideband(),
+ amrSource->numChannels());
+}
diff --git a/liveMedia/AMRAudioFileSink.cpp b/liveMedia/AMRAudioFileSink.cpp
new file mode 100644
index 0000000..c96f042
--- /dev/null
+++ b/liveMedia/AMRAudioFileSink.cpp
@@ -0,0 +1,101 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// AMR Audio File sinks
+// Implementation
+
+#include "AMRAudioFileSink.hh"
+#include "AMRAudioSource.hh"
+#include "OutputFile.hh"
+
+////////// AMRAudioFileSink //////////
+
+// Constructor: delegates file handling to "FileSink"; tracks (via
+// "fHaveWrittenHeader") whether the AMR file-magic header has been
+// written yet, so it is emitted only once.
+AMRAudioFileSink
+::AMRAudioFileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize,
+		   char const* perFrameFileNamePrefix)
+  : FileSink(env, fid, bufferSize, perFrameFileNamePrefix),
+    fHaveWrittenHeader(False) {
+}
+
+AMRAudioFileSink::~AMRAudioFileSink() {
+}
+
+// Factory function.  In "oneFilePerFrame" mode no FILE* is opened here
+// (the base class opens a new file, named "<fileName>-...", per frame);
+// otherwise a single output file is opened now.  Returns NULL if that
+// open fails.
+AMRAudioFileSink*
+AMRAudioFileSink::createNew(UsageEnvironment& env, char const* fileName,
+			    unsigned bufferSize, Boolean oneFilePerFrame) {
+  do {
+    FILE* fid;
+    char const* perFrameFileNamePrefix;
+    if (oneFilePerFrame) {
+      // Create the fid for each frame
+      fid = NULL;
+      perFrameFileNamePrefix = fileName;
+    } else {
+      // Normal case: create the fid once
+      fid = OpenOutputFile(env, fileName);
+      if (fid == NULL) break;
+      perFrameFileNamePrefix = NULL;
+    }
+
+    return new AMRAudioFileSink(env, fid, bufferSize, perFrameFileNamePrefix);
+  } while (0);
+
+  return NULL;
+}
+
+// We can only record from sources that identify themselves as AMR audio
+// (we rely on AMRAudioSource accessors in afterGettingFrame()).
+Boolean AMRAudioFileSink::sourceIsCompatibleWithUs(MediaSource& source) {
+  // The input source must be a AMR Audio source:
+  return source.isAMRAudioSource();
+}
+
+// Called for each received frame.  When writing to a single file, the
+// first call emits the AMR magic header (RFC 4867, section 5), and every
+// frame is preceded by its 1-byte frame header (TOC byte) taken from the
+// source.  In one-file-per-frame mode, neither header is written.
+void AMRAudioFileSink::afterGettingFrame(unsigned frameSize,
+					 unsigned numTruncatedBytes,
+					 struct timeval presentationTime) {
+  AMRAudioSource* source = (AMRAudioSource*)fSource;
+  if (source == NULL) return; // sanity check
+
+  if (!fHaveWrittenHeader && fPerFrameFileNameBuffer == NULL) {
+    // Output the appropriate AMR header to the start of the file.
+    // This header is defined in RFC 4867, section 5.
+    // (However, we don't do this if we're creating one file per frame.)
+    char headerBuffer[100];
+    sprintf(headerBuffer, "#!AMR%s%s\n",
+	    source->isWideband() ? "-WB" : "",
+	    source->numChannels() > 1 ? "_MC1.0" : "");
+    unsigned headerLength = strlen(headerBuffer);
+    if (source->numChannels() > 1) {
+      // Also add a 32-bit channel description field:
+      headerBuffer[headerLength++] = 0;
+      headerBuffer[headerLength++] = 0;
+      headerBuffer[headerLength++] = 0;
+      headerBuffer[headerLength++] = source->numChannels();
+    }
+
+    addData((unsigned char*)headerBuffer, headerLength, presentationTime);
+  }
+  // Set unconditionally, so the check above is skipped on later calls:
+  fHaveWrittenHeader = True;
+
+  // Add the 1-byte header, before writing the file data proper:
+  // (Again, we don't do this if we're creating one file per frame.)
+  if (fPerFrameFileNameBuffer == NULL) {
+    u_int8_t frameHeader = source->lastFrameHeader();
+    addData(&frameHeader, 1, presentationTime);
+  }
+
+  // Call the parent class to complete the normal file write with the input data:
+  FileSink::afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
+}
diff --git a/liveMedia/AMRAudioFileSource.cpp b/liveMedia/AMRAudioFileSource.cpp
new file mode 100644
index 0000000..ca92d54
--- /dev/null
+++ b/liveMedia/AMRAudioFileSource.cpp
@@ -0,0 +1,174 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source object for AMR audio files (as defined in RFC 4867, section 5)
+// Implementation
+
+#include "AMRAudioFileSource.hh"
+#include "InputFile.hh"
+#include "GroupsockHelper.hh"
+
+////////// AMRAudioFileSource //////////
+
+// Factory function: opens "fileName" and validates its AMR magic number
+// (RFC 4867, section 5).  The magic number determines whether the file is
+// wideband ("#!AMR-WB") and/or multi-channel ("_MC1.0" + a 4-byte channel
+// description field).  Returns NULL — after closing the file and setting
+// an env error message — if the header is absent or malformed.
+AMRAudioFileSource*
+AMRAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
+  FILE* fid = NULL;
+  Boolean magicNumberOK = True;
+  do {
+
+    fid = OpenInputFile(env, fileName);
+    if (fid == NULL) break;
+
+    // Now, having opened the input file, read the first few bytes, to
+    // check the required 'magic number':
+    magicNumberOK = False; // until we learn otherwise
+    Boolean isWideband = False; // by default
+    unsigned numChannels = 1; // by default
+    char buf[100];
+    // Start with the first 6 bytes (the first 5 of which must be "#!AMR"):
+    if (fread(buf, 1, 6, fid) < 6) break;
+    if (strncmp(buf, "#!AMR", 5) != 0) break; // bad magic #
+    unsigned bytesRead = 6;
+
+    // The next bytes must be "\n", "-WB\n", "_MC1.0\n", or "-WB_MC1.0\n"
+    if (buf[5] == '-') {
+      // The next bytes must be "WB\n" or "WB_MC1.0\n"
+      if (fread(&buf[bytesRead], 1, 3, fid) < 3) break;
+      if (strncmp(&buf[bytesRead], "WB", 2) != 0) break; // bad magic #
+      isWideband = True;
+      bytesRead += 3;
+    }
+    // At this point buf[bytesRead-1] is either '\n', '_' (start of the
+    // multi-channel suffix), or garbage:
+    if (buf[bytesRead-1] == '_') {
+      // The next bytes must be "MC1.0\n"
+      if (fread(&buf[bytesRead], 1, 6, fid) < 6) break;
+      if (strncmp(&buf[bytesRead], "MC1.0\n", 6) != 0) break; // bad magic #
+      bytesRead += 6;
+
+      // The next 4 bytes contain the number of channels:
+      char channelDesc[4];
+      if (fread(channelDesc, 1, 4, fid) < 4) break;
+      numChannels = channelDesc[3]&0xF; // low nibble of the last byte
+    } else if (buf[bytesRead-1] != '\n') {
+      break; // bad magic #
+    }
+
+    // If we get here, the magic number was OK:
+    magicNumberOK = True;
+
+#ifdef DEBUG
+    fprintf(stderr, "isWideband: %d, numChannels: %d\n",
+	    isWideband, numChannels);
+#endif
+    // The file position is now at the first frame header.
+    return new AMRAudioFileSource(env, fid, isWideband, numChannels);
+  } while (0);
+
+  // An error occurred:
+  CloseInputFile(fid);
+  if (!magicNumberOK) {
+    env.setResultMsg("Bad (or nonexistent) AMR file header");
+  }
+  return NULL;
+}
+
+// Constructor: takes ownership of "fid" (an open file positioned just
+// past the magic header); the destructor closes it.
+AMRAudioFileSource
+::AMRAudioFileSource(UsageEnvironment& env, FILE* fid,
+		     Boolean isWideband, unsigned numChannels)
+  : AMRAudioSource(env, isWideband, numChannels),
+    fFid(fid) {
+}
+
+AMRAudioFileSource::~AMRAudioFileSource() {
+  CloseInputFile(fFid);
+}
+
+// The mapping from the "FT" field to frame size.
+// Values of 65535 are invalid.
+// (Sizes are in bytes, excluding the 1-byte frame header; indexed by the
+// 4-bit FT value from the frame header.)
+#define FT_INVALID 65535
+static unsigned short const frameSize[16] = {
+  12, 13, 15, 17,
+  19, 20, 26, 31,
+  5, FT_INVALID, FT_INVALID, FT_INVALID,
+  FT_INVALID, FT_INVALID, FT_INVALID, 0
+};
+// Same mapping, for wideband (AMR-WB) files:
+static unsigned short const frameSizeWideband[16] = {
+  17, 23, 32, 36,
+  40, 46, 50, 58,
+  60, 5, FT_INVALID, FT_INVALID,
+  FT_INVALID, FT_INVALID, 0, 0
+};
+
+// Note: We should change the following to use asynchronous file reading, #####
+// as we now do with ByteStreamFileSource. #####
+//
+// Delivers the next frame-block: reads and validates a 1-byte frame
+// header (skipping invalid ones), then reads the frame data into "fTo",
+// sets the presentation time (20 ms per frame) and duration, and
+// schedules "afterGetting()".  Signals closure at EOF or on read error.
+void AMRAudioFileSource::doGetNextFrame() {
+  if (feof(fFid) || ferror(fFid)) {
+    handleClosure();
+    return;
+  }
+
+  // Begin by reading the 1-byte frame header (and checking it for validity)
+  while (1) {
+    if (fread(&fLastFrameHeader, 1, 1, fFid) < 1) {
+      handleClosure();
+      return;
+    }
+    // The P (padding) bits — 0x83 — must be zero in a valid header:
+    if ((fLastFrameHeader&0x83) != 0) {
+#ifdef DEBUG
+      fprintf(stderr, "Invalid frame header 0x%02x (padding bits (0x83) are not zero)\n", fLastFrameHeader);
+#endif
+    } else {
+      // Extract the 4-bit FT (frame type) field, and map it to a size:
+      unsigned char ft = (fLastFrameHeader&0x78)>>3;
+      fFrameSize = fIsWideband ? frameSizeWideband[ft] : frameSize[ft];
+      if (fFrameSize == FT_INVALID) {
+#ifdef DEBUG
+	fprintf(stderr, "Invalid FT field %d (from frame header 0x%02x)\n",
+		ft, fLastFrameHeader);
+#endif
+      } else {
+	// The frame header is OK
+#ifdef DEBUG
+	fprintf(stderr, "Valid frame header 0x%02x -> ft %d -> frame size %d\n", fLastFrameHeader, ft, fFrameSize);
+#endif
+	break;
+      }
+    }
+  }
+
+  // Next, read the frame-block into the buffer provided:
+  fFrameSize *= fNumChannels; // because multiple channels make up a frame-block
+  if (fFrameSize > fMaxSize) {
+    fNumTruncatedBytes = fFrameSize - fMaxSize;
+    fFrameSize = fMaxSize;
+  }
+  // fFrameSize becomes the number of bytes actually read (may be short at EOF):
+  fFrameSize = fread(fTo, 1, fFrameSize, fFid);
+
+  // Set the 'presentation time':
+  if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
+    // This is the first frame, so use the current time:
+    gettimeofday(&fPresentationTime, NULL);
+  } else {
+    // Increment by the play time of the previous frame (20 ms)
+    unsigned uSeconds = fPresentationTime.tv_usec + 20000;
+    fPresentationTime.tv_sec += uSeconds/1000000;
+    fPresentationTime.tv_usec = uSeconds%1000000;
+  }
+
+  fDurationInMicroseconds = 20000; // each frame is 20 ms
+
+  // Switch to another task, and inform the reader that he has data:
+  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
+				(TaskFunc*)FramedSource::afterGetting, this);
+ }
diff --git a/liveMedia/AMRAudioRTPSink.cpp b/liveMedia/AMRAudioRTPSink.cpp
new file mode 100644
index 0000000..7d0cc33
--- /dev/null
+++ b/liveMedia/AMRAudioRTPSink.cpp
@@ -0,0 +1,134 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for AMR audio (RFC 4867)
+// Implementation
+
+// NOTE: At present, this is just a limited implementation, supporting:
+// octet-alignment only; no interleaving; no frame CRC; no robust-sorting.
+
+#include "AMRAudioRTPSink.hh"
+#include "AMRAudioSource.hh"
+
+// Factory function: creates an RTP sink for AMR audio with the given
+// payload format, bandwidth variant, and channel count.
+AMRAudioRTPSink*
+AMRAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+			   unsigned char rtpPayloadFormat,
+			   Boolean sourceIsWideband,
+			   unsigned numChannelsInSource) {
+  return new AMRAudioRTPSink(env, RTPgs, rtpPayloadFormat,
+			     sourceIsWideband, numChannelsInSource);
+}
+
+// Constructor: selects the RTP timestamp frequency (16 kHz for AMR-WB,
+// 8 kHz for narrowband) and the rtpmap encoding name accordingly.
+// "fFmtpSDPLine" is built lazily by auxSDPLine(), and freed here.
+AMRAudioRTPSink
+::AMRAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+		  unsigned char rtpPayloadFormat,
+		  Boolean sourceIsWideband, unsigned numChannelsInSource)
+  : AudioRTPSink(env, RTPgs, rtpPayloadFormat,
+		 sourceIsWideband ? 16000 : 8000,
+		 sourceIsWideband ? "AMR-WB": "AMR",
+		 numChannelsInSource),
+    fSourceIsWideband(sourceIsWideband), fFmtpSDPLine(NULL) {
+}
+
+AMRAudioRTPSink::~AMRAudioRTPSink() {
+  delete[] fFmtpSDPLine;
+}
+
+// Accepts only AMR sources whose wideband flag and channel count exactly
+// match what this sink was configured with.  Multi-channel input is
+// accepted, but a warning is printed (see comment below).
+Boolean AMRAudioRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+  // Our source must be an AMR audio source:
+  if (!source.isAMRAudioSource()) return False;
+
+  // Also, the source must be wideband iff we asked for this:
+  AMRAudioSource& amrSource = (AMRAudioSource&)source;
+  if ((amrSource.isWideband()^fSourceIsWideband) != 0) return False;
+
+  // Also, the source must have the same number of channels that we
+  // specified. (It could, in principle, have more, but we don't
+  // support that.)
+  if (amrSource.numChannels() != numChannels()) return False;
+
+  // Also, because in our current implementation we output only one
+  // frame in each RTP packet, this means that for multi-channel audio,
+  // each 'frame-block' will be split over multiple RTP packets, which
+  // may violate the spec. Warn about this:
+  if (amrSource.numChannels() > 1) {
+    envir() << "AMRAudioRTPSink: Warning: Input source has " << amrSource.numChannels()
+	    << " audio channels. In the current implementation, the multi-frame frame-block will be split over multiple RTP packets\n";
+  }
+
+  return True;
+}
+
+// Fills in the AMR payload header for the outgoing packet: the 1-byte
+// CMR octet (0xF0 = CMR 15, "no mode request") at offset 0, and one TOC
+// byte per frame (copied from the source's last frame header, with the
+// "F" follow-bit cleared).  Also sets the RTP marker bit on the very
+// first packet, then delegates timestamping to the base class.
+void AMRAudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
+					     unsigned char* frameStart,
+					     unsigned numBytesInFrame,
+					     struct timeval framePresentationTime,
+					     unsigned numRemainingBytes) {
+  // If this is the 1st frame in the 1st packet, set the RTP 'M' (marker)
+  // bit (because this is considered the start of a talk spurt):
+  if (isFirstPacket() && isFirstFrameInPacket()) {
+    setMarkerBit();
+  }
+
+  // If this is the first frame in the packet, set the 1-byte payload
+  // header (using CMR 15)
+  if (isFirstFrameInPacket()) {
+    u_int8_t payloadHeader = 0xF0;
+    setSpecialHeaderBytes(&payloadHeader, 1, 0);
+  }
+
+  // Set the TOC field for the current frame, based on the "FT" and "Q"
+  // values from our source:
+  AMRAudioSource* amrSource = (AMRAudioSource*)fSource;
+  if (amrSource == NULL) return; // sanity check
+
+  u_int8_t toc = amrSource->lastFrameHeader();
+  // Clear the "F" bit, because we're the last frame in this packet: #####
+  toc &=~ 0x80;
+  setSpecialHeaderBytes(&toc, 1, 1+numFramesUsedSoFar());
+
+  // Important: Also call our base class's doSpecialFrameHandling(),
+  // to set the packet's timestamp:
+  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+					     frameStart, numBytesInFrame,
+					     framePresentationTime,
+					     numRemainingBytes);
+}
+
+// One-frame-per-packet policy: never append a second frame to a packet.
+Boolean AMRAudioRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+				 unsigned /*numBytesInFrame*/) const {
+  // For now, pack only one AMR frame into each outgoing RTP packet: #####
+  return False;
+}
+
+// Payload-header overhead per packet.  Must stay consistent with the
+// bytes written in doSpecialFrameHandling() above.
+unsigned AMRAudioRTPSink::specialHeaderSize() const {
+  // For now, because we're packing only one frame per packet,
+  // there's just a 1-byte payload header, plus a 1-byte TOC #####
+  return 2;
+}
+
+// Returns the session's "a=fmtp:" SDP line, built lazily and cached in
+// "fFmtpSDPLine" (freed by our destructor).
+char const* AMRAudioRTPSink::auxSDPLine() {
+  if (fFmtpSDPLine == NULL) {
+    // Generate an "a=fmtp:" line with "octet-align=1" — the parameter
+    // name defined by RFC 4867, section 8.1.
+    // (That is the only non-default parameter.)
+    char buf[100];
+    sprintf(buf, "a=fmtp:%d octet-align=1\r\n", rtpPayloadType());
+    delete[] fFmtpSDPLine; fFmtpSDPLine = strDup(buf);
+  }
+  return fFmtpSDPLine;
+}
diff --git a/liveMedia/AMRAudioRTPSource.cpp b/liveMedia/AMRAudioRTPSource.cpp
new file mode 100644
index 0000000..5370a8f
--- /dev/null
+++ b/liveMedia/AMRAudioRTPSource.cpp
@@ -0,0 +1,749 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// AMR Audio RTP Sources (RFC 4867)
+// Implementation
+
+#include "AMRAudioRTPSource.hh"
+#include "MultiFramedRTPSource.hh"
+#include "BitVector.hh"
+#include <string.h>
+#include <stdlib.h>
+
+// This source is implemented internally by two separate sources:
+// (i) a RTP source for the raw (and possibly interleaved) AMR frames, and
+// (ii) a deinterleaving filter that reads from this.
+// Define these two new classes here:
+
+// The raw RTP source: parses the AMR payload header (CMR, optional
+// interleave byte, TOC, optional CRCs) in processSpecialHeader(), and
+// exposes the parsed TOC and interleave parameters to the
+// deinterleaving layer via the accessors below.
+class RawAMRRTPSource: public MultiFramedRTPSource {
+public:
+  static RawAMRRTPSource*
+  createNew(UsageEnvironment& env,
+	    Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+	    Boolean isWideband, Boolean isOctetAligned,
+	    Boolean isInterleaved, Boolean CRCsArePresent);
+
+  Boolean isWideband() const { return fIsWideband; }
+  unsigned char ILL() const { return fILL; }
+  unsigned char ILP() const { return fILP; }
+  unsigned TOCSize() const { return fTOCSize; } // total # of frames in the last pkt
+  unsigned char* TOC() const { return fTOC; } // FT+Q value for each TOC entry
+  unsigned& frameIndex() { return fFrameIndex; } // index of frame-block within pkt
+  Boolean& isSynchronized() { return fIsSynchronized; }
+
+private:
+  RawAMRRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+		  unsigned char rtpPayloadFormat,
+		  Boolean isWideband, Boolean isOctetAligned,
+		  Boolean isInterleaved, Boolean CRCsArePresent);
+      // called only by createNew()
+
+  virtual ~RawAMRRTPSource();
+
+private:
+  // redefined virtual functions:
+  virtual Boolean hasBeenSynchronizedUsingRTCP();
+
+  virtual Boolean processSpecialHeader(BufferedPacket* packet,
+                                       unsigned& resultSpecialHeaderSize);
+  virtual char const* MIMEtype() const;
+
+private:
+  Boolean fIsWideband, fIsOctetAligned, fIsInterleaved, fCRCsArePresent;
+  unsigned char fILL, fILP; // interleave length / index, from the payload header
+  unsigned fTOCSize;
+  unsigned char* fTOC; // heap array of fTOCSize entries; owned by us
+  unsigned fFrameIndex;
+  Boolean fIsSynchronized;
+};
+
+// The deinterleaving filter: reads raw frames from a "RawAMRRTPSource",
+// stores them in an "AMRDeinterleavingBuffer", and delivers them to the
+// client in playout order.  This is the object actually returned by
+// AMRAudioRTPSource::createNew().
+class AMRDeinterleaver: public AMRAudioSource {
+public:
+  static AMRDeinterleaver*
+  createNew(UsageEnvironment& env,
+	    Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize,
+	    RawAMRRTPSource* inputSource);
+
+private:
+  AMRDeinterleaver(UsageEnvironment& env,
+		   Boolean isWideband, unsigned numChannels,
+		   unsigned maxInterleaveGroupSize, RawAMRRTPSource* inputSource);
+      // called only by "createNew()"
+
+  virtual ~AMRDeinterleaver();
+
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+				unsigned numTruncatedBytes,
+				struct timeval presentationTime,
+				unsigned durationInMicroseconds);
+  void afterGettingFrame1(unsigned frameSize, struct timeval presentationTime);
+
+private:
+  // Redefined virtual functions:
+  void doGetNextFrame();
+  virtual void doStopGettingFrames();
+
+private:
+  RawAMRRTPSource* fInputSource; // owned; closed by our destructor
+  class AMRDeinterleavingBuffer* fDeinterleavingBuffer; // owned
+  Boolean fNeedAFrame; // True while the client is waiting for a frame
+};
+
+
+////////// AMRAudioRTPSource implementation //////////
+
+// Sanity-check limits for the parameters below:
+#define MAX_NUM_CHANNELS 20 // far larger than ever expected...
+#define MAX_INTERLEAVING_GROUP_SIZE 1000 // far larger than ever expected...
+
+// Factory function: builds the two-stage pipeline (RawAMRRTPSource ->
+// AMRDeinterleaver) and returns the deinterleaver; the raw RTP source is
+// also handed back via "resultRTPSource".  Returns NULL (and a NULL
+// "resultRTPSource") on invalid parameters or allocation failure.
+// Note: 'robust sorting order' is not supported.
+AMRAudioSource*
+AMRAudioRTPSource::createNew(UsageEnvironment& env,
+			     Groupsock* RTPgs,
+			     RTPSource*& resultRTPSource,
+			     unsigned char rtpPayloadFormat,
+			     Boolean isWideband,
+			     unsigned numChannels,
+			     Boolean isOctetAligned,
+			     unsigned interleaving,
+			     Boolean robustSortingOrder,
+			     Boolean CRCsArePresent) {
+  // Perform sanity checks on the input parameters:
+  if (robustSortingOrder) {
+    env << "AMRAudioRTPSource::createNew(): 'Robust sorting order' was specified, but we don't yet support this!\n";
+    return NULL;
+  } else if (numChannels > MAX_NUM_CHANNELS) {
+    env << "AMRAudioRTPSource::createNew(): The \"number of channels\" parameter ("
+	<< numChannels << ") is much too large!\n";
+    return NULL;
+  } else if (interleaving > MAX_INTERLEAVING_GROUP_SIZE) {
+    env << "AMRAudioRTPSource::createNew(): The \"interleaving\" parameter ("
+	<< interleaving << ") is much too large!\n";
+    return NULL;
+  }
+
+  // 'Bandwidth-efficient mode' precludes some other options:
+  if (!isOctetAligned) {
+    if (interleaving > 0 || robustSortingOrder || CRCsArePresent) {
+      env << "AMRAudioRTPSource::createNew(): 'Bandwidth-efficient mode' was specified, along with interleaving, 'robust sorting order', and/or CRCs, so we assume 'octet-aligned mode' instead.\n";
+      isOctetAligned = True;
+    }
+  }
+
+  Boolean isInterleaved;
+  unsigned maxInterleaveGroupSize; // in frames (not frame-blocks)
+  if (interleaving > 0) {
+    isInterleaved = True;
+    maxInterleaveGroupSize = interleaving*numChannels;
+  } else {
+    isInterleaved = False;
+    maxInterleaveGroupSize = numChannels;
+  }
+
+  RawAMRRTPSource* rawRTPSource;
+  resultRTPSource = rawRTPSource
+    = RawAMRRTPSource::createNew(env, RTPgs, rtpPayloadFormat,
+				 isWideband, isOctetAligned,
+				 isInterleaved, CRCsArePresent);
+  if (resultRTPSource == NULL) return NULL;
+
+  AMRDeinterleaver* deinterleaver
+    = AMRDeinterleaver::createNew(env, isWideband, numChannels,
+				  maxInterleaveGroupSize, rawRTPSource);
+  if (deinterleaver == NULL) {
+    // Clean up the raw source if the second stage could not be built:
+    Medium::close(resultRTPSource);
+    resultRTPSource = NULL;
+  }
+
+  return deinterleaver;
+}
+
+
+////////// AMRBufferedPacket and AMRBufferedPacketFactory //////////
+
+// A subclass of BufferedPacket, used to separate out AMR frames.
+
+// A packet wrapper that walks the frames inside one AMR RTP payload,
+// using its source's parsed TOC to determine each frame's size.
+class AMRBufferedPacket: public BufferedPacket {
+public:
+  AMRBufferedPacket(RawAMRRTPSource& ourSource);
+  virtual ~AMRBufferedPacket();
+
+private: // redefined virtual functions
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+					 unsigned dataSize);
+private:
+  RawAMRRTPSource& fOurSource; // consulted for the TOC and frame index
+};
+
+// Factory used by MultiFramedRTPSource to make our packet type:
+class AMRBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+///////// RawAMRRTPSource implementation ////////
+
+// Factory function (trivial forwarder to the private constructor).
+RawAMRRTPSource*
+RawAMRRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+			   unsigned char rtpPayloadFormat,
+			   Boolean isWideband, Boolean isOctetAligned,
+			   Boolean isInterleaved, Boolean CRCsArePresent) {
+  return new RawAMRRTPSource(env, RTPgs, rtpPayloadFormat,
+			     isWideband, isOctetAligned,
+			     isInterleaved, CRCsArePresent);
+}
+
+// Constructor: picks the RTP timestamp frequency (16 kHz for AMR-WB,
+// 8 kHz otherwise) and installs our packet factory.  "fTOC" starts NULL
+// and is (re)allocated by processSpecialHeader(); freed in the dtor.
+RawAMRRTPSource
+::RawAMRRTPSource(UsageEnvironment& env,
+		  Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+		  Boolean isWideband, Boolean isOctetAligned,
+		  Boolean isInterleaved, Boolean CRCsArePresent)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat,
+			 isWideband ? 16000 : 8000,
+			 new AMRBufferedPacketFactory),
+  fIsWideband(isWideband), fIsOctetAligned(isOctetAligned),
+  fIsInterleaved(isInterleaved), fCRCsArePresent(CRCsArePresent),
+  fILL(0), fILP(0), fTOCSize(0), fTOC(NULL), fFrameIndex(0), fIsSynchronized(False) {
+}
+
+RawAMRRTPSource::~RawAMRRTPSource() {
+  delete[] fTOC;
+}
+
+// Special FT (frame type) values from the AMR TOC:
+#define FT_SPEECH_LOST 14
+#define FT_NO_DATA 15
+
+static void unpackBandwidthEfficientData(BufferedPacket* packet,
+					 Boolean isWideband); // forward
+
+// Parses the AMR payload header at the start of "packet": the 1-byte CMR,
+// an optional interleaving byte (ILL/ILP), the variable-length TOC (one
+// byte per frame, terminated by a clear "F" bit), and — if configured —
+// trailing CRC bytes (which are skipped, not checked).  Saves a copy of
+// the TOC in "fTOC"/"fTOCSize" for use by AMRBufferedPacket, and returns
+// the total header size via "resultSpecialHeaderSize".  Returns False on
+// any malformed/truncated header.
+Boolean RawAMRRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  // If the data is 'bandwidth-efficient', first unpack it so that it's
+  // 'octet-aligned':
+  if (!fIsOctetAligned) unpackBandwidthEfficientData(packet, fIsWideband);
+
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+
+  // There's at least a 1-byte header, containing the CMR:
+  if (packetSize < 1) return False;
+  resultSpecialHeaderSize = 1;
+
+  if (fIsInterleaved) {
+    // There's an extra byte, containing the interleave parameters:
+    if (packetSize < 2) return False;
+
+    // Get the interleaving parameters, and check them for validity:
+    unsigned char const secondByte = headerStart[1];
+    fILL = (secondByte&0xF0)>>4;
+    fILP = secondByte&0x0F;
+    if (fILP > fILL) return False; // invalid
+    ++resultSpecialHeaderSize;
+  }
+#ifdef DEBUG
+  fprintf(stderr, "packetSize: %d, ILL: %d, ILP: %d\n", packetSize, fILL, fILP);
+#endif
+  fFrameIndex = 0; // initially
+
+  // Next, there's a "Payload Table of Contents" (one byte per entry):
+  unsigned numFramesPresent = 0, numNonEmptyFramesPresent = 0;
+  unsigned tocStartIndex = resultSpecialHeaderSize;
+  Boolean F; // the "follow" bit: set on every TOC entry except the last
+  do {
+    if (resultSpecialHeaderSize >= packetSize) return False;
+    unsigned char const tocByte = headerStart[resultSpecialHeaderSize++];
+    F = (tocByte&0x80) != 0;
+    unsigned char const FT = (tocByte&0x78) >> 3;
+#ifdef DEBUG
+    unsigned char Q = (tocByte&0x04)>>2;
+    fprintf(stderr, "\tTOC entry: F %d, FT %d, Q %d\n", F, FT, Q);
+#endif
+    ++numFramesPresent;
+    if (FT != FT_SPEECH_LOST && FT != FT_NO_DATA) ++numNonEmptyFramesPresent;
+  } while (F);
+#ifdef DEBUG
+  fprintf(stderr, "TOC contains %d entries (%d non-empty)\n", numFramesPresent, numNonEmptyFramesPresent);
+#endif
+
+  // Now that we know the size of the TOC, fill in our copy:
+  // (the buffer only grows; a larger previous allocation is reused)
+  if (numFramesPresent > fTOCSize) {
+    delete[] fTOC;
+    fTOC = new unsigned char[numFramesPresent];
+  }
+  fTOCSize = numFramesPresent;
+  for (unsigned i = 0; i < fTOCSize; ++i) {
+    unsigned char const tocByte = headerStart[tocStartIndex + i];
+    fTOC[i] = tocByte&0x7C; // clear everything except the F and Q fields
+  }
+
+  if (fCRCsArePresent) {
+    // 'numNonEmptyFramesPresent' CRC bytes will follow.
+    // Note: we currently don't check the CRCs for validity #####
+    resultSpecialHeaderSize += numNonEmptyFramesPresent;
+#ifdef DEBUG
+    fprintf(stderr, "Ignoring %d following CRC bytes\n", numNonEmptyFramesPresent);
+#endif
+    if (resultSpecialHeaderSize > packetSize) return False;
+  }
+#ifdef DEBUG
+  fprintf(stderr, "Total special header size: %d\n", resultSpecialHeaderSize);
+#endif
+
+  return True;
+}
+
+// MIME type reported for this stream, per bandwidth variant.
+char const* RawAMRRTPSource::MIMEtype() const {
+  return fIsWideband ? "audio/AMR-WB" : "audio/AMR";
+}
+
+// RTCP-sync status is managed externally via the isSynchronized() accessor.
+Boolean RawAMRRTPSource::hasBeenSynchronizedUsingRTCP() {
+  return fIsSynchronized;
+}
+
+
+///// AMRBufferedPacket and AMRBufferedPacketFactory implementation
+
+// Constructor: keeps a reference to the source whose parsed TOC drives
+// frame parsing (see nextEnclosedFrameSize() below).
+AMRBufferedPacket::AMRBufferedPacket(RawAMRRTPSource& ourSource)
+  : fOurSource(ourSource) {
+}
+
+AMRBufferedPacket::~AMRBufferedPacket() {
+}
+
+// The mapping from the "FT" field to frame size.
+// Values of 65535 are invalid.
+// (Sizes are in bytes, excluding the TOC byte; indexed by the 4-bit FT
+// value.  FT_INVALID is redefined here identically to the definition in
+// AMRAudioFileSource.cpp.)
+#define FT_INVALID 65535
+static unsigned short const frameBytesFromFT[16] = {
+  12, 13, 15, 17,
+  19, 20, 26, 31,
+  5, FT_INVALID, FT_INVALID, FT_INVALID,
+  FT_INVALID, FT_INVALID, FT_INVALID, 0
+};
+// Same mapping, for wideband (AMR-WB) streams:
+static unsigned short const frameBytesFromFTWideband[16] = {
+  17, 23, 32, 36,
+  40, 46, 50, 58,
+  60, 5, FT_INVALID, FT_INVALID,
+  FT_INVALID, FT_INVALID, 0, 0
+};
+
+// Returns the size of the next frame inside this packet, determined by
+// the FT value of the corresponding TOC entry (looked up via the
+// source's running frame index, which is incremented here).  Returns 0
+// on exhausted/invalid TOC or when the remaining data is too short.
+unsigned AMRBufferedPacket::
+  nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  if (dataSize == 0) return 0; // sanity check
+
+  // The size of the AMR frame is determined by the corresponding 'FT' value
+  // in the packet's Table of Contents.
+  unsigned const tocIndex = fOurSource.frameIndex();
+  if (tocIndex >= fOurSource.TOCSize()) return 0; // sanity check
+
+  unsigned char const tocByte = fOurSource.TOC()[tocIndex];
+  unsigned char const FT = (tocByte&0x78) >> 3;
+  // ASSERT: FT < 16
+  unsigned short frameSize
+    = fOurSource.isWideband() ? frameBytesFromFTWideband[FT] : frameBytesFromFT[FT];
+  if (frameSize == FT_INVALID) {
+    // Strange TOC entry!
+    fOurSource.envir() << "AMRBufferedPacket::nextEnclosedFrameSize(): invalid FT: " << FT << "\n";
+    frameSize = 0; // This probably messes up the rest of this packet, but...
+  }
+#ifdef DEBUG
+  fprintf(stderr, "AMRBufferedPacket::nextEnclosedFrameSize(): frame #: %d, FT: %d, isWideband: %d => frameSize: %d (dataSize: %d)\n", tocIndex, FT, fOurSource.isWideband(), frameSize, dataSize);
+#endif
+  ++fOurSource.frameIndex();
+
+  if (dataSize < frameSize) return 0;
+  return frameSize;
+}
+
+// Creates our packet subclass; "ourSource" is known to be a
+// RawAMRRTPSource because only that class installs this factory.
+BufferedPacket* AMRBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+  return new AMRBufferedPacket((RawAMRRTPSource&)(*ourSource));
+}
+
+///////// AMRDeinterleavingBuffer /////////
+// (used to implement AMRDeinterleaver)
+
+// Upper bound on a single AMR frame's size, in bytes (largest wideband
+// frame is 60 bytes — see the frameBytesFromFTWideband table above):
+#define AMR_MAX_FRAME_SIZE 60
+
+// Double-banked reordering buffer: incoming frames are filed into one
+// bank by interleave position while the other bank is drained in playout
+// order; the banks are swapped when a new interleave group begins.
+class AMRDeinterleavingBuffer {
+public:
+  AMRDeinterleavingBuffer(unsigned numChannels, unsigned maxInterleaveGroupSize);
+  virtual ~AMRDeinterleavingBuffer();
+
+  void deliverIncomingFrame(unsigned frameSize, RawAMRRTPSource* source,
+			    struct timeval presentationTime);
+  Boolean retrieveFrame(unsigned char* to, unsigned maxSize,
+			unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes,
+			u_int8_t& resultFrameHeader,
+			struct timeval& resultPresentationTime,
+			Boolean& resultIsSynchronized);
+
+  // Staging buffer that the raw source reads each incoming frame into:
+  unsigned char* inputBuffer() { return fInputBuffer; }
+  unsigned inputBufferSize() const { return AMR_MAX_FRAME_SIZE; }
+
+private:
+  unsigned char* createNewBuffer();
+
+  // Per-slot bookkeeping for one buffered frame:
+  class FrameDescriptor {
+  public:
+    FrameDescriptor();
+    virtual ~FrameDescriptor();
+
+    unsigned frameSize;
+    unsigned char* frameData;
+    u_int8_t frameHeader;
+    struct timeval presentationTime;
+    Boolean fIsSynchronized;
+  };
+
+  unsigned fNumChannels, fMaxInterleaveGroupSize;
+  FrameDescriptor* fFrames[2]; // two banks of fMaxInterleaveGroupSize slots
+  unsigned char fIncomingBankId; // toggles between 0 and 1
+  unsigned char fIncomingBinMax; // in the incoming bank
+  unsigned char fOutgoingBinMax; // in the outgoing bank
+  unsigned char fNextOutgoingBin;
+  Boolean fHaveSeenPackets;
+  u_int16_t fLastPacketSeqNumForGroup;
+  unsigned char* fInputBuffer;
+  struct timeval fLastRetrievedPresentationTime;
+  unsigned fNumSuccessiveSyncedFrames;
+  unsigned char fILL; // last interleave-group length seen
+};
+
+
+////////// AMRDeinterleaver implementation /////////
+
+// Factory function (trivial forwarder to the private constructor).
+AMRDeinterleaver* AMRDeinterleaver
+::createNew(UsageEnvironment& env,
+	    Boolean isWideband, unsigned numChannels, unsigned maxInterleaveGroupSize,
+	    RawAMRRTPSource* inputSource) {
+  return new AMRDeinterleaver(env, isWideband, numChannels, maxInterleaveGroupSize, inputSource);
+}
+
+// Constructor: takes ownership of "inputSource" (closed by our dtor) and
+// allocates the deinterleaving buffer.
+AMRDeinterleaver::AMRDeinterleaver(UsageEnvironment& env,
+				   Boolean isWideband, unsigned numChannels,
+				   unsigned maxInterleaveGroupSize,
+				   RawAMRRTPSource* inputSource)
+  : AMRAudioSource(env, isWideband, numChannels),
+    fInputSource(inputSource), fNeedAFrame(False) {
+  fDeinterleavingBuffer
+    = new AMRDeinterleavingBuffer(numChannels, maxInterleaveGroupSize);
+}
+
+AMRDeinterleaver::~AMRDeinterleaver() {
+  delete fDeinterleavingBuffer;
+  Medium::close(fInputSource);
+}
+
+// Playout time of one AMR frame:
+static unsigned const uSecsPerFrame = 20000; // 20 ms
+
+// Tries to satisfy the client from the deinterleaving buffer; if the
+// buffer has no deliverable frame, remembers the pending request
+// ("fNeedAFrame") and asks the raw RTP source for more data.
+void AMRDeinterleaver::doGetNextFrame() {
+  // First, try getting a frame from the deinterleaving buffer:
+  if (fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize,
+					   fFrameSize, fNumTruncatedBytes,
+					   fLastFrameHeader, fPresentationTime,
+					   fInputSource->isSynchronized())) {
+
+    // Success!
+    fNeedAFrame = False;
+
+    fDurationInMicroseconds = uSecsPerFrame;
+
+    // Call our own 'after getting' function. Because we're not a 'leaf'
+    // source, we can call this directly, without risking
+    // infinite recursion
+    afterGetting(this);
+    return;
+  }
+
+  // No luck, so ask our source for help:
+  fNeedAFrame = True;
+  if (!fInputSource->isCurrentlyAwaitingData()) {
+    fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(),
+			       fDeinterleavingBuffer->inputBufferSize(),
+			       afterGettingFrame, this,
+			       FramedSource::handleClosure, this);
+  }
+}
+
+// Cancels any pending client request and stops the upstream source.
+void AMRDeinterleaver::doStopGettingFrames() {
+  fNeedAFrame = False;
+  fInputSource->stopGettingFrames();
+}
+
+// Static trampoline for getNextFrame(): recovers "this" and forwards.
+void AMRDeinterleaver
+::afterGettingFrame(void* clientData, unsigned frameSize,
+		    unsigned /*numTruncatedBytes*/,
+		    struct timeval presentationTime,
+		    unsigned /*durationInMicroseconds*/) {
+  AMRDeinterleaver* deinterleaver = (AMRDeinterleaver*)clientData;
+  deinterleaver->afterGettingFrame1(frameSize, presentationTime);
+}
+
+// Files the newly-arrived frame into the deinterleaving buffer, then
+// retries delivery if the client is still waiting.
+void AMRDeinterleaver
+::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) {
+  RawAMRRTPSource* source = (RawAMRRTPSource*)fInputSource;
+
+  // First, put the frame into our deinterleaving buffer:
+  fDeinterleavingBuffer->deliverIncomingFrame(frameSize, source, presentationTime);
+
+  // Then, try delivering a frame to the client (if he wants one):
+  if (fNeedAFrame) doGetNextFrame();
+}
+
+
+////////// AMRDeinterleavingBuffer implementation /////////
+
+// Constructor: allocates the two frame-descriptor banks (incoming and
+// outgoing) and the staging input buffer; all freed by the destructor.
+AMRDeinterleavingBuffer
+::AMRDeinterleavingBuffer(unsigned numChannels, unsigned maxInterleaveGroupSize)
+  : fNumChannels(numChannels), fMaxInterleaveGroupSize(maxInterleaveGroupSize),
+    fIncomingBankId(0), fIncomingBinMax(0),
+    fOutgoingBinMax(0), fNextOutgoingBin(0),
+    fHaveSeenPackets(False), fNumSuccessiveSyncedFrames(0), fILL(0) {
+  // Use two banks of descriptors - one for incoming, one for outgoing
+  fFrames[0] = new FrameDescriptor[fMaxInterleaveGroupSize];
+  fFrames[1] = new FrameDescriptor[fMaxInterleaveGroupSize];
+  fInputBuffer = createNewBuffer();
+}
+
+AMRDeinterleavingBuffer::~AMRDeinterleavingBuffer() {
+  delete[] fInputBuffer;
+  delete[] fFrames[0]; delete[] fFrames[1];
+}
+
// Inserts the just-received frame (already sitting in "fInputBuffer") into
// the appropriate bin of the incoming descriptor bank, switching banks when
// the frame begins a new interleave group.  "frameSize" is the payload size
// in bytes; "presentationTime" is that of the packet's *first* frame-block
// and gets adjusted here for the current frame.
void AMRDeinterleavingBuffer
::deliverIncomingFrame(unsigned frameSize, RawAMRRTPSource* source,
		       struct timeval presentationTime) {
  fILL = source->ILL();
  unsigned char const ILP = source->ILP();
  unsigned frameIndex = source->frameIndex();
  unsigned short packetSeqNum = source->curPacketRTPSeqNum();

  // First perform a sanity check on the parameters:
  // (This is overkill, as the source should have already done this.)
  if (ILP > fILL || frameIndex == 0) {
#ifdef DEBUG
    fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame() param sanity check failed (%d,%d,%d,%d)\n", frameSize, fILL, ILP, frameIndex);
#endif
    source->envir().internalError();
  }

  --frameIndex; // because it was incremented by the source when this frame was read
  u_int8_t frameHeader;
  if (frameIndex >= source->TOCSize()) { // sanity check
    frameHeader = FT_NO_DATA<<3;
  } else {
    frameHeader = source->TOC()[frameIndex];
  }

  // Split the flat frame index into (frame-block, channel-within-block):
  unsigned frameBlockIndex = frameIndex/fNumChannels;
  unsigned frameWithinFrameBlock = frameIndex%fNumChannels;

  // The input "presentationTime" was that of the first frame-block in this
  // packet. Update it for the current frame:
  unsigned uSecIncrement = frameBlockIndex*(fILL+1)*uSecsPerFrame;
  presentationTime.tv_usec += uSecIncrement;
  presentationTime.tv_sec += presentationTime.tv_usec/1000000;
  presentationTime.tv_usec = presentationTime.tv_usec%1000000;

  // Next, check whether this packet is part of a new interleave group
  if (!fHaveSeenPackets
      || seqNumLT(fLastPacketSeqNumForGroup, packetSeqNum + frameBlockIndex)) {
    // We've moved to a new interleave group
#ifdef DEBUG
    fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame(): new interleave group\n");
#endif
    fHaveSeenPackets = True;
    fLastPacketSeqNumForGroup = packetSeqNum + fILL - ILP;

    // Switch the incoming and outgoing banks:
    fIncomingBankId ^= 1;
    unsigned char tmp = fIncomingBinMax;
    fIncomingBinMax = fOutgoingBinMax;
    fOutgoingBinMax = tmp;
    fNextOutgoingBin = 0;
  }

  // Now move the incoming frame into the appropriate bin:
  unsigned const binNumber
    = ((ILP + frameBlockIndex*(fILL+1))*fNumChannels + frameWithinFrameBlock)
      % fMaxInterleaveGroupSize; // the % is for sanity
#ifdef DEBUG
  fprintf(stderr, "AMRDeinterleavingBuffer::deliverIncomingFrame(): frameIndex %d (%d,%d) put in bank %d, bin %d (%d): size %d, header 0x%02x, presentationTime %lu.%06ld\n", frameIndex, frameBlockIndex, frameWithinFrameBlock, fIncomingBankId, binNumber, fMaxInterleaveGroupSize, frameSize, frameHeader, presentationTime.tv_sec, presentationTime.tv_usec);
#endif
  // Hand the filled input buffer to the bin, and take the bin's old buffer
  // (allocating a fresh one if the bin had none) as the next input buffer -
  // a pointer swap instead of a data copy:
  FrameDescriptor& inBin = fFrames[fIncomingBankId][binNumber];
  unsigned char* curBuffer = inBin.frameData;
  inBin.frameData = fInputBuffer;
  inBin.frameSize = frameSize;
  inBin.frameHeader = frameHeader;
  inBin.presentationTime = presentationTime;
  inBin.fIsSynchronized = ((RTPSource*)source)->RTPSource::hasBeenSynchronizedUsingRTCP();

  if (curBuffer == NULL) curBuffer = createNewBuffer();
  fInputBuffer = curBuffer;

  if (binNumber >= fIncomingBinMax) {
    fIncomingBinMax = binNumber + 1;
  }
}
+
+Boolean AMRDeinterleavingBuffer
+::retrieveFrame(unsigned char* to, unsigned maxSize,
+ unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes,
+ u_int8_t& resultFrameHeader,
+ struct timeval& resultPresentationTime,
+ Boolean& resultIsSynchronized) {
+
+ if (fNextOutgoingBin >= fOutgoingBinMax) return False; // none left
+
+ FrameDescriptor& outBin = fFrames[fIncomingBankId^1][fNextOutgoingBin];
+ unsigned char* fromPtr = outBin.frameData;
+ unsigned char fromSize = outBin.frameSize;
+ outBin.frameSize = 0; // for the next time this bin is used
+ resultIsSynchronized = False; // by default; can be changed by:
+ if (outBin.fIsSynchronized) {
+ // Don't consider the outgoing frame to be synchronized until we've received at least a complete interleave cycle of
+ // synchronized frames. This ensures that the receiver will be getting all synchronized frames from now on.
+ if (++fNumSuccessiveSyncedFrames > fILL) {
+ resultIsSynchronized = True;
+ fNumSuccessiveSyncedFrames = fILL+1; // prevents overflow
+ }
+ } else {
+ fNumSuccessiveSyncedFrames = 0;
+ }
+
+ // Check whether this frame is missing; if so, return a FT_NO_DATA frame:
+ if (fromSize == 0) {
+ resultFrameHeader = FT_NO_DATA<<3;
+
+ // Compute this erasure frame's presentation time via extrapolation:
+ resultPresentationTime = fLastRetrievedPresentationTime;
+ resultPresentationTime.tv_usec += uSecsPerFrame;
+ if (resultPresentationTime.tv_usec >= 1000000) {
+ ++resultPresentationTime.tv_sec;
+ resultPresentationTime.tv_usec -= 1000000;
+ }
+ } else {
+ // Normal case - a frame exists:
+ resultFrameHeader = outBin.frameHeader;
+ resultPresentationTime = outBin.presentationTime;
+ }
+
+ fLastRetrievedPresentationTime = resultPresentationTime;
+
+ if (fromSize > maxSize) {
+ resultNumTruncatedBytes = fromSize - maxSize;
+ resultFrameSize = maxSize;
+ } else {
+ resultNumTruncatedBytes = 0;
+ resultFrameSize = fromSize;
+ }
+ memmove(to, fromPtr, resultFrameSize);
+#ifdef DEBUG
+ fprintf(stderr, "AMRDeinterleavingBuffer::retrieveFrame(): from bank %d, bin %d: size %d, header 0x%02x, presentationTime %lu.%06ld\n", fIncomingBankId^1, fNextOutgoingBin, resultFrameSize, resultFrameHeader, resultPresentationTime.tv_sec, resultPresentationTime.tv_usec);
+#endif
+
+ ++fNextOutgoingBin;
+ return True;
+}
+
// Allocates a fresh frame buffer of the fixed input-buffer size.  The caller
// takes ownership; buffers are later released with delete[].
unsigned char* AMRDeinterleavingBuffer::createNewBuffer() {
  return new unsigned char[inputBufferSize()];
}
+
+AMRDeinterleavingBuffer::FrameDescriptor::FrameDescriptor()
+ : frameSize(0), frameData(NULL) {
+}
+
// Each bin owns its frame buffer (ownership was transferred to it by
// deliverIncomingFrame()'s pointer swap), so release it here:
AMRDeinterleavingBuffer::FrameDescriptor::~FrameDescriptor() {
  delete[] frameData;
}
+
// Unpack bandwidth-aligned data to octet-aligned:
// Number of speech bits in an AMR(-NB) frame, indexed by the TOC entry's
// frame type (FT) field; zero entries are reserved/unused frame types.
// (See RFC 4867 for the frame-type tables.)
static unsigned short const frameBitsFromFT[16] = {
  95, 103, 118, 134,
  148, 159, 204, 244,
  39, 0, 0, 0,
  0, 0, 0, 0
};
// Likewise, but for AMR-WB frame types:
static unsigned short const frameBitsFromFTWideband[16] = {
  132, 177, 253, 285,
  317, 365, 397, 461,
  477, 40, 0, 0,
  0, 0, 0, 0
};
+
// Converts a 'bandwidth-efficient' (bit-packed) AMR payload in "packet" into
// the equivalent octet-aligned form, replacing the packet's data in place.
// Layout handled: 4-bit CMR, then 6-bit TOC entries (while the F bit is set),
// then the frame payloads, each frameBitsFromFT[FT] bits long.
static void unpackBandwidthEfficientData(BufferedPacket* packet,
					 Boolean isWideband) {
#ifdef DEBUG
  fprintf(stderr, "Unpacking 'bandwidth-efficient' payload (%d bytes):\n", packet->dataSize());
  for (unsigned j = 0; j < packet->dataSize(); ++j) {
    fprintf(stderr, "%02x:", (packet->data())[j]);
  }
  fprintf(stderr, "\n");
#endif
  BitVector fromBV(packet->data(), 0, 8*packet->dataSize());

  unsigned const toBufferSize = 2*packet->dataSize(); // conservatively large
  unsigned char* toBuffer = new unsigned char[toBufferSize];
  unsigned toCount = 0;

  // Begin with the payload header:
  unsigned CMR = fromBV.getBits(4);
  toBuffer[toCount++] = CMR << 4;

  // Then, run through and unpack the TOC entries:
  while (1) {
    unsigned toc = fromBV.getBits(6);
    toBuffer[toCount++] = toc << 2; // re-align each TOC byte to octet form

    if ((toc&0x20) == 0) break; // the F bit is 0
  }

  // Then, using the TOC data, unpack each frame payload:
  unsigned const tocSize = toCount - 1;
  for (unsigned i = 1; i <= tocSize; ++i) {
    unsigned char tocByte = toBuffer[i];
    unsigned char const FT = (tocByte&0x78) >> 3;
    unsigned short frameSizeBits
      = isWideband ? frameBitsFromFTWideband[FT] : frameBitsFromFT[FT];
    unsigned short frameSizeBytes = (frameSizeBits+7)/8;

    // Stop early if the packet is shorter than its TOC promises:
    if (frameSizeBits > fromBV.numBitsRemaining()) {
#ifdef DEBUG
      fprintf(stderr, "\tWarning: Unpacking frame %d of %d: want %d bits, but only %d are available!\n", i, tocSize, frameSizeBits, fromBV.numBitsRemaining());
#endif
      break;
    }

    // Copy this frame's bits, re-aligned to start on an octet boundary:
    shiftBits(&toBuffer[toCount], 0, // to
	      packet->data(), fromBV.curBitIndex(), // from
	      frameSizeBits // num bits
	      );
    fromBV.skipBits(frameSizeBits);
    toCount += frameSizeBytes;
  }

#ifdef DEBUG
  if (fromBV.numBitsRemaining() > 7) {
    fprintf(stderr, "\tWarning: %d bits remain unused!\n", fromBV.numBitsRemaining());
  }
#endif

  // Finally, replace the current packet data with the unpacked data:
  packet->removePadding(packet->dataSize()); // throws away current packet data
  packet->appendData(toBuffer, toCount);
  delete[] toBuffer;
}
diff --git a/liveMedia/AMRAudioSource.cpp b/liveMedia/AMRAudioSource.cpp
new file mode 100644
index 0000000..b5fef8c
--- /dev/null
+++ b/liveMedia/AMRAudioSource.cpp
@@ -0,0 +1,38 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source object for AMR audio sources
+// Implementation
+
+#include "AMRAudioSource.hh"
+
// Base class for AMR audio sources; just records the stream's basic
// parameters (wideband?, channel count) and clears the last frame header.
AMRAudioSource::AMRAudioSource(UsageEnvironment& env,
			       Boolean isWideband, unsigned numChannels)
  : FramedSource(env),
    fIsWideband(isWideband), fNumChannels(numChannels), fLastFrameHeader(0) {
}
+
// Nothing of our own to release; base classes handle their own teardown.
AMRAudioSource::~AMRAudioSource() {
}
+
// The MIME type announced for this stream.
char const* AMRAudioSource::MIMEtype() const {
  return "audio/AMR";
}
+
// RTTI-style override used elsewhere in the library to recognize AMR sources.
Boolean AMRAudioSource::isAMRAudioSource() const {
  return True;
}
diff --git a/liveMedia/AVIFileSink.cpp b/liveMedia/AVIFileSink.cpp
new file mode 100644
index 0000000..ec3f041
--- /dev/null
+++ b/liveMedia/AVIFileSink.cpp
@@ -0,0 +1,789 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A sink that generates an AVI file from a composite media session
+// Implementation
+
+#include "AVIFileSink.hh"
+#include "InputFile.hh"
+#include "OutputFile.hh"
+#include "GroupsockHelper.hh"
+
// Builds a 32-bit RIFF fourcc from four characters, stored little-endian
// (first character in the least-significant byte):
#define fourChar(x,y,z,w) ( ((w)<<24)|((z)<<16)|((y)<<8)|(x) )/*little-endian*/

// Flag bits for 'idx1' index records (only AVIIF_KEYFRAME is used below):
#define AVIIF_LIST 0x00000001
#define AVIIF_KEYFRAME 0x00000010
#define AVIIF_NO_TIME 0x00000100
#define AVIIF_COMPRESSOR 0x0FFF0000
+
+////////// AVISubsessionIOState ///////////
+// A structure used to represent the I/O state of each input 'subsession':
+
// A byte buffer that accumulates one subsession's incoming frame data,
// together with the presentation time that belongs to it.
class SubsessionBuffer {
public:
  SubsessionBuffer(unsigned bufferSize)
    : fBufferSize(bufferSize), fBytesInUse(0),
      fData(new unsigned char[bufferSize]) {
  }
  virtual ~SubsessionBuffer() {
    delete[] fData;
  }

  // Discard any accumulated bytes (the storage itself is retained):
  void reset() { fBytesInUse = 0; }
  // Record that "numBytes" additional bytes (already copied in) are in use:
  void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }

  unsigned char* dataStart() { return fData; }
  unsigned char* dataEnd() { return fData + fBytesInUse; }
  unsigned bytesInUse() const { return fBytesInUse; }
  unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }

  void setPresentationTime(struct timeval const& presentationTime) {
    fPresentationTime = presentationTime;
  }
  struct timeval const& presentationTime() const { return fPresentationTime; }

private:
  unsigned fBufferSize;             // total capacity, in bytes
  unsigned fBytesInUse;             // how much of fData currently holds data
  struct timeval fPresentationTime; // time stamp for the buffered frame
  unsigned char* fData;             // heap storage of size fBufferSize
};
+
// Per-subsession I/O state for an AVIFileSink: owns the receive buffer(s),
// tracks per-stream statistics, and holds the AVI header values chosen for
// this stream by setAVIstate().
class AVISubsessionIOState {
public:
  AVISubsessionIOState(AVIFileSink& sink, MediaSubsession& subsession);
  virtual ~AVISubsessionIOState();

  // Derive the AVI stream parameters (chunk tag, codec handler, scale/rate,
  // ...) from the subsession's medium & codec names; "subsessionIndex"
  // selects the '##dc'/'##wb' stream number:
  void setAVIstate(unsigned subsessionIndex);
  void setFinalAVIstate();

  // Handles one received frame (called from AVIFileSink::afterGettingFrame):
  void afterGettingFrame(unsigned packetDataSize,
			 struct timeval presentationTime);
  void onSourceClosure();

  UsageEnvironment& envir() const { return fOurSink.envir(); }

public:
  SubsessionBuffer *fBuffer, *fPrevBuffer; // current & (loss-compensation) previous frame
  AVIFileSink& fOurSink;
  MediaSubsession& fOurSubsession;

  unsigned short fLastPacketRTPSeqNum; // for detecting RTP packet loss
  Boolean fOurSourceIsActive;
  struct timeval fPrevPresentationTime;
  unsigned fMaxBytesPerSecond; // peak observed stream bandwidth
  Boolean fIsVideo, fIsAudio, fIsByteSwappedAudio;
  unsigned fAVISubsessionTag;
  unsigned fAVICodecHandlerType;
  unsigned fAVISamplingFrequency; // for audio
  u_int16_t fWAVCodecTag; // for audio
  unsigned fAVIScale;
  unsigned fAVISize;
  unsigned fAVIRate;
  unsigned fNumFrames;
  unsigned fSTRHFrameCountPosition; // file position of 'strh' frame count, for back-patching

private:
  void useFrame(SubsessionBuffer& buffer);
};
+
+
+///////// AVIIndexRecord definition & implementation //////////
+
// One record of the AVI file's 'idx1' index chunk.  Each record describes a
// single data chunk: its fourcc id, AVIIF_* flags, byte offset, and size.
// Records form a singly-linked list via next().
class AVIIndexRecord {
public:
  AVIIndexRecord(unsigned chunkId, unsigned flags, unsigned offset, unsigned size)
    : fNext(NULL), fChunkId(chunkId), fFlags(flags), fOffset(offset), fSize(size) {
  }

  // Returns a *reference* to the link field, so that a caller can splice a
  // new record onto the list tail:
  AVIIndexRecord*& next() {
    return fNext;
  }
  unsigned chunkId() const {
    return fChunkId;
  }
  unsigned flags() const {
    return fFlags;
  }
  unsigned offset() const {
    return fOffset;
  }
  unsigned size() const {
    return fSize;
  }

private:
  AVIIndexRecord* fNext; // next record in the 'idx1' list (or NULL)
  unsigned fChunkId;     // stream fourcc ('00dc', '01wb', ...)
  unsigned fFlags;       // AVIIF_* flag bits
  unsigned fOffset;      // byte position of the chunk within the 'movi' list
  unsigned fSize;        // chunk size in bytes
};
+
+
+////////// AVIFileSink implementation //////////
+
// Opens the output file, creates an AVISubsessionIOState for every subsession
// that has a data source (hanging it off subsession->miscPtr), registers RTCP
// 'BYE' handlers, and writes the initial AVI headers.  On file-open failure,
// fOutFid stays NULL and createNew() reports the error.
AVIFileSink::AVIFileSink(UsageEnvironment& env,
			 MediaSession& inputSession,
			 char const* outputFileName,
			 unsigned bufferSize,
			 unsigned short movieWidth, unsigned short movieHeight,
			 unsigned movieFPS, Boolean packetLossCompensate)
  : Medium(env), fInputSession(inputSession),
    fIndexRecordsHead(NULL), fIndexRecordsTail(NULL), fNumIndexRecords(0),
    fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
    fAreCurrentlyBeingPlayed(False), fNumSubsessions(0), fNumBytesWritten(0),
    fHaveCompletedOutputFile(False),
    fMovieWidth(movieWidth), fMovieHeight(movieHeight), fMovieFPS(movieFPS) {
  fOutFid = OpenOutputFile(env, outputFileName);
  if (fOutFid == NULL) return;

  // Set up I/O state for each input subsession:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    // Ignore subsessions without a data source:
    FramedSource* subsessionSource = subsession->readSource();
    if (subsessionSource == NULL) continue;

    // If "subsession's" SDP description specified screen dimension
    // or frame rate parameters, then use these.
    if (subsession->videoWidth() != 0) {
      fMovieWidth = subsession->videoWidth();
    }
    if (subsession->videoHeight() != 0) {
      fMovieHeight = subsession->videoHeight();
    }
    if (subsession->videoFPS() != 0) {
      fMovieFPS = subsession->videoFPS();
    }

    AVISubsessionIOState* ioState
      = new AVISubsessionIOState(*this, *subsession);
    subsession->miscPtr = (void*)ioState;

    // Also set a 'BYE' handler for this subsession's RTCP instance:
    if (subsession->rtcpInstance() != NULL) {
      subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
    }

    ++fNumSubsessions;
  }

  // Begin by writing an AVI header:
  addFileHeader_AVI();
}
+
// Finishes the file first (index + back-patched sizes), then tears down the
// per-subsession state, the index records, and finally the file handle.
AVIFileSink::~AVIFileSink() {
  completeOutputFile();

  // Then, stop streaming and delete each active "AVISubsessionIOState":
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    if (subsession->readSource() != NULL) subsession->readSource()->stopGettingFrames();

    AVISubsessionIOState* ioState
      = (AVISubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    delete ioState;
  }

  // Then, delete the index records:
  AVIIndexRecord* cur = fIndexRecordsHead;
  while (cur != NULL) {
    AVIIndexRecord* next = cur->next();
    delete cur;
    cur = next;
  }

  // Finally, close our output file:
  CloseOutputFile(fOutFid);
}
+
+AVIFileSink* AVIFileSink
+::createNew(UsageEnvironment& env, MediaSession& inputSession,
+ char const* outputFileName,
+ unsigned bufferSize,
+ unsigned short movieWidth, unsigned short movieHeight,
+ unsigned movieFPS, Boolean packetLossCompensate) {
+ AVIFileSink* newSink =
+ new AVIFileSink(env, inputSession, outputFileName, bufferSize,
+ movieWidth, movieHeight, movieFPS, packetLossCompensate);
+ if (newSink == NULL || newSink->fOutFid == NULL) {
+ Medium::close(newSink);
+ return NULL;
+ }
+
+ return newSink;
+}
+
// Begins recording: remembers the completion callback, then kicks off frame
// requests on every subsession via continuePlaying().  Returns False (with
// an error message set) if already playing or no subsession is active.
Boolean AVIFileSink::startPlaying(afterPlayingFunc* afterFunc,
				  void* afterClientData) {
  // Make sure we're not already being played:
  if (fAreCurrentlyBeingPlayed) {
    envir().setResultMsg("This sink has already been played");
    return False;
  }

  fAreCurrentlyBeingPlayed = True;
  fAfterFunc = afterFunc;
  fAfterClientData = afterClientData;

  return continuePlaying();
}
+
// Requests the next frame from every subsession source that is not already
// awaiting data, delivering into that subsession's buffer.  Returns False
// only if no subsession is active at all.
Boolean AVIFileSink::continuePlaying() {
  // Run through each of our input session's 'subsessions',
  // asking for a frame from each one:
  Boolean haveActiveSubsessions = False;
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    FramedSource* subsessionSource = subsession->readSource();
    if (subsessionSource == NULL) continue;

    if (subsessionSource->isCurrentlyAwaitingData()) continue;

    AVISubsessionIOState* ioState
      = (AVISubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    haveActiveSubsessions = True;
    // Append into the unused tail of this subsession's buffer:
    unsigned char* toPtr = ioState->fBuffer->dataEnd();
    unsigned toSize = ioState->fBuffer->bytesAvailable();
    subsessionSource->getNextFrame(toPtr, toSize,
				   afterGettingFrame, ioState,
				   onSourceClosure, ioState);
  }
  if (!haveActiveSubsessions) {
    envir().setResultMsg("No subsessions are currently active");
    return False;
  }

  return True;
}
+
// Static trampoline for frame-delivery callbacks: warns about truncation,
// then forwards to the subsession's AVISubsessionIOState.
void AVIFileSink
::afterGettingFrame(void* clientData, unsigned packetDataSize,
		    unsigned numTruncatedBytes,
		    struct timeval presentationTime,
		    unsigned /*durationInMicroseconds*/) {
  AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;
  if (numTruncatedBytes > 0) {
    ioState->envir() << "AVIFileSink::afterGettingFrame(): The input frame data was too large for our buffer. "
		     << numTruncatedBytes
		     << " bytes of trailing data was dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
  }
  ioState->afterGettingFrame(packetDataSize, presentationTime);
}
+
// Static trampoline for source-closure callbacks: forwards to the
// subsession's AVISubsessionIOState.
void AVIFileSink::onSourceClosure(void* clientData) {
  AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;
  ioState->onSourceClosure();
}
+
// Called whenever any one subsession source closes.  Only when *all*
// subsession sources have closed do we finish the file and notify the client.
void AVIFileSink::onSourceClosure1() {
  // Check whether *all* of the subsession sources have closed.
  // If not, do nothing for now:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    AVISubsessionIOState* ioState
      = (AVISubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    if (ioState->fOurSourceIsActive) return; // this source hasn't closed
  }

  completeOutputFile();

  // Call our specified 'after' function:
  if (fAfterFunc != NULL) {
    (*fAfterFunc)(fAfterClientData);
  }
}
+
// RTCP 'BYE' handler: logs the event and then treats it as a source closure.
// NOTE(review): "fStartTime" does not appear to be assigned anywhere in this
// file, so the "secsDiff" reported here may be based on an uninitialized
// value - verify where fStartTime is set (header or another TU).
void AVIFileSink::onRTCPBye(void* clientData) {
  AVISubsessionIOState* ioState = (AVISubsessionIOState*)clientData;

  struct timeval timeNow;
  gettimeofday(&timeNow, NULL);
  unsigned secsDiff
    = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;

  MediaSubsession& subsession = ioState->fOurSubsession;
  ioState->envir() << "Received RTCP \"BYE\" on \""
		   << subsession.mediumName()
		   << "/" << subsession.codecName()
		   << "\" subsession (after "
		   << secsDiff << " seconds)\n";

  // Handle the reception of a RTCP "BYE" as if the source had closed:
  ioState->onSourceClosure();
}
+
+void AVIFileSink::addIndexRecord(AVIIndexRecord* newIndexRecord) {
+ if (fIndexRecordsHead == NULL) {
+ fIndexRecordsHead = newIndexRecord;
+ } else {
+ fIndexRecordsTail->next() = newIndexRecord;
+ }
+ fIndexRecordsTail = newIndexRecord;
+ ++fNumIndexRecords;
+}
+
// Finishes the AVI file: back-patches the per-stream frame counts, appends
// the 'idx1' index chunk, and patches the RIFF/'movi' sizes and the global
// 'avih' fields now that all data has been written.  Idempotent.
void AVIFileSink::completeOutputFile() {
  if (fHaveCompletedOutputFile || fOutFid == NULL) return;

  // Update various AVI 'size' fields to take account of the codec data that
  // we've now written to the file:
  unsigned maxBytesPerSecond = 0;
  unsigned numVideoFrames = 0;
  unsigned numAudioFrames = 0;

  //// Subsession-specific fields:
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    AVISubsessionIOState* ioState
      = (AVISubsessionIOState*)(subsession->miscPtr);
    if (ioState == NULL) continue;

    maxBytesPerSecond += ioState->fMaxBytesPerSecond;

    // Patch this stream's 'strh' frame count in place:
    setWord(ioState->fSTRHFrameCountPosition, ioState->fNumFrames);
    if (ioState->fIsVideo) numVideoFrames = ioState->fNumFrames;
    else if (ioState->fIsAudio) numAudioFrames = ioState->fNumFrames;
  }

  //// Global fields:
  add4ByteString("idx1");
  addWord(fNumIndexRecords*4*4); // the size of all of the index records, which come next:
  for (AVIIndexRecord* indexRecord = fIndexRecordsHead; indexRecord != NULL; indexRecord = indexRecord->next()) {
    addWord(indexRecord->chunkId());
    addWord(indexRecord->flags());
    addWord(indexRecord->offset());
    addWord(indexRecord->size());
  }

  fRIFFSizeValue += fNumBytesWritten + fNumIndexRecords*4*4 - 4;
  setWord(fRIFFSizePosition, fRIFFSizeValue);

  setWord(fAVIHMaxBytesPerSecondPosition, maxBytesPerSecond);
  setWord(fAVIHFrameCountPosition,
	  numVideoFrames > 0 ? numVideoFrames : numAudioFrames);

  fMoviSizeValue += fNumBytesWritten;
  setWord(fMoviSizePosition, fMoviSizeValue);

  // We're done:
  fHaveCompletedOutputFile = True;
}
+
+
+////////// AVISubsessionIOState implementation ///////////
+
// Sets up buffering for one subsession.  A second ("previous-frame") buffer
// is allocated only when packet-loss compensation is enabled.
// NOTE(review): "fLastPacketRTPSeqNum" is not initialized here; it is first
// written in afterGettingFrame().  The loss-compensation path only reads it
// after a previous frame has been buffered, so this appears safe - verify.
AVISubsessionIOState::AVISubsessionIOState(AVIFileSink& sink,
					   MediaSubsession& subsession)
  : fOurSink(sink), fOurSubsession(subsession),
    fMaxBytesPerSecond(0), fIsVideo(False), fIsAudio(False), fIsByteSwappedAudio(False), fNumFrames(0) {
  fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
  fPrevBuffer = sink.fPacketLossCompensate
    ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;

  FramedSource* subsessionSource = subsession.readSource();
  fOurSourceIsActive = subsessionSource != NULL;

  fPrevPresentationTime.tv_sec = 0;
  fPrevPresentationTime.tv_usec = 0;
}
+
// Releases both buffers (fPrevBuffer may be NULL; "delete NULL" is a no-op).
AVISubsessionIOState::~AVISubsessionIOState() {
  delete fBuffer; delete fPrevBuffer;
}
+
// Chooses the AVI stream parameters - chunk tag ('##dc' video / '##wb'
// audio), codec handler fourcc, WAV codec tag, and scale/rate/size - from
// the subsession's medium and codec names.  Unknown codecs get placeholder
// values (marked "??? #####" below).
void AVISubsessionIOState::setAVIstate(unsigned subsessionIndex) {
  fIsVideo = strcmp(fOurSubsession.mediumName(), "video") == 0;
  fIsAudio = strcmp(fOurSubsession.mediumName(), "audio") == 0;

  if (fIsVideo) {
    fAVISubsessionTag
      = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'d','c');
    if (strcmp(fOurSubsession.codecName(), "JPEG") == 0) {
      fAVICodecHandlerType = fourChar('m','j','p','g');
    } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
      fAVICodecHandlerType = fourChar('D','I','V','X');
    } else if (strcmp(fOurSubsession.codecName(), "MPV") == 0) {
      fAVICodecHandlerType = fourChar('m','p','g','1'); // what about MPEG-2?
    } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
	       strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
      fAVICodecHandlerType = fourChar('H','2','6','3');
    } else if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
      fAVICodecHandlerType = fourChar('H','2','6','4');
    } else {
      fAVICodecHandlerType = fourChar('?','?','?','?');
    }
    fAVIScale = 1; // ??? #####
    fAVIRate = fOurSink.fMovieFPS; // ??? #####
    fAVISize = fOurSink.fMovieWidth*fOurSink.fMovieHeight*3; // ??? #####
  } else if (fIsAudio) {
    fIsByteSwappedAudio = False; // by default
    fAVISubsessionTag
      = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'w','b');
    fAVICodecHandlerType = 1; // ??? ####
    unsigned numChannels = fOurSubsession.numChannels();
    fAVISamplingFrequency = fOurSubsession.rtpTimestampFrequency(); // default
    if (strcmp(fOurSubsession.codecName(), "L16") == 0) {
      fIsByteSwappedAudio = True; // need to byte-swap data before writing it
      fWAVCodecTag = 0x0001;
      fAVIScale = fAVISize = 2*numChannels; // 2 bytes/sample
      fAVIRate = fAVISize*fAVISamplingFrequency;
    } else if (strcmp(fOurSubsession.codecName(), "L8") == 0) {
      fWAVCodecTag = 0x0001;
      fAVIScale = fAVISize = numChannels; // 1 byte/sample
      fAVIRate = fAVISize*fAVISamplingFrequency;
    } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
      fWAVCodecTag = 0x0006;
      fAVIScale = fAVISize = numChannels; // 1 byte/sample
      fAVIRate = fAVISize*fAVISamplingFrequency;
    } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
      fWAVCodecTag = 0x0007;
      fAVIScale = fAVISize = numChannels; // 1 byte/sample
      fAVIRate = fAVISize*fAVISamplingFrequency;
    } else if (strcmp(fOurSubsession.codecName(), "MPA") == 0) {
      fWAVCodecTag = 0x0050;
      fAVIScale = fAVISize = 1;
      fAVIRate = 0; // ??? #####
    } else {
      fWAVCodecTag = 0x0001; // ??? #####
      fAVIScale = fAVISize = 1;
      fAVIRate = 0; // ??? #####
    }
  } else { // unknown medium
    fAVISubsessionTag
      = fourChar('0'+subsessionIndex/10,'0'+subsessionIndex%10,'?','?');
    fAVICodecHandlerType = 0;
    fAVIScale = fAVISize = 1;
    fAVIRate = 0; // ??? #####
  }
}
+
// Processes one received frame: optionally patches over lost RTP packets by
// duplicating the previous frame, writes this frame to the file, then asks
// the sink to request more frames.
void AVISubsessionIOState::afterGettingFrame(unsigned packetDataSize,
					     struct timeval presentationTime) {
  // Begin by checking whether there was a gap in the RTP stream.
  // If so, try to compensate for this (if desired):
  unsigned short rtpSeqNum
    = fOurSubsession.rtpSource()->curPacketRTPSeqNum();
  if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
    // The 16-bit subtraction stored in a "short" handles sequence-number
    // wraparound naturally:
    short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
    for (short i = 1; i < seqNumGap; ++i) {
      // Insert a copy of the previous frame, to compensate for the loss:
      useFrame(*fPrevBuffer);
    }
  }
  fLastPacketRTPSeqNum = rtpSeqNum;

  // Now, continue working with the frame that we just got
  if (fBuffer->bytesInUse() == 0) {
    fBuffer->setPresentationTime(presentationTime);
  }
  fBuffer->addBytes(packetDataSize);

  useFrame(*fBuffer);
  if (fOurSink.fPacketLossCompensate) {
    // Save this frame, in case we need it for recovery:
    SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
    fPrevBuffer = fBuffer;
    fBuffer = tmp;
  }
  fBuffer->reset(); // for the next input

  // Now, try getting more frames:
  fOurSink.continuePlaying();
}
+
// Writes one buffered frame to the AVI file: updates the bandwidth
// statistic, byte-swaps 16-bit audio if needed, appends an index record,
// then emits the chunk header + payload (H.264 gets an Annex-B start code),
// padded to an even length.
void AVISubsessionIOState::useFrame(SubsessionBuffer& buffer) {
  unsigned char* const frameSource = buffer.dataStart();
  unsigned const frameSize = buffer.bytesInUse();
  struct timeval const& presentationTime = buffer.presentationTime();
  // Track the peak bytes/second observed, from inter-frame timing:
  // NOTE(review): "int uSecondsDiff" overflows for gaps over ~35 minutes -
  // presumably acceptable here; verify if long recording pauses can occur.
  if (fPrevPresentationTime.tv_usec != 0||fPrevPresentationTime.tv_sec != 0) {
    int uSecondsDiff
      = (presentationTime.tv_sec - fPrevPresentationTime.tv_sec)*1000000
      + (presentationTime.tv_usec - fPrevPresentationTime.tv_usec);
    if (uSecondsDiff > 0) {
      unsigned bytesPerSecond = (unsigned)((frameSize*1000000.0)/uSecondsDiff);
      if (bytesPerSecond > fMaxBytesPerSecond) {
	fMaxBytesPerSecond = bytesPerSecond;
      }
    }
  }
  fPrevPresentationTime = presentationTime;

  if (fIsByteSwappedAudio) {
    // We need to swap the 16-bit audio samples from big-endian
    // to little-endian order, before writing them to a file:
    // (assumes "frameSize" is even, i.e. whole 16-bit samples - TODO confirm)
    for (unsigned i = 0; i < frameSize; i += 2) {
      unsigned char tmp = frameSource[i];
      frameSource[i] = frameSource[i+1];
      frameSource[i+1] = tmp;
    }
  }

  // Add an index record for this frame:
  AVIIndexRecord* newIndexRecord
    = new AVIIndexRecord(fAVISubsessionTag, // chunk id
			 AVIIF_KEYFRAME, // flags
			 4 + fOurSink.fNumBytesWritten, // offset (note: 4 == 'movi')
			 frameSize); // size
  fOurSink.addIndexRecord(newIndexRecord);

  // Write the data into the file:
  fOurSink.fNumBytesWritten += fOurSink.addWord(fAVISubsessionTag);
  if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
    // Insert a 'start code' (0x00 0x00 0x00 0x01) in front of the frame:
    fOurSink.fNumBytesWritten += fOurSink.addWord(4+frameSize);
    fOurSink.fNumBytesWritten += fOurSink.addWord(fourChar(0x00, 0x00, 0x00, 0x01));//add start code
  } else {
    fOurSink.fNumBytesWritten += fOurSink.addWord(frameSize);
  }
  fwrite(frameSource, 1, frameSize, fOurSink.fOutFid);
  fOurSink.fNumBytesWritten += frameSize;
  // Pad to an even length:
  if (frameSize%2 != 0) fOurSink.fNumBytesWritten += fOurSink.addByte(0);

  ++fNumFrames;
}
+
// Marks this subsession's source inactive, then lets the sink check whether
// all sources have now closed:
void AVISubsessionIOState::onSourceClosure() {
  fOurSourceIsActive = False;
  fOurSink.onSourceClosure1();
}
+
+
+////////// AVI-specific implementation //////////
+
+unsigned AVIFileSink::addWord(unsigned word) {
+ // Add "word" to the file in little-endian order:
+ addByte(word); addByte(word>>8);
+ addByte(word>>16); addByte(word>>24);
+
+ return 4;
+}
+
+unsigned AVIFileSink::addHalfWord(unsigned short halfWord) {
+ // Add "halfWord" to the file in little-endian order:
+ addByte((unsigned char)halfWord); addByte((unsigned char)(halfWord>>8));
+
+ return 2;
+}
+
+unsigned AVIFileSink::addZeroWords(unsigned numWords) {
+ for (unsigned i = 0; i < numWords; ++i) {
+ addWord(0);
+ }
+
+ return numWords*4;
+}
+
+unsigned AVIFileSink::add4ByteString(char const* str) {
+ addByte(str[0]); addByte(str[1]); addByte(str[2]);
+ addByte(str[3] == '\0' ? ' ' : str[3]); // e.g., for "AVI "
+
+ return 4;
+}
+
// Overwrites the 4-byte little-endian word at absolute file position
// "filePosn" with "size", then restores the write position to end-of-file.
// Used to back-patch chunk-size fields once their contents are known.
void AVIFileSink::setWord(unsigned filePosn, unsigned size) {
  do {
    if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
    addWord(size);
    if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were

    return;
  } while (0);

  // One of the SeekFile64()s failed, probably because we're not a seekable file
  envir() << "AVIFileSink::setWord(): SeekFile64 failed (err "
	  << envir().getErrno() << ")\n";
}
+
// Methods for writing particular file headers. Note the following macros:

// Opens an "addFileHeader_<name>()" method that writes a RIFF list header:
// the 4-byte <tag>, a size word (back-patched by addFileHeaderEnd), and the
// 4-byte <name>.  The locals "headerSizePosn", "ignoredSize", and "size"
// stay in scope for the statements following the macro invocation.
#define addFileHeader(tag,name) \
  unsigned AVIFileSink::addFileHeader_##name() { \
    add4ByteString("" #tag ""); \
    unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \
    add4ByteString("" #name ""); \
    unsigned ignoredSize = 8;/*don't include size of tag or size fields*/ \
    unsigned size = 12

// Like "addFileHeader", but for a simple (non-list) chunk: just the 4-byte
// <name> followed by a back-patched size word.
#define addFileHeader1(name) \
  unsigned AVIFileSink::addFileHeader_##name() { \
    add4ByteString("" #name ""); \
    unsigned headerSizePosn = (unsigned)TellFile64(fOutFid); addWord(0); \
    unsigned ignoredSize = 8;/*don't include size of name or size fields*/ \
    unsigned size = 8

// Closes a header method: back-patches the chunk's size field and returns
// the total number of bytes written.
#define addFileHeaderEnd \
  setWord(headerSizePosn, size-ignoredSize); \
  return size; \
}
+
// The top-level 'RIFF/AVI ' header, containing the 'hdrl' and 'movi' lists.
// The RIFF size field's position and provisional value are saved so that
// completeOutputFile() can patch in the final total later.
addFileHeader(RIFF,AVI);
  size += addFileHeader_hdrl();
  size += addFileHeader_movi();
  fRIFFSizePosition = headerSizePosn;
  fRIFFSizeValue = size-ignoredSize;
addFileHeaderEnd;
+
// The 'LIST/hdrl' header list: the main 'avih' header, then one 'strl'
// stream list per subsession (video streams first, so the video stream gets
// the lowest stream number), then a trailing 'JUNK' padding chunk.
addFileHeader(LIST,hdrl);
  size += addFileHeader_avih();

  // Then, add a "strl" header for each subsession (stream):
  // (Make the video subsession (if any) come before the audio subsession.)
  unsigned subsessionCount = 0;
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  while ((subsession = iter.next()) != NULL) {
    fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr);
    if (fCurrentIOState == NULL) continue;
    if (strcmp(subsession->mediumName(), "video") != 0) continue;

    fCurrentIOState->setAVIstate(subsessionCount++);
    size += addFileHeader_strl();
  }
  iter.reset();
  while ((subsession = iter.next()) != NULL) {
    fCurrentIOState = (AVISubsessionIOState*)(subsession->miscPtr);
    if (fCurrentIOState == NULL) continue;
    if (strcmp(subsession->mediumName(), "video") == 0) continue;

    fCurrentIOState->setAVIstate(subsessionCount++);
    size += addFileHeader_strl();
  }

  // Then add another JUNK entry
  ++fJunkNumber;
  size += addFileHeader_JUNK();
addFileHeaderEnd;
+
+#define AVIF_HASINDEX 0x00000010 // Index at end of file?
+#define AVIF_MUSTUSEINDEX 0x00000020
+#define AVIF_ISINTERLEAVED 0x00000100
+#define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames?
+#define AVIF_WASCAPTUREFILE 0x00010000
+#define AVIF_COPYRIGHTED 0x00020000
+
+// The main AVI header ("avih"): global movie parameters.  Fields whose
+// values aren't known until recording finishes (max bytes/sec, total frame
+// count) are written as 0 now; their positions are remembered for patching.
+addFileHeader1(avih);
+  unsigned usecPerFrame = fMovieFPS == 0 ? 0 : 1000000/fMovieFPS; // guard against division by zero
+  size += addWord(usecPerFrame); // dwMicroSecPerFrame
+  fAVIHMaxBytesPerSecondPosition = (unsigned)TellFile64(fOutFid);
+  size += addWord(0); // dwMaxBytesPerSec (fill in later)
+  size += addWord(0); // dwPaddingGranularity
+  size += addWord(AVIF_TRUSTCKTYPE|AVIF_HASINDEX|AVIF_ISINTERLEAVED); // dwFlags
+  fAVIHFrameCountPosition = (unsigned)TellFile64(fOutFid);
+  size += addWord(0); // dwTotalFrames (fill in later)
+  size += addWord(0); // dwInitialFrame
+  size += addWord(fNumSubsessions); // dwStreams
+  size += addWord(fBufferSize); // dwSuggestedBufferSize
+  size += addWord(fMovieWidth); // dwWidth
+  size += addWord(fMovieHeight); // dwHeight
+  size += addZeroWords(4); // dwReserved
+addFileHeaderEnd;
+
+// A per-stream "strl" list: the "strh" and "strf" headers for the current
+// subsession ("fCurrentIOState"), followed by a JUNK entry.
+addFileHeader(LIST,strl);
+  size += addFileHeader_strh();
+  size += addFileHeader_strf();
+  fJunkNumber = 0; // select the index-shaped JUNK variant (see addFileHeader_JUNK)
+  size += addFileHeader_JUNK();
+addFileHeaderEnd;
+
+// The stream header ("strh") for the current subsession: stream type,
+// codec handler, timing (scale/rate), and frame dimensions for video.
+addFileHeader1(strh);
+  size += add4ByteString(fCurrentIOState->fIsVideo ? "vids" :
+                         fCurrentIOState->fIsAudio ? "auds" :
+                         "????"); // fccType
+  size += addWord(fCurrentIOState->fAVICodecHandlerType); // fccHandler
+  size += addWord(0); // dwFlags
+  size += addWord(0); // wPriority + wLanguage
+  size += addWord(0); // dwInitialFrames
+  size += addWord(fCurrentIOState->fAVIScale); // dwScale
+  size += addWord(fCurrentIOState->fAVIRate); // dwRate
+  size += addWord(0); // dwStart
+  // Remember where the frame count goes, so it can be patched at the end:
+  fCurrentIOState->fSTRHFrameCountPosition = (unsigned)TellFile64(fOutFid);
+  size += addWord(0); // dwLength (fill in later)
+  size += addWord(fBufferSize); // dwSuggestedBufferSize
+  size += addWord((unsigned)-1); // dwQuality
+  size += addWord(fCurrentIOState->fAVISize); // dwSampleSize
+  size += addWord(0); // rcFrame (start)
+  if (fCurrentIOState->fIsVideo) {
+    size += addHalfWord(fMovieWidth);
+    size += addHalfWord(fMovieHeight);
+  } else {
+    size += addWord(0);
+  }
+addFileHeaderEnd;
+
+// The stream-format header ("strf"): a BITMAPINFO-style structure for video
+// streams, or a WAVFORMATEX-style structure for audio streams.
+addFileHeader1(strf);
+  if (fCurrentIOState->fIsVideo) {
+    // Add a BITMAPINFO header:
+    unsigned extraDataSize = 0;
+    size += addWord(10*4 + extraDataSize); // size
+    size += addWord(fMovieWidth);
+    size += addWord(fMovieHeight);
+    size += addHalfWord(1); // planes
+    size += addHalfWord(24); // bits-per-sample #####
+    size += addWord(fCurrentIOState->fAVICodecHandlerType); // compr. type
+    size += addWord(fCurrentIOState->fAVISize);
+    size += addZeroWords(4); // ??? #####
+    // Later, add extra data here (if any) #####
+  } else if (fCurrentIOState->fIsAudio) {
+    // Add a WAVFORMATEX header:
+    size += addHalfWord(fCurrentIOState->fWAVCodecTag);
+    unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels();
+    size += addHalfWord(numChannels);
+    size += addWord(fCurrentIOState->fAVISamplingFrequency);
+    size += addWord(fCurrentIOState->fAVIRate); // bytes per second
+    size += addHalfWord(fCurrentIOState->fAVISize); // block alignment
+    // NOTE(review): the division below assumes numChannels > 0 - confirm
+    // that subsessions always report at least one channel.
+    unsigned bitsPerSample = (fCurrentIOState->fAVISize*8)/numChannels;
+    size += addHalfWord(bitsPerSample);
+    if (strcmp(fCurrentIOState->fOurSubsession.codecName(), "MPA") == 0) {
+      // Assume MPEG layer II audio (not MP3): #####
+      size += addHalfWord(22); // wav_extra_size
+      size += addHalfWord(2); // fwHeadLayer
+      size += addWord(8*fCurrentIOState->fAVIRate); // dwHeadBitrate #####
+      size += addHalfWord(numChannels == 2 ? 1: 8); // fwHeadMode
+      size += addHalfWord(0); // fwHeadModeExt
+      size += addHalfWord(1); // wHeadEmphasis
+      size += addHalfWord(16); // fwHeadFlags
+      size += addWord(0); // dwPTSLow
+      size += addWord(0); // dwPTSHigh
+    }
+  }
+addFileHeaderEnd;
+
+#define AVI_MASTER_INDEX_SIZE 256
+
+// A "JUNK" (padding) entry.  Two variants, selected by "fJunkNumber":
+// 0 => space shaped like a master index (written inside each "strl");
+// otherwise => an "odml"/"dmlh" extended-header placeholder.
+addFileHeader1(JUNK);
+  if (fJunkNumber == 0) {
+    size += addHalfWord(4); // wLongsPerEntry
+    size += addHalfWord(0); // bIndexSubType + bIndexType
+    size += addWord(0); // nEntriesInUse #####
+    size += addWord(fCurrentIOState->fAVISubsessionTag); // dwChunkId
+    size += addZeroWords(2); // dwReserved
+    size += addZeroWords(AVI_MASTER_INDEX_SIZE*4);
+  } else {
+    size += add4ByteString("odml");
+    size += add4ByteString("dmlh");
+    unsigned wtfCount = 248;
+    size += addWord(wtfCount); // ??? #####
+    size += addZeroWords(wtfCount/4);
+  }
+addFileHeaderEnd;
+
+// The "movi" list, which will hold the actual media data.  Its size field's
+// position and current value are remembered so it can be patched once all
+// media data has been written.
+addFileHeader(LIST,movi);
+  fMoviSizePosition = headerSizePosn;
+  fMoviSizeValue = size-ignoredSize;
+addFileHeaderEnd;
diff --git a/liveMedia/AudioInputDevice.cpp b/liveMedia/AudioInputDevice.cpp
new file mode 100644
index 0000000..6946790
--- /dev/null
+++ b/liveMedia/AudioInputDevice.cpp
@@ -0,0 +1,45 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 2001-2003 Live Networks, Inc. All rights reserved.
+// Generic audio input device (such as a microphone, or an input sound card)
+// Implementation
+
+#include <AudioInputDevice.hh>
+
+// Constructor: records the audio format parameters; all real work is done
+// by subclasses.
+AudioInputDevice
+::AudioInputDevice(UsageEnvironment& env, unsigned char bitsPerSample,
+                   unsigned char numChannels,
+                   unsigned samplingFrequency, unsigned granularityInMS)
+  : FramedSource(env), fBitsPerSample(bitsPerSample),
+    fNumChannels(numChannels), fSamplingFrequency(samplingFrequency),
+    fGranularityInMS(granularityInMS) {
+}
+
+AudioInputDevice::~AudioInputDevice() {
+  // No resources owned at this level.
+}
+
+char** AudioInputDevice::allowedDeviceNames = NULL; // starts out unset (NULL); presumably populated elsewhere - verify against subclasses
+
+////////// AudioPortNames implementation //////////
+
+// Constructor: starts with an empty name list.
+AudioPortNames::AudioPortNames()
+: numPorts(0), portName(NULL) {
+}
+
+AudioPortNames::~AudioPortNames() {
+  // Free each port name, then the pointer array itself.  Use delete[]
+  // for both: the names and the array are array allocations, and plain
+  // "delete" on them is undefined behavior.
+  for (unsigned i = 0; i < numPorts; ++i) delete[] portName[i];
+  delete[] portName;
+}
diff --git a/liveMedia/AudioRTPSink.cpp b/liveMedia/AudioRTPSink.cpp
new file mode 100644
index 0000000..1555360
--- /dev/null
+++ b/liveMedia/AudioRTPSink.cpp
@@ -0,0 +1,37 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTP sink for audio codecs (abstract base class)
+// Implementation
+
+#include "AudioRTPSink.hh"
+
+// Constructor: simply forwards all parameters to the generic
+// MultiFramedRTPSink base class.
+AudioRTPSink::AudioRTPSink(UsageEnvironment& env,
+                           Groupsock* rtpgs, unsigned char rtpPayloadType,
+                           unsigned rtpTimestampFrequency,
+                           char const* rtpPayloadFormatName,
+                           unsigned numChannels)
+  : MultiFramedRTPSink(env, rtpgs, rtpPayloadType, rtpTimestampFrequency,
+                       rtpPayloadFormatName, numChannels) {
+}
+
+AudioRTPSink::~AudioRTPSink() {
+  // No resources owned at this level.
+}
+
+// The SDP media type for this sink's "m=" line: always "audio".
+char const* AudioRTPSink::sdpMediaType() const {
+  return "audio";
+}
diff --git a/liveMedia/Base64.cpp b/liveMedia/Base64.cpp
new file mode 100644
index 0000000..92ad4b5
--- /dev/null
+++ b/liveMedia/Base64.cpp
@@ -0,0 +1,122 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Base64 encoding and decoding
+// implementation
+
+#include "Base64.hh"
+#include <strDup.hh>
+#include <string.h>
+
+static char base64DecodeTable[256];
+
+// Build the reverse-lookup table for Base64 decoding: every byte starts out
+// marked invalid (high bit set), then the 64 alphabet characters are given
+// their values in order, and '=' (padding) is treated as the value 0.
+static void initBase64DecodeTable() {
+  for (int i = 0; i < 256; ++i) base64DecodeTable[i] = (char)0x80;
+  // default value: invalid
+
+  int value = 0;
+  for (int c = 'A'; c <= 'Z'; ++c) base64DecodeTable[c] = (char)value++; // 0..25
+  for (int c = 'a'; c <= 'z'; ++c) base64DecodeTable[c] = (char)value++; // 26..51
+  for (int c = '0'; c <= '9'; ++c) base64DecodeTable[c] = (char)value++; // 52..61
+  base64DecodeTable[(unsigned char)'+'] = 62;
+  base64DecodeTable[(unsigned char)'/'] = 63;
+  base64DecodeTable[(unsigned char)'='] = 0;
+}
+
+// Convenience overload: decodes the whole NUL-terminated string "in".
+// Returns NULL if "in" is NULL.
+unsigned char* base64Decode(char const* in, unsigned& resultSize,
+                            Boolean trimTrailingZeros) {
+  if (in == NULL) return NULL; // sanity check
+  return base64Decode(in, strlen(in), resultSize, trimTrailingZeros);
+}
+
+// Decode "inSize" Base64 characters from "in" into a newly-allocated byte
+// array, which the caller owns (delete[]).  "resultSize" is set to the number
+// of decoded bytes.  If "trimTrailingZeros", trailing zero bytes produced by
+// '=' padding are removed from the result.  Invalid characters are decoded
+// as if they were 'A' (value 0).
+unsigned char* base64Decode(char const* in, unsigned inSize,
+                            unsigned& resultSize,
+                            Boolean trimTrailingZeros) {
+  // One-time table setup.  NOTE(review): this lazy init is not thread-safe;
+  // confirm that the first call cannot race with another thread.
+  static Boolean haveInitializedBase64DecodeTable = False;
+  if (!haveInitializedBase64DecodeTable) {
+    initBase64DecodeTable();
+    haveInitializedBase64DecodeTable = True;
+  }
+
+  unsigned char* out = new unsigned char[inSize+1]; // ensures we have enough space
+  int k = 0;
+  int paddingCount = 0;
+  // Cast before subtracting, so that inSize < 3 yields a negative bound
+  // (no iterations) instead of relying on unsigned wraparound being
+  // converted to a negative int:
+  int const jMax = (int)inSize - 3;
+  // in case "inSize" is not a multiple of 4 (although it should be)
+  for (int j = 0; j < jMax; j += 4) {
+    char inTmp[4], outTmp[4];
+    for (int i = 0; i < 4; ++i) {
+      inTmp[i] = in[i+j];
+      if (inTmp[i] == '=') ++paddingCount;
+      outTmp[i] = base64DecodeTable[(unsigned char)inTmp[i]];
+      if ((outTmp[i]&0x80) != 0) outTmp[i] = 0; // this happens only if there was an invalid character; pretend that it was 'A'
+    }
+
+    // Repack four 6-bit values into three 8-bit bytes:
+    out[k++] = (outTmp[0]<<2) | (outTmp[1]>>4);
+    out[k++] = (outTmp[1]<<4) | (outTmp[2]>>2);
+    out[k++] = (outTmp[2]<<6) | outTmp[3];
+  }
+
+  if (trimTrailingZeros) {
+    while (paddingCount > 0 && k > 0 && out[k-1] == '\0') { --k; --paddingCount; }
+  }
+  // Copy into a right-sized result buffer:
+  resultSize = k;
+  unsigned char* result = new unsigned char[resultSize];
+  memmove(result, out, resultSize);
+  delete[] out;
+
+  return result;
+}
+
+static const char base64Char[] =
+"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+// Base64-encode "origLength" bytes from "origSigned" into a newly-allocated,
+// NUL-terminated string (with '=' padding); the caller owns (delete[]) the
+// result.  Returns NULL if the input pointer is NULL.
+char* base64Encode(char const* origSigned, unsigned origLength) {
+  unsigned char const* orig = (unsigned char const*)origSigned; // in case any input bytes have the MSB set
+  if (orig == NULL) return NULL;
+
+  unsigned const numOrig24BitValues = origLength/3;
+  // "havePadding" is 0 or 1, so it can be used arithmetically below:
+  Boolean havePadding = origLength > numOrig24BitValues*3;
+  Boolean havePadding2 = origLength == numOrig24BitValues*3 + 2; // 2 leftover bytes (vs 1)
+  unsigned const numResultBytes = 4*(numOrig24BitValues + havePadding);
+  char* result = new char[numResultBytes+1]; // allow for trailing '\0'
+
+  // Map each full group of 3 input bytes into 4 output base-64 characters:
+  unsigned i;
+  for (i = 0; i < numOrig24BitValues; ++i) {
+    result[4*i+0] = base64Char[(orig[3*i]>>2)&0x3F];
+    result[4*i+1] = base64Char[(((orig[3*i]&0x3)<<4) | (orig[3*i+1]>>4))&0x3F];
+    result[4*i+2] = base64Char[((orig[3*i+1]<<2) | (orig[3*i+2]>>6))&0x3F];
+    result[4*i+3] = base64Char[orig[3*i+2]&0x3F];
+  }
+
+  // Now, take padding into account. (Note: i == numOrig24BitValues)
+  if (havePadding) {
+    result[4*i+0] = base64Char[(orig[3*i]>>2)&0x3F];
+    if (havePadding2) {
+      result[4*i+1] = base64Char[(((orig[3*i]&0x3)<<4) | (orig[3*i+1]>>4))&0x3F];
+      result[4*i+2] = base64Char[(orig[3*i+1]<<2)&0x3F];
+    } else {
+      result[4*i+1] = base64Char[((orig[3*i]&0x3)<<4)&0x3F];
+      result[4*i+2] = '=';
+    }
+    result[4*i+3] = '=';
+  }
+
+  result[numResultBytes] = '\0';
+  return result;
+}
diff --git a/liveMedia/BasicUDPSink.cpp b/liveMedia/BasicUDPSink.cpp
new file mode 100644
index 0000000..74fee56
--- /dev/null
+++ b/liveMedia/BasicUDPSink.cpp
@@ -0,0 +1,101 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simple UDP sink (i.e., without RTP or other headers added); one frame per packet
+// Implementation
+
+#include "BasicUDPSink.hh"
+#include <GroupsockHelper.hh>
+
+// Factory for a sink that writes each incoming frame as one UDP packet.
+BasicUDPSink* BasicUDPSink::createNew(UsageEnvironment& env, Groupsock* gs,
+                                      unsigned maxPayloadSize) {
+  return new BasicUDPSink(env, gs, maxPayloadSize);
+}
+
+// Constructor: allocates a payload buffer of "maxPayloadSize" bytes that each
+// frame is read into before being sent.
+BasicUDPSink::BasicUDPSink(UsageEnvironment& env, Groupsock* gs,
+                           unsigned maxPayloadSize)
+  : MediaSink(env),
+    fGS(gs), fMaxPayloadSize(maxPayloadSize) {
+  fOutputBuffer = new unsigned char[fMaxPayloadSize];
+}
+
+BasicUDPSink::~BasicUDPSink() {
+  delete[] fOutputBuffer; // (the Groupsock is not owned by us)
+}
+
+Boolean BasicUDPSink::continuePlaying() {
+  // Record the fact that we're starting to play now:
+  gettimeofday(&fNextSendTime, NULL);
+
+  // Arrange to get and send the first payload.
+  // (This will also schedule any future sends.)
+  continuePlaying1();
+  return True; // playing always starts successfully
+}
+
+// Request the next frame from our source (if we still have one); the result
+// is delivered to "afterGettingFrame()", which sends it and re-schedules us.
+void BasicUDPSink::continuePlaying1() {
+  nextTask() = NULL; // no delayed task pending while a read is in progress
+  if (fSource != NULL) {
+    fSource->getNextFrame(fOutputBuffer, fMaxPayloadSize,
+                          afterGettingFrame, this,
+                          onSourceClosure, this);
+  }
+}
+
+// Static trampoline: forwards the frame-delivery callback to the instance.
+// (The presentation time is unused; only the duration matters for pacing.)
+void BasicUDPSink::afterGettingFrame(void* clientData, unsigned frameSize,
+                                     unsigned numTruncatedBytes,
+                                     struct timeval /*presentationTime*/,
+                                     unsigned durationInMicroseconds) {
+  BasicUDPSink* sink = (BasicUDPSink*)clientData;
+  sink->afterGettingFrame1(frameSize, numTruncatedBytes, durationInMicroseconds);
+}
+
+// A frame has been delivered into "fOutputBuffer": send it as a single UDP
+// packet, then schedule the next send, pacing output by the frame's duration.
+void BasicUDPSink::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+                                      unsigned durationInMicroseconds) {
+  if (numTruncatedBytes > 0) {
+    envir() << "BasicUDPSink::afterGettingFrame1(): The input frame data was too large for our specified maximum payload size ("
+            << fMaxPayloadSize << "). "
+            << numTruncatedBytes << " bytes of trailing data was dropped!\n";
+  }
+
+  // Send the packet:
+  fGS->output(envir(), fOutputBuffer, frameSize);
+
+  // Figure out the time at which the next packet should be sent, based
+  // on the duration of the payload that we just read:
+  fNextSendTime.tv_usec += durationInMicroseconds;
+  fNextSendTime.tv_sec += fNextSendTime.tv_usec/1000000;
+  fNextSendTime.tv_usec %= 1000000;
+
+  struct timeval timeNow;
+  gettimeofday(&timeNow, NULL);
+  int secsDiff = fNextSendTime.tv_sec - timeNow.tv_sec;
+  // Do the multiplication in 64 bits, so a large clock jump can't overflow
+  // "int" before the widening to int64_t:
+  int64_t uSecondsToGo = secsDiff*(int64_t)1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec);
+  if (uSecondsToGo < 0 || secsDiff < 0) { // sanity check: Make sure that the time-to-delay is non-negative:
+    uSecondsToGo = 0;
+  }
+
+  // Delay this amount of time:
+  nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo,
+                                                           (TaskFunc*)sendNext, this);
+}
+
+// The following is called after each delay between packet sends:
+// static trampoline back into the read-send loop.
+void BasicUDPSink::sendNext(void* firstArg) {
+  BasicUDPSink* sink = (BasicUDPSink*)firstArg;
+  sink->continuePlaying1();
+}
diff --git a/liveMedia/BasicUDPSource.cpp b/liveMedia/BasicUDPSource.cpp
new file mode 100644
index 0000000..e8ecf9e
--- /dev/null
+++ b/liveMedia/BasicUDPSource.cpp
@@ -0,0 +1,73 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simple UDP source, where every UDP payload is a complete frame
+// Implementation
+
+#include "BasicUDPSource.hh"
+#include <GroupsockHelper.hh>
+
+// Factory for a source that treats each received UDP payload as one frame.
+BasicUDPSource* BasicUDPSource::createNew(UsageEnvironment& env,
+                                          Groupsock* inputGS) {
+  return new BasicUDPSource(env, inputGS);
+}
+
+// Constructor: enlarges the OS receive buffer and makes the socket
+// non-blocking, so a spurious readability report can never hang us.
+BasicUDPSource::BasicUDPSource(UsageEnvironment& env, Groupsock* inputGS)
+  : FramedSource(env), fInputGS(inputGS), fHaveStartedReading(False) {
+  // Try to use a large receive buffer (in the OS):
+  increaseReceiveBufferTo(env, inputGS->socketNum(), 50*1024);
+
+  // Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive.
+  // The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block,
+  // even if the socket was previously reported (e.g., by "select()") as having data available.
+  // (This can supposedly happen if the UDP checksum fails, for example.)
+  makeSocketNonBlocking(fInputGS->socketNum());
+}
+
+BasicUDPSource::~BasicUDPSource(){
+  // Stop any pending background read registered in doGetNextFrame():
+  envir().taskScheduler().turnOffBackgroundReadHandling(fInputGS->socketNum());
+}
+
+// Begin (once) waiting for incoming packets; each arriving packet is then
+// handled by "incomingPacketHandler()".
+void BasicUDPSource::doGetNextFrame() {
+  if (!fHaveStartedReading) {
+    // Await incoming packets:
+    envir().taskScheduler().turnOnBackgroundReadHandling(fInputGS->socketNum(),
+        (TaskScheduler::BackgroundHandlerProc*)&incomingPacketHandler, this);
+    fHaveStartedReading = True;
+  }
+}
+
+// Stop waiting for packets; a later doGetNextFrame() will re-register.
+void BasicUDPSource::doStopGettingFrames() {
+  envir().taskScheduler().turnOffBackgroundReadHandling(fInputGS->socketNum());
+  fHaveStartedReading = False;
+}
+
+
+// Static trampoline invoked by the task scheduler when the socket is readable.
+void BasicUDPSource::incomingPacketHandler(BasicUDPSource* source, int /*mask*/){
+  source->incomingPacketHandler1();
+}
+
+// Read one UDP payload directly into the client's buffer and deliver it as a
+// complete frame.  Does nothing if no read is currently pending.
+void BasicUDPSource::incomingPacketHandler1() {
+  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet
+
+  // Read the packet into our desired destination:
+  struct sockaddr_in fromAddress;
+  if (!fInputGS->handleRead(fTo, fMaxSize, fFrameSize, fromAddress)) return;
+
+  // Tell our client that we have new data:
+  afterGetting(this); // we're preceded by a net read; no infinite recursion
+}
diff --git a/liveMedia/BitVector.cpp b/liveMedia/BitVector.cpp
new file mode 100644
index 0000000..0374e94
--- /dev/null
+++ b/liveMedia/BitVector.cpp
@@ -0,0 +1,183 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Bit Vector data structure
+// Implementation
+
+#include "BitVector.hh"
+
+// Construct a bit-level view over the caller's buffer, starting
+// "baseBitOffset" bits into "baseBytePtr" and spanning "totNumBits" bits.
+// The buffer is not owned or copied.
+BitVector::BitVector(unsigned char* baseBytePtr,
+                     unsigned baseBitOffset,
+                     unsigned totNumBits) {
+  setup(baseBytePtr, baseBitOffset, totNumBits);
+}
+
+// (Re)initialize the view, resetting the read/write cursor to bit 0.
+void BitVector::setup(unsigned char* baseBytePtr,
+                      unsigned baseBitOffset,
+                      unsigned totNumBits) {
+  fBaseBytePtr = baseBytePtr;
+  fBaseBitOffset = baseBitOffset;
+  fTotNumBits = totNumBits;
+  fCurBitIndex = 0;
+}
+
+static unsigned char const singleBitMask[8]
+ = {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01};
+
+#define MAX_LENGTH 32
+
+// Write the low-order "numBits" (capped at 32) bits of "from", MSB first,
+// at the current cursor.  Bits that would land past the end of the vector
+// are silently dropped; the cursor advances only by the bits written.
+void BitVector::putBits(unsigned from, unsigned numBits) {
+  if (numBits == 0) return;
+
+  unsigned char tmpBuf[4];
+  unsigned overflowingBits = 0;
+
+  if (numBits > MAX_LENGTH) {
+    numBits = MAX_LENGTH;
+  }
+
+  // How many of the requested bits don't fit before the end of the vector?
+  if (numBits > fTotNumBits - fCurBitIndex) {
+    overflowingBits = numBits - (fTotNumBits - fCurBitIndex);
+  }
+
+  // Serialize "from" big-endian, so the wanted bits sit at a known offset:
+  tmpBuf[0] = (unsigned char)(from>>24);
+  tmpBuf[1] = (unsigned char)(from>>16);
+  tmpBuf[2] = (unsigned char)(from>>8);
+  tmpBuf[3] = (unsigned char)from;
+
+  shiftBits(fBaseBytePtr, fBaseBitOffset + fCurBitIndex, /* to */
+            tmpBuf, MAX_LENGTH - numBits, /* from */
+            numBits - overflowingBits /* num bits */);
+  fCurBitIndex += numBits - overflowingBits;
+}
+
+// Fast-path equivalent of "putBits(bit, 1)": write a single bit at the
+// current cursor, then advance.  A write past the end is silently ignored.
+void BitVector::put1Bit(unsigned bit) {
+  if (fCurBitIndex >= fTotNumBits) return; // overflow: drop the bit
+
+  unsigned totBitOffset = fBaseBitOffset + fCurBitIndex++;
+  unsigned char mask = singleBitMask[totBitOffset%8];
+  if (bit != 0) {
+    fBaseBytePtr[totBitOffset/8] |= mask;
+  } else {
+    fBaseBytePtr[totBitOffset/8] &= ~mask;
+  }
+}
+
+// Read the next "numBits" (capped at 32) bits, returning them MSB-first in
+// the low-order bits of the result.  Bits requested past the end of the
+// vector come back as 0; the cursor advances only by the bits actually read.
+unsigned BitVector::getBits(unsigned numBits) {
+  if (numBits == 0) return 0;
+
+  unsigned char tmpBuf[4] = {0, 0, 0, 0}; // zero-init: bytes beyond the copied bits would otherwise be read uninitialized below
+  unsigned overflowingBits = 0;
+
+  if (numBits > MAX_LENGTH) {
+    numBits = MAX_LENGTH;
+  }
+
+  if (numBits > fTotNumBits - fCurBitIndex) {
+    overflowingBits = numBits - (fTotNumBits - fCurBitIndex);
+  }
+
+  shiftBits(tmpBuf, 0, /* to */
+            fBaseBytePtr, fBaseBitOffset + fCurBitIndex, /* from */
+            numBits - overflowingBits /* num bits */);
+  fCurBitIndex += numBits - overflowingBits;
+
+  unsigned result
+    = (tmpBuf[0]<<24) | (tmpBuf[1]<<16) | (tmpBuf[2]<<8) | tmpBuf[3];
+  result >>= (MAX_LENGTH - numBits); // move into low-order part of word
+  // Zero any overflow bits.  (Guard the shift: "0xFFFFFFFF << 32" would be
+  // undefined behavior when the entire request lies past the vector's end.)
+  result &= (overflowingBits < 32) ? (0xFFFFFFFF << overflowingBits) : 0;
+  return result;
+}
+
+// Fast-path equivalent of "getBits(1)": read one bit at the current cursor,
+// advancing it.  A read past the end of the vector returns 0.
+unsigned BitVector::get1Bit() {
+  if (fCurBitIndex >= fTotNumBits) return 0; // overflow
+
+  unsigned totBitOffset = fBaseBitOffset + fCurBitIndex++;
+  unsigned char curFromByte = fBaseBytePtr[totBitOffset/8];
+  return (curFromByte >> (7-(totBitOffset%8))) & 0x01;
+}
+
+// Advance the cursor by "numBits", clamping at the end of the vector.
+void BitVector::skipBits(unsigned numBits) {
+  unsigned remainingBits = fTotNumBits - fCurBitIndex;
+  fCurBitIndex += (numBits > remainingBits) ? remainingBits : numBits;
+}
+
+// Read an unsigned Exp-Golomb ("ue(v)") value: count leading zero bits up
+// to the terminating 1 bit, then read that many further bits.
+unsigned BitVector::get_expGolomb() {
+  unsigned numLeadingZeroBits = 0;
+  unsigned codeStart = 1;
+
+  // NOTE(review): with >31 leading zeros (malformed input), "codeStart"
+  // and the getBits() call below would overflow/cap at 32 bits - confirm
+  // callers only feed well-formed streams.
+  while (get1Bit() == 0 && fCurBitIndex < fTotNumBits) {
+    ++numLeadingZeroBits;
+    codeStart *= 2;
+  }
+
+  return codeStart - 1 + getBits(numLeadingZeroBits);
+}
+
+// Read a signed Exp-Golomb value: map the unsigned code 0,1,2,3,4,...
+// onto 0,1,-1,2,-2,... (even codes become non-positive, odd positive).
+int BitVector::get_expGolombSigned() {
+  unsigned codeNum = get_expGolomb();
+  return ((codeNum&1) == 0) ? -(int)(codeNum/2) : (int)((codeNum+1)/2);
+}
+
+// Copy "numBits" bits, one at a time, from ("fromBasePtr" + "fromBitOffset"
+// bits) to ("toBasePtr" + "toBitOffset" bits).  Bit 0 of a byte is its MSB.
+void shiftBits(unsigned char* toBasePtr, unsigned toBitOffset,
+               unsigned char const* fromBasePtr, unsigned fromBitOffset,
+               unsigned numBits) {
+  if (numBits == 0) return;
+
+  /* Note that from and to may overlap, if from>to */
+  unsigned char const* fromBytePtr = fromBasePtr + fromBitOffset/8;
+  unsigned fromBitRem = fromBitOffset%8;
+  unsigned char* toBytePtr = toBasePtr + toBitOffset/8;
+  unsigned toBitRem = toBitOffset%8;
+
+  while (numBits-- > 0) {
+    // Copy one bit: test it in the source, then set/clear it in the target.
+    unsigned char fromBitMask = singleBitMask[fromBitRem];
+    unsigned char fromBit = (*fromBytePtr)&fromBitMask;
+    unsigned char toBitMask = singleBitMask[toBitRem];
+
+    if (fromBit != 0) {
+      *toBytePtr |= toBitMask;
+    } else {
+      *toBytePtr &=~ toBitMask;
+    }
+
+    // Advance both bit cursors, stepping to the next byte every 8 bits:
+    if (++fromBitRem == 8) {
+      ++fromBytePtr;
+      fromBitRem = 0;
+    }
+    if (++toBitRem == 8) {
+      ++toBytePtr;
+      toBitRem = 0;
+    }
+  }
+}
diff --git a/liveMedia/ByteStreamFileSource.cpp b/liveMedia/ByteStreamFileSource.cpp
new file mode 100644
index 0000000..42be417
--- /dev/null
+++ b/liveMedia/ByteStreamFileSource.cpp
@@ -0,0 +1,184 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A file source that is a plain byte stream (rather than frames)
+// Implementation
+
+#include "ByteStreamFileSource.hh"
+#include "InputFile.hh"
+#include "GroupsockHelper.hh"
+
+////////// ByteStreamFileSource //////////
+
+// Factory: open "fileName" and wrap it in a new source.  Returns NULL if
+// the file could not be opened.
+ByteStreamFileSource*
+ByteStreamFileSource::createNew(UsageEnvironment& env, char const* fileName,
+                                unsigned preferredFrameSize,
+                                unsigned playTimePerFrame) {
+  FILE* fid = OpenInputFile(env, fileName);
+  if (fid == NULL) return NULL;
+
+  ByteStreamFileSource* newSource
+    = new ByteStreamFileSource(env, fid, preferredFrameSize, playTimePerFrame);
+  newSource->fFileSize = GetFileSize(fileName, fid);
+
+  return newSource;
+}
+
+// Factory variant for an already-open FILE* (e.g., a pipe or stdin).
+// Returns NULL if "fid" is NULL.  The source takes responsibility for
+// closing "fid" in its destructor.
+ByteStreamFileSource*
+ByteStreamFileSource::createNew(UsageEnvironment& env, FILE* fid,
+                                unsigned preferredFrameSize,
+                                unsigned playTimePerFrame) {
+  if (fid == NULL) return NULL;
+
+  ByteStreamFileSource* newSource = new ByteStreamFileSource(env, fid, preferredFrameSize, playTimePerFrame);
+  newSource->fFileSize = GetFileSize(NULL, fid);
+
+  return newSource;
+}
+
+// Seek to absolute position "byteNumber".  A nonzero "numBytesToStream"
+// additionally limits how many bytes will be streamed before closure.
+void ByteStreamFileSource::seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream) {
+  SeekFile64(fFid, (int64_t)byteNumber, SEEK_SET);
+
+  fNumBytesToStream = numBytesToStream;
+  fLimitNumBytesToStream = fNumBytesToStream > 0;
+}
+
+// Seek by a (possibly negative) "offset" from the current position; a
+// nonzero "numBytesToStream" limits subsequent streaming, as above.
+void ByteStreamFileSource::seekToByteRelative(int64_t offset, u_int64_t numBytesToStream) {
+  SeekFile64(fFid, offset, SEEK_CUR);
+
+  fNumBytesToStream = numBytesToStream;
+  fLimitNumBytesToStream = fNumBytesToStream > 0;
+}
+
+// Seek to the end of the file (any existing streaming limit is left as-is).
+void ByteStreamFileSource::seekToEnd() {
+  SeekFile64(fFid, 0, SEEK_END);
+}
+
+// Constructor.  "preferredFrameSize" caps each delivery; "playTimePerFrame"
+// (microseconds per preferred-size frame) drives presentation-time pacing.
+ByteStreamFileSource::ByteStreamFileSource(UsageEnvironment& env, FILE* fid,
+                                           unsigned preferredFrameSize,
+                                           unsigned playTimePerFrame)
+  : FramedFileSource(env, fid), fFileSize(0), fPreferredFrameSize(preferredFrameSize),
+    fPlayTimePerFrame(playTimePerFrame), fLastPlayTime(0),
+    fHaveStartedReading(False), fLimitNumBytesToStream(False), fNumBytesToStream(0) {
+#ifndef READ_FROM_FILES_SYNCHRONOUSLY
+  // In async mode the event loop watches the descriptor, so make it non-blocking:
+  makeSocketNonBlocking(fileno(fFid));
+#endif
+
+  // Test whether the file is seekable
+  fFidIsSeekable = FileIsSeekable(fFid);
+}
+
+// Destructor: deregisters any background read handler, then closes the file.
+ByteStreamFileSource::~ByteStreamFileSource() {
+  if (fFid == NULL) return;
+
+#ifndef READ_FROM_FILES_SYNCHRONOUSLY
+  envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
+#endif
+
+  CloseInputFile(fFid);
+}
+
+// Deliver the next chunk of the file.  Signals closure at EOF, on a file
+// error, or once the requested byte limit has been fully streamed.
+void ByteStreamFileSource::doGetNextFrame() {
+  if (feof(fFid) || ferror(fFid) || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
+    handleClosure();
+    return;
+  }
+
+#ifdef READ_FROM_FILES_SYNCHRONOUSLY
+  doReadFromFile();
+#else
+  if (!fHaveStartedReading) {
+    // Await readable data from the file:
+    envir().taskScheduler().turnOnBackgroundReadHandling(fileno(fFid),
+        (TaskScheduler::BackgroundHandlerProc*)&fileReadableHandler, this);
+    fHaveStartedReading = True;
+  }
+#endif
+}
+
+// Cancel any pending delivery task and (in async mode) stop watching the file.
+void ByteStreamFileSource::doStopGettingFrames() {
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+#ifndef READ_FROM_FILES_SYNCHRONOUSLY
+  envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
+  fHaveStartedReading = False;
+#endif
+}
+
+// Static trampoline invoked when the file descriptor becomes readable.
+void ByteStreamFileSource::fileReadableHandler(ByteStreamFileSource* source, int /*mask*/) {
+  if (!source->isCurrentlyAwaitingData()) {
+    source->doStopGettingFrames(); // we're not ready for the data yet
+    return;
+  }
+  source->doReadFromFile();
+}
+
+// Read up to "fMaxSize" bytes (further limited by "fPreferredFrameSize" and
+// any streaming byte limit) from the file into "fTo", compute the frame's
+// presentation time and duration, then notify the downstream reader.
+void ByteStreamFileSource::doReadFromFile() {
+  // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
+  if (fLimitNumBytesToStream && fNumBytesToStream < (u_int64_t)fMaxSize) {
+    fMaxSize = (unsigned)fNumBytesToStream;
+  }
+  if (fPreferredFrameSize > 0 && fPreferredFrameSize < fMaxSize) {
+    fMaxSize = fPreferredFrameSize;
+  }
+#ifdef READ_FROM_FILES_SYNCHRONOUSLY
+  fFrameSize = fread(fTo, 1, fMaxSize, fFid);
+#else
+  if (fFidIsSeekable) {
+    fFrameSize = fread(fTo, 1, fMaxSize, fFid);
+  } else {
+    // For non-seekable files (e.g., pipes), call "read()" rather than "fread()", to ensure that the read doesn't block:
+    ssize_t readResult = read(fileno(fFid), fTo, fMaxSize);
+    // Treat a read error as end-of-stream, rather than storing -1 into the
+    // unsigned "fFrameSize" (which would fake a huge bogus frame):
+    fFrameSize = readResult < 0 ? 0 : (unsigned)readResult;
+  }
+#endif
+  if (fFrameSize == 0) {
+    handleClosure();
+    return;
+  }
+  fNumBytesToStream -= fFrameSize;
+
+  // Set the 'presentation time':
+  if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
+    if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
+      // This is the first frame, so use the current time:
+      gettimeofday(&fPresentationTime, NULL);
+    } else {
+      // Increment by the play time of the previous data:
+      unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
+      fPresentationTime.tv_sec += uSeconds/1000000;
+      fPresentationTime.tv_usec = uSeconds%1000000;
+    }
+
+    // Remember the play time of this data:
+    fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
+    fDurationInMicroseconds = fLastPlayTime;
+  } else {
+    // We don't know a specific play time duration for this data,
+    // so just record the current time as being the 'presentation time':
+    gettimeofday(&fPresentationTime, NULL);
+  }
+
+  // Inform the reader that he has data:
+#ifdef READ_FROM_FILES_SYNCHRONOUSLY
+  // To avoid possible infinite recursion, we need to return to the event loop to do this:
+  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
+      (TaskFunc*)FramedSource::afterGetting, this);
+#else
+  // Because the file read was done from the event loop, we can call the
+  // 'after getting' function directly, without risk of infinite recursion:
+  FramedSource::afterGetting(this);
+#endif
+}
diff --git a/liveMedia/ByteStreamMemoryBufferSource.cpp b/liveMedia/ByteStreamMemoryBufferSource.cpp
new file mode 100644
index 0000000..66c77ce
--- /dev/null
+++ b/liveMedia/ByteStreamMemoryBufferSource.cpp
@@ -0,0 +1,118 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class for streaming data from a (static) memory buffer, as if it were a file.
+// Implementation
+
+#include "ByteStreamMemoryBufferSource.hh"
+#include "GroupsockHelper.hh"
+
+////////// ByteStreamMemoryBufferSource //////////
+
+// Factory function.  Returns NULL - rather than constructing an object -
+// if the caller supplied no buffer.
+ByteStreamMemoryBufferSource*
+ByteStreamMemoryBufferSource::createNew(UsageEnvironment& env,
+					u_int8_t* buffer, u_int64_t bufferSize,
+					Boolean deleteBufferOnClose,
+					unsigned preferredFrameSize,
+					unsigned playTimePerFrame) {
+  if (buffer == NULL) return NULL;
+
+  return new ByteStreamMemoryBufferSource(env, buffer, bufferSize, deleteBufferOnClose, preferredFrameSize, playTimePerFrame);
+}
+
+// Note: "fCurIndex" (the read position) starts at 0, and no byte-count
+// limit is initially in effect ("fLimitNumBytesToStream" is False).
+ByteStreamMemoryBufferSource::ByteStreamMemoryBufferSource(UsageEnvironment& env,
+							   u_int8_t* buffer, u_int64_t bufferSize,
+							   Boolean deleteBufferOnClose,
+							   unsigned preferredFrameSize,
+							   unsigned playTimePerFrame)
+  : FramedSource(env), fBuffer(buffer), fBufferSize(bufferSize), fCurIndex(0), fDeleteBufferOnClose(deleteBufferOnClose),
+    fPreferredFrameSize(preferredFrameSize), fPlayTimePerFrame(playTimePerFrame), fLastPlayTime(0),
+    fLimitNumBytesToStream(False), fNumBytesToStream(0) {
+}
+
+// We take ownership of (and delete) the buffer only if the creator asked
+// for that via "deleteBufferOnClose":
+ByteStreamMemoryBufferSource::~ByteStreamMemoryBufferSource() {
+  if (fDeleteBufferOnClose) delete[] fBuffer;
+}
+
+// Seek to an absolute byte position within the buffer, clamped to
+// [0, fBufferSize].  A "numBytesToStream" > 0 also limits how many bytes
+// will subsequently be streamed; 0 means 'no limit'.
+void ByteStreamMemoryBufferSource::seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream) {
+  fCurIndex = byteNumber;
+  if (fCurIndex > fBufferSize) fCurIndex = fBufferSize;
+
+  fNumBytesToStream = numBytesToStream;
+  fLimitNumBytesToStream = fNumBytesToStream > 0;
+}
+
+// Seek by a (signed) byte offset relative to the current read position,
+// clamped to [0, fBufferSize].  A "numBytesToStream" > 0 also limits how
+// many bytes will subsequently be streamed; 0 means 'no limit'.
+void ByteStreamMemoryBufferSource::seekToByteRelative(int64_t offset, u_int64_t numBytesToStream) {
+  int64_t newIndex = fCurIndex + offset;
+  if (newIndex < 0) {
+    fCurIndex = 0;
+  } else {
+    // BUG FIX: this previously assigned "(u_int64_t)offset" - i.e. an
+    // *absolute* position - discarding the computed relative "newIndex":
+    fCurIndex = (u_int64_t)newIndex;
+    if (fCurIndex > fBufferSize) fCurIndex = fBufferSize;
+  }
+
+  fNumBytesToStream = numBytesToStream;
+  fLimitNumBytesToStream = fNumBytesToStream > 0;
+}
+
+// Deliver the next chunk of buffer data to the downstream reader.
+// The delivery size is the minimum of: the reader's buffer ("fMaxSize"),
+// any remaining streaming limit, "fPreferredFrameSize" (if set), and the
+// bytes remaining in our source buffer.
+void ByteStreamMemoryBufferSource::doGetNextFrame() {
+  // If we've exhausted the buffer - or the byte limit set by a prior
+  // 'seek' call - signal closure instead of delivering data:
+  if (fCurIndex >= fBufferSize || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
+    handleClosure();
+    return;
+  }
+
+  // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
+  fFrameSize = fMaxSize;
+  if (fLimitNumBytesToStream && fNumBytesToStream < (u_int64_t)fFrameSize) {
+    fFrameSize = (unsigned)fNumBytesToStream;
+  }
+  if (fPreferredFrameSize > 0 && fPreferredFrameSize < fFrameSize) {
+    fFrameSize = fPreferredFrameSize;
+  }
+
+  // Also clip the delivery to the bytes remaining in the source buffer:
+  if (fCurIndex + fFrameSize > fBufferSize) {
+    fFrameSize = (unsigned)(fBufferSize - fCurIndex);
+  }
+
+  memmove(fTo, &fBuffer[fCurIndex], fFrameSize);
+  fCurIndex += fFrameSize;
+  fNumBytesToStream -= fFrameSize; // meaningful only while "fLimitNumBytesToStream" is set
+
+  // Set the 'presentation time':
+  if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
+    if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
+      // This is the first frame, so use the current time:
+      gettimeofday(&fPresentationTime, NULL);
+    } else {
+      // Increment by the play time of the previous data:
+      unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
+      fPresentationTime.tv_sec += uSeconds/1000000;
+      fPresentationTime.tv_usec = uSeconds%1000000;
+    }
+
+    // Remember the play time of this data:
+    // (scaled pro-rata from "fPlayTimePerFrame", which corresponds to a
+    //  full "fPreferredFrameSize"-byte frame)
+    fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
+    fDurationInMicroseconds = fLastPlayTime;
+  } else {
+    // We don't know a specific play time duration for this data,
+    // so just record the current time as being the 'presentation time':
+    gettimeofday(&fPresentationTime, NULL);
+  }
+
+  // Inform the downstream object that it has data:
+  // (Safe to call directly - the copy above completed synchronously.)
+  FramedSource::afterGetting(this);
+}
diff --git a/liveMedia/ByteStreamMultiFileSource.cpp b/liveMedia/ByteStreamMultiFileSource.cpp
new file mode 100644
index 0000000..0425bed
--- /dev/null
+++ b/liveMedia/ByteStreamMultiFileSource.cpp
@@ -0,0 +1,134 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source that consists of multiple byte-stream files, read sequentially.
+// (The input is an array of file names, with a terminating 'file name' of NULL.)
+// Implementation
+
+#include "ByteStreamMultiFileSource.hh"
+
+// Constructor.  "fileNameArray" is a NULL-terminated array of file names;
+// we copy the names, but create the component "ByteStreamFileSource"s
+// lazily (on first read) rather than up front.
+ByteStreamMultiFileSource
+::ByteStreamMultiFileSource(UsageEnvironment& env, char const** fileNameArray,
+			    unsigned preferredFrameSize, unsigned playTimePerFrame)
+  : FramedSource(env),
+    fPreferredFrameSize(preferredFrameSize), fPlayTimePerFrame(playTimePerFrame),
+    fCurrentlyReadSourceNumber(0), fHaveStartedNewFile(False) {
+  // Begin by counting the number of sources (by looking for a terminating 'file name' of NULL):
+  for (fNumSources = 0; ; ++fNumSources) {
+    if (fileNameArray[fNumSources] == NULL) break;
+  }
+
+  // Next, copy the source file names into our own array:
+  fFileNameArray = new char const*[fNumSources];
+  if (fFileNameArray == NULL) return; // defensive; "new" normally throws rather than returning NULL
+  unsigned i;
+  for (i = 0; i < fNumSources; ++i) {
+    fFileNameArray[i] = strDup(fileNameArray[i]);
+  }
+
+  // Next, set up our array of component ByteStreamFileSources
+  // Don't actually create these yet; instead, do this on demand
+  fSourceArray = new ByteStreamFileSource*[fNumSources];
+  if (fSourceArray == NULL) return; // defensive; see above
+  for (i = 0; i < fNumSources; ++i) {
+    fSourceArray[i] = NULL;
+  }
+}
+
+// Close any component sources that were created, then free both arrays:
+ByteStreamMultiFileSource::~ByteStreamMultiFileSource() {
+  unsigned i;
+  for (i = 0; i < fNumSources; ++i) {
+    Medium::close(fSourceArray[i]);
+  }
+  delete[] fSourceArray;
+
+  for (i = 0; i < fNumSources; ++i) {
+    delete[] (char*)(fFileNameArray[i]); // allocated by "strDup()"
+  }
+  delete[] fFileNameArray;
+}
+
+// Factory function:
+ByteStreamMultiFileSource* ByteStreamMultiFileSource
+::createNew(UsageEnvironment& env, char const** fileNameArray,
+	    unsigned preferredFrameSize, unsigned playTimePerFrame) {
+  ByteStreamMultiFileSource* newSource
+    = new ByteStreamMultiFileSource(env, fileNameArray,
+				    preferredFrameSize, playTimePerFrame);
+
+  return newSource;
+}
+
+// Read the next frame from the current component file source, creating
+// that source on demand.  (The "do { ... } while (0)" is a break-out
+// idiom: any "break" falls through to the closure handling at the end.)
+void ByteStreamMultiFileSource::doGetNextFrame() {
+  do {
+    // First, check whether we've run out of sources:
+    if (fCurrentlyReadSourceNumber >= fNumSources) break;
+
+    fHaveStartedNewFile = False;
+    ByteStreamFileSource*& source
+      = fSourceArray[fCurrentlyReadSourceNumber];
+    if (source == NULL) {
+      // The current source hasn't been created yet.  Do this now:
+      source = ByteStreamFileSource::createNew(envir(),
+					       fFileNameArray[fCurrentlyReadSourceNumber],
+					       fPreferredFrameSize, fPlayTimePerFrame);
+      if (source == NULL) break;
+      fHaveStartedNewFile = True; // lets callers detect a file boundary
+    }
+
+    // (Attempt to) read from the current source.
+    source->getNextFrame(fTo, fMaxSize,
+			 afterGettingFrame, this,
+			 onSourceClosure, this);
+    return;
+  } while (0);
+
+  // An error occurred; consider ourselves closed:
+  handleClosure();
+}
+
+// Static callback, invoked when the component source delivers data:
+// propagate the delivery parameters up to our own downstream reader.
+void ByteStreamMultiFileSource
+  ::afterGettingFrame(void* clientData,
+		      unsigned frameSize, unsigned numTruncatedBytes,
+		      struct timeval presentationTime,
+		      unsigned durationInMicroseconds) {
+  ByteStreamMultiFileSource* source
+    = (ByteStreamMultiFileSource*)clientData;
+  source->fFrameSize = frameSize;
+  source->fNumTruncatedBytes = numTruncatedBytes;
+  source->fPresentationTime = presentationTime;
+  source->fDurationInMicroseconds = durationInMicroseconds;
+  FramedSource::afterGetting(source);
+}
+
+// Static callback, invoked when the component source closes (e.g. EOF):
+void ByteStreamMultiFileSource::onSourceClosure(void* clientData) {
+  ByteStreamMultiFileSource* source
+    = (ByteStreamMultiFileSource*)clientData;
+  source->onSourceClosure1();
+}
+
+void ByteStreamMultiFileSource::onSourceClosure1() {
+  // This routine was called because the currently-read source was closed
+  // (probably due to EOF).  Close this source down, and move to the
+  // next one:
+  ByteStreamFileSource*& source
+    = fSourceArray[fCurrentlyReadSourceNumber++];
+  Medium::close(source);
+  source = NULL;
+
+  // Try reading again:
+  // (If that was the last file, "doGetNextFrame()" will handle closure.)
+  doGetNextFrame();
+}
diff --git a/liveMedia/COPYING b/liveMedia/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/liveMedia/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/liveMedia/COPYING.LESSER b/liveMedia/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/liveMedia/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/liveMedia/DVVideoFileServerMediaSubsession.cpp b/liveMedia/DVVideoFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..0927883
--- /dev/null
+++ b/liveMedia/DVVideoFileServerMediaSubsession.cpp
@@ -0,0 +1,103 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a DV video file.
+// Implementation
+
+#include "DVVideoFileServerMediaSubsession.hh"
+#include "DVVideoRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+#include "DVVideoStreamFramer.hh"
+
+// Factory function:
+DVVideoFileServerMediaSubsession*
+DVVideoFileServerMediaSubsession::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource) {
+  return new DVVideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+// "fFileDuration" remains 0.0 until "createNewStreamSource()" manages to
+// compute it from the file's frame parameters:
+DVVideoFileServerMediaSubsession
+::DVVideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource)
+  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
+    fFileDuration(0.0) {
+}
+
+DVVideoFileServerMediaSubsession::~DVVideoFileServerMediaSubsession() {
+}
+
+// Create the stream source for a client session: a file source wrapped in
+// a DV framer.  Also (if possible) compute the file's duration and an
+// estimated bitrate from the framer's frame parameters.
+FramedSource* DVVideoFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+  // Create the video source:
+  ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
+  if (fileSource == NULL) return NULL;
+  fFileSize = fileSource->fileSize();
+
+  // Create a framer for the Video Elementary Stream:
+  DVVideoStreamFramer* framer = DVVideoStreamFramer::createNew(envir(), fileSource, True/*the file source is seekable*/);
+
+  // Use the framer to figure out the file's duration:
+  unsigned frameSize;       // bytes per DV frame
+  double frameDuration;     // microseconds per DV frame
+  if (framer->getFrameParameters(frameSize, frameDuration)) {
+    // duration = (numFrames * frameDuration-in-us) / 1e6 seconds:
+    fFileDuration = (float)(((int64_t)fFileSize*frameDuration)/(frameSize*1000000.0));
+    estBitrate = (unsigned)((8000.0*frameSize)/frameDuration); // in kbps
+  } else {
+    estBitrate = 50000; // kbps, estimate
+  }
+
+  return framer;
+}
+
+// Create the RTP sink that will packetize the DV stream (RFC 3189):
+RTPSink* DVVideoFileServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock,
+							    unsigned char rtpPayloadTypeIfDynamic,
+							    FramedSource* /*inputSource*/) {
+  return DVVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+}
+
+// The "a=fmtp:" SDP line comes from the sink, which in turn derives it
+// from the framer's detected DV profile:
+char const* DVVideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
+  return ((DVVideoRTPSink*)rtpSink)->auxSDPLineFromFramer((DVVideoStreamFramer*)inputSource);
+}
+
+// Returns 0.0 if the duration could not be computed from the file:
+float DVVideoFileServerMediaSubsession::duration() const {
+  return fFileDuration;
+}
+
+// Seek within the underlying file, translating a normal-play-time (in
+// seconds) into a byte position, pro-rata against the file's duration.
+void DVVideoFileServerMediaSubsession
+::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes) {
+  // First, get the file source from "inputSource" (a framer):
+  DVVideoStreamFramer* framer = (DVVideoStreamFramer*)inputSource;
+  ByteStreamFileSource* fileSource = (ByteStreamFileSource*)(framer->inputSource());
+
+  // Then figure out where to seek to within the file:
+  // (If "fFileDuration" is unknown (0.0), we can't seek at all.)
+  if (fFileDuration > 0.0) {
+    u_int64_t seekByteNumber = (u_int64_t)(((int64_t)fFileSize*seekNPT)/fFileDuration);
+    numBytes = (u_int64_t)(((int64_t)fFileSize*streamDuration)/fFileDuration);
+    fileSource->seekToByteAbsolute(seekByteNumber, numBytes);
+  }
+}
+
+// Limit the number of bytes streamed - without changing the current
+// position - by translating "streamDuration" (seconds) into bytes:
+void DVVideoFileServerMediaSubsession
+::setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes) {
+  // First, get the file source from "inputSource" (a framer):
+  DVVideoStreamFramer* framer = (DVVideoStreamFramer*)inputSource;
+  ByteStreamFileSource* fileSource = (ByteStreamFileSource*)(framer->inputSource());
+
+  // Then figure out how many bytes to limit the streaming to:
+  if (fFileDuration > 0.0) {
+    numBytes = (u_int64_t)(((int64_t)fFileSize*streamDuration)/fFileDuration);
+    fileSource->seekToByteRelative(0, numBytes); // offset 0: keep current position
+  }
+}
diff --git a/liveMedia/DVVideoRTPSink.cpp b/liveMedia/DVVideoRTPSink.cpp
new file mode 100644
index 0000000..e60e6fc
--- /dev/null
+++ b/liveMedia/DVVideoRTPSink.cpp
@@ -0,0 +1,95 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for DV video (RFC 3189)
+// (Thanks to Ben Hutchings for prototyping this.)
+// Implementation
+
+#include "DVVideoRTPSink.hh"
+
+////////// DVVideoRTPSink implementation //////////
+
+// DV video is carried with a 90 kHz RTP timestamp clock and the "DV"
+// payload format name (RFC 3189):
+DVVideoRTPSink
+::DVVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat)
+  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "DV"),
+    fFmtpSDPLine(NULL) {
+}
+
+DVVideoRTPSink::~DVVideoRTPSink() {
+  delete[] fFmtpSDPLine; // allocated (if at all) by "auxSDPLineFromFramer()"
+}
+
+// Factory function:
+DVVideoRTPSink*
+DVVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
+  return new DVVideoRTPSink(env, RTPgs, rtpPayloadFormat);
+}
+
+Boolean DVVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+  // Our source must be an appropriate framer:
+  return source.isDVVideoStreamFramer();
+}
+
+// Per-packet handling: set the RTP marker bit on the frame's final
+// fragment, and stamp every packet with the frame's presentation time.
+void DVVideoRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
+					    unsigned char* /*frameStart*/,
+					    unsigned /*numBytesInFrame*/,
+					    struct timeval framePresentationTime,
+					    unsigned numRemainingBytes) {
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit:
+    setMarkerBit();
+  }
+
+  // Also set the RTP timestamp:
+  setTimestamp(framePresentationTime);
+}
+
+unsigned DVVideoRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const {
+  unsigned initialOverflow = MultiFramedRTPSink::computeOverflowForNewFrame(newFrameSize);
+
+  // Adjust (increase) this overflow, if necessary, so that the amount of frame data that we use is an integral number
+  // of DIF blocks:
+  unsigned numFrameBytesUsed = newFrameSize - initialOverflow;
+  initialOverflow += numFrameBytesUsed%DV_DIF_BLOCK_SIZE;
+
+  return initialOverflow;
+}
+
+char const* DVVideoRTPSink::auxSDPLine() {
+  // Generate a new "a=fmtp:" line each time, using parameters from
+  // our framer source (in case they've changed since the last time that
+  // we were called):
+  DVVideoStreamFramer* framerSource = (DVVideoStreamFramer*)fSource;
+  if (framerSource == NULL) return NULL; // we don't yet have a source
+
+  return auxSDPLineFromFramer(framerSource);
+}
+
+// Build (and cache in "fFmtpSDPLine") an "a=fmtp:" line announcing the
+// framer's DV profile name.  Returns NULL if the profile is unknown.
+char const* DVVideoRTPSink::auxSDPLineFromFramer(DVVideoStreamFramer* framerSource) {
+  char const* const profileName = framerSource->profileName();
+  if (profileName == NULL) return NULL;
+
+  char const* const fmtpSDPFmt = "a=fmtp:%d encode=%s;audio=bundled\r\n";
+  // Size the buffer from the full format string (whose "%d"/"%s" escapes
+  // provide slack), plus up to 3 digits of payload format code, plus the
+  // profile name - enough for the "sprintf()" below (incl. the NUL):
+  unsigned fmtpSDPFmtSize = strlen(fmtpSDPFmt)
+    + 3 // max payload format code length
+    + strlen(profileName);
+  delete[] fFmtpSDPLine; // if it already exists
+  fFmtpSDPLine = new char[fmtpSDPFmtSize];
+  sprintf(fFmtpSDPLine, fmtpSDPFmt, rtpPayloadType(), profileName);
+
+  return fFmtpSDPLine;
+}
diff --git a/liveMedia/DVVideoRTPSource.cpp b/liveMedia/DVVideoRTPSource.cpp
new file mode 100644
index 0000000..9afe213
--- /dev/null
+++ b/liveMedia/DVVideoRTPSource.cpp
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// DV Video RTP Sources
+// Implementation
+
+#include "DVVideoRTPSource.hh"
+
+// Factory function:
+DVVideoRTPSource*
+DVVideoRTPSource::createNew(UsageEnvironment& env,
+			    Groupsock* RTPgs,
+			    unsigned char rtpPayloadFormat,
+			    unsigned rtpTimestampFrequency) {
+  return new DVVideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
+}
+
+DVVideoRTPSource::DVVideoRTPSource(UsageEnvironment& env,
+				   Groupsock* rtpGS,
+				   unsigned char rtpPayloadFormat,
+				   unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, rtpGS,
+			 rtpPayloadFormat, rtpTimestampFrequency) {
+}
+
+DVVideoRTPSource::~DVVideoRTPSource() {
+}
+
+#define DV_DIF_BLOCK_SIZE 80   // each DV DIF block is 80 bytes
+#define DV_SECTION_HEADER 0x1F // section-type ID of a DIF sequence header block
+
+// DV packets (RFC 3189) carry no payload-specific header; we only inspect
+// the first DIF block's ID bytes to detect the start of a frame.
+Boolean DVVideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  unsigned const packetSize = packet->dataSize();
+  if (packetSize < DV_DIF_BLOCK_SIZE) return False; // TARFU!
+
+  u_int8_t const* data = packet->data();
+  // A frame begins with a header-section DIF block whose sequence and
+  // block numbers are zero (data[1] high 5 bits and data[2] clear):
+  fCurrentPacketBeginsFrame = data[0] == DV_SECTION_HEADER && (data[1]&0xf8) == 0 && data[2] == 0; // thanks to Ben Hutchings
+
+  // The RTP "M" (marker) bit indicates the last fragment of a frame:
+  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+  // There is no special header
+  resultSpecialHeaderSize = 0;
+  return True;
+}
+
+char const* DVVideoRTPSource::MIMEtype() const {
+  return "video/DV";
+}
+
diff --git a/liveMedia/DVVideoStreamFramer.cpp b/liveMedia/DVVideoStreamFramer.cpp
new file mode 100644
index 0000000..8d94758
--- /dev/null
+++ b/liveMedia/DVVideoStreamFramer.cpp
@@ -0,0 +1,220 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that parses a DV input stream into DV frames to deliver to the downstream object
+// Implementation
+// (Thanks to Ben Hutchings for his help, including a prototype implementation.)
+
+#include "DVVideoStreamFramer.hh"
+#include "GroupsockHelper.hh"
+
+////////// DVVideoStreamFramer implementation //////////
+
+DVVideoStreamFramer::DVVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+					 Boolean sourceIsSeekable, Boolean leavePresentationTimesUnmodified)
+  : FramedFilter(env, inputSource),
+    fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified),
+    fOurProfile(NULL), fInitialBlocksPresent(False), fSourceIsSeekable(sourceIsSeekable) {
+  fTo = NULL; // hack used when reading "fSavedInitialBlocks": a NULL "fTo"
+              // tells "afterGettingFrame()" the read was for profile probing
+  // Use the current wallclock time as the initial 'presentation time':
+  gettimeofday(&fNextFramePresentationTime, NULL);
+}
+
+DVVideoStreamFramer::~DVVideoStreamFramer() {
+}
+
+// Factory function:
+DVVideoStreamFramer*
+DVVideoStreamFramer::createNew(UsageEnvironment& env, FramedSource* inputSource,
+			       Boolean sourceIsSeekable, Boolean leavePresentationTimesUnmodified) {
+  return new DVVideoStreamFramer(env, inputSource, sourceIsSeekable, leavePresentationTimesUnmodified);
+}
+
+// Define the parameters for the profiles that we understand:
+struct DVVideoProfile {
+  char const* name;       // profile name, as used in the SDP "encode=" attribute
+  unsigned apt;           // 'application type' bits from the sequence header
+  unsigned sType;         // 'signal type' bits from the VAUX source pack
+  unsigned sequenceCount; // DIF sequences per channel (10 => 60-field systems; 12 => 50-field)
+  unsigned channelCount;
+  unsigned dvFrameSize; // in bytes (== sequenceCount*channelCount*(DV_NUM_BLOCKS_PER_SEQUENCE*DV_DIF_BLOCK_SIZE i.e. 12000))
+  double frameDuration; // duration of the above, in microseconds. (1000000/this == frame rate)
+};
+
+// Table of known profiles, terminated by a NULL-named sentinel entry.
+// It is keyed (for lookup) by the (apt, sType, sequenceCount) triple:
+static DVVideoProfile const profiles[] = {
+   { "SD-VCR/525-60", 0, 0x00, 10, 1, 120000, (1000000*1001)/30000.0 },
+   { "SD-VCR/625-50", 0, 0x00, 12, 1, 144000, 1000000/25.0 },
+   { "314M-25/525-60", 1, 0x00, 10, 1, 120000, (1000000*1001)/30000.0 },
+   { "314M-25/625-50", 1, 0x00, 12, 1, 144000, 1000000/25.0 },
+   { "314M-50/525-60", 1, 0x04, 10, 2, 240000, (1000000*1001)/30000.0 },
+   { "314M-50/625-50", 1, 0x04, 12, 2, 288000, 1000000/25.0 },
+   { "370M/1080-60i", 1, 0x14, 10, 4, 480000, (1000000*1001)/30000.0 },
+   { "370M/1080-50i", 1, 0x14, 12, 4, 576000, 1000000/25.0 },
+   { "370M/720-60p", 1, 0x18, 10, 2, 240000, (1000000*1001)/60000.0 },
+   { "370M/720-50p", 1, 0x18, 12, 2, 288000, 1000000/50.0 },
+   { NULL, 0, 0, 0, 0, 0, 0.0 }
+  };
+
+
+// Return the stream's profile name, probing the stream first if the
+// profile hasn't been determined yet.  NULL if it can't be determined.
+char const* DVVideoStreamFramer::profileName() {
+  if (fOurProfile == NULL) getProfile();
+
+  return fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->name : NULL;
+}
+
+// Report the profile's frame size (bytes) and frame duration (us).
+// Returns False if the profile can't be determined.
+Boolean DVVideoStreamFramer::getFrameParameters(unsigned& frameSize, double& frameDuration) {
+  if (fOurProfile == NULL) getProfile();
+  if (fOurProfile == NULL) return False;
+
+  frameSize = ((DVVideoProfile const*)fOurProfile)->dvFrameSize;
+  frameDuration = ((DVVideoProfile const*)fOurProfile)->frameDuration;
+  return True;
+}
+
+void DVVideoStreamFramer::getProfile() {
+  // To determine the stream's profile, we need to first read a chunk of data that we can parse:
+  fInputSource->getNextFrame(fSavedInitialBlocks, DV_SAVED_INITIAL_BLOCKS_SIZE,
+			     afterGettingFrame, this, FramedSource::handleClosure, this);
+
+  // Handle events until the requested data arrives:
+  // (This runs a nested event loop, with "fInitialBlocksPresent" as the
+  //  'watch variable' that "afterGettingFrame()" sets to stop it.)
+  envir().taskScheduler().doEventLoop(&fInitialBlocksPresent);
+}
+
+Boolean DVVideoStreamFramer::isDVVideoStreamFramer() const {
+  return True;
+}
+
+void DVVideoStreamFramer::doGetNextFrame() {
+  fFrameSize = 0; // initially, until we deliver data
+
+  // If we have saved initial blocks (and won't be seeking back to re-read this data), use this data first.
+  if (fInitialBlocksPresent && !fSourceIsSeekable) {
+    // For simplicity, we require the downstream object's buffer to be >= this data's size:
+    if (fMaxSize < DV_SAVED_INITIAL_BLOCKS_SIZE) {
+      fNumTruncatedBytes = fMaxSize;
+      afterGetting(this); // deliver nothing (fFrameSize == 0), reporting truncation
+      return;
+    }
+
+    memmove(fTo, fSavedInitialBlocks, DV_SAVED_INITIAL_BLOCKS_SIZE);
+    fFrameSize = DV_SAVED_INITIAL_BLOCKS_SIZE;
+    fTo += DV_SAVED_INITIAL_BLOCKS_SIZE; // subsequent reads append after this data
+    fInitialBlocksPresent = False; // for the future
+  }
+
+  // Arrange to read the (rest of the) requested data.
+  // (But first, make sure that we read an integral multiple of the DV block size.)
+  fMaxSize -= fMaxSize%DV_DIF_BLOCK_SIZE;
+  getAndDeliverData();
+}
+
+#define DV_SMALLEST_POSSIBLE_FRAME_SIZE 120000
+
+// Ask the upstream source for the bytes still needed to complete the
+// current DV frame (or as many as fit in the reader's buffer):
+void DVVideoStreamFramer::getAndDeliverData() {
+  unsigned const totFrameSize
+    = fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->dvFrameSize : DV_SMALLEST_POSSIBLE_FRAME_SIZE;
+  unsigned totBytesToDeliver = totFrameSize < fMaxSize ? totFrameSize : fMaxSize;
+  unsigned numBytesToRead = totBytesToDeliver - fFrameSize;
+
+  fInputSource->getNextFrame(fTo, numBytesToRead, afterGettingFrame, this, FramedSource::handleClosure, this);
+}
+
+// Static trampoline: forward the upstream delivery to the member function.
+void DVVideoStreamFramer::afterGettingFrame(void* clientData, unsigned frameSize,
+					    unsigned numTruncatedBytes,
+					    struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
+  DVVideoStreamFramer* source = (DVVideoStreamFramer*)clientData;
+  source->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
+}
+
+// Accessors into an array of DIF blocks starting at "ptr":
+// DVSectionId(n): the section-type ID byte of block #n;
+// DVData(n,i): data byte #i of block #n (skipping its 3-byte ID header).
+#define DVSectionId(n) ptr[(n)*DV_DIF_BLOCK_SIZE + 0]
+#define DVData(n,i) ptr[(n)*DV_DIF_BLOCK_SIZE + 3+(i)]
+
+#define DV_SECTION_HEADER 0x1F
+#define DV_PACK_HEADER_10 0x3F
+#define DV_PACK_HEADER_12 0xBF
+#define DV_SECTION_VAUX_MIN 0x50
+#define DV_SECTION_VAUX_MAX 0x5F
+#define DV_PACK_VIDEO_SOURCE 60
+#ifndef MILLION
+#define MILLION 1000000
+#endif
+
+// Handle data arriving from the upstream source.  This serves two modes:
+// (1) a profile-probing read into "fSavedInitialBlocks" (fTo == NULL), and
+// (2) a normal delivery toward the downstream reader (fTo != NULL).
+void DVVideoStreamFramer::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) {
+  if (fOurProfile == NULL && frameSize >= DV_SAVED_INITIAL_BLOCKS_SIZE) {
+    // (Try to) parse this data enough to figure out its profile.
+    // We assume that the data begins on a (80-byte) block boundary, but not necessarily on a (150-block) sequence boundary.
+    // We therefore scan each 80-byte block, until we find the 6-block header that begins a sequence:
+    u_int8_t const* data = (fTo == NULL) ? fSavedInitialBlocks : fTo;
+    for (u_int8_t const* ptr = data; ptr + 6*DV_DIF_BLOCK_SIZE <= &data[DV_SAVED_INITIAL_BLOCKS_SIZE]; ptr += DV_DIF_BLOCK_SIZE) {
+      // Check whether "ptr" points to an appropriate header:
+      u_int8_t const sectionHeader = DVSectionId(0);
+      u_int8_t const sectionVAUX = DVSectionId(5);
+      u_int8_t const packHeaderNum = DVData(0,0);
+
+      if (sectionHeader == DV_SECTION_HEADER
+	  && (packHeaderNum == DV_PACK_HEADER_10 || packHeaderNum == DV_PACK_HEADER_12)
+	  && (sectionVAUX >= DV_SECTION_VAUX_MIN && sectionVAUX <= DV_SECTION_VAUX_MAX)) {
+	// This data begins a sequence; look up the DV profile from this:
+	u_int8_t const apt = DVData(0,1)&0x07;
+	u_int8_t const sType = DVData(5,48)&0x1F;
+	u_int8_t const sequenceCount = (packHeaderNum == DV_PACK_HEADER_10) ? 10 : 12;
+
+	// Use these three parameters (apt, sType, sequenceCount) to look up the DV profile:
+	for (DVVideoProfile const* profile = profiles; profile->name != NULL; ++profile) {
+	  if (profile->apt == apt && profile->sType == sType && profile->sequenceCount == sequenceCount) {
+	    fOurProfile = profile;
+	    break;
+	  }
+	}
+	break; // because we found a correct sequence header (even if we don't happen to define a profile for it)
+      }
+    }
+  }
+
+  if (fTo != NULL) { // There is a downstream object; complete delivery to it (or read more data, if necessary)
+    unsigned const totFrameSize
+      = fOurProfile != NULL ? ((DVVideoProfile const*)fOurProfile)->dvFrameSize : DV_SMALLEST_POSSIBLE_FRAME_SIZE;
+    fFrameSize += frameSize;
+    fTo += frameSize;
+    fPresentationTime = presentationTime; // by default; may get changed below
+
+    if (fFrameSize < totFrameSize && fFrameSize < fMaxSize && numTruncatedBytes == 0) {
+      // We have more data to deliver; get it now:
+      getAndDeliverData();
+    } else {
+      // We're done delivering this DV frame (but check for truncation):
+      fNumTruncatedBytes = totFrameSize - fFrameSize;
+
+      if (fOurProfile != NULL) {
+	// Also set the presentation time, and increment it for next time,
+	// based on the length of this frame:
+	if (!fLeavePresentationTimesUnmodified) fPresentationTime = fNextFramePresentationTime;
+
+	DVVideoProfile const* ourProfile =(DVVideoProfile const*)fOurProfile;
+	// Pro-rate the frame duration by the fraction of the frame delivered:
+	double durationInMicroseconds = (fFrameSize*ourProfile->frameDuration)/ourProfile->dvFrameSize;
+	fDurationInMicroseconds = (unsigned)durationInMicroseconds;
+	fNextFramePresentationTime.tv_usec += fDurationInMicroseconds;
+	fNextFramePresentationTime.tv_sec += fNextFramePresentationTime.tv_usec/MILLION;
+	fNextFramePresentationTime.tv_usec %= MILLION;
+      }
+
+      afterGetting(this);
+    }
+  } else {
+    // We read data into our special buffer; signal that it has arrived:
+    // (This stops the nested event loop in "getProfile()".)
+    fInitialBlocksPresent = True;
+  }
+}
diff --git a/liveMedia/DeviceSource.cpp b/liveMedia/DeviceSource.cpp
new file mode 100644
index 0000000..95a1077
--- /dev/null
+++ b/liveMedia/DeviceSource.cpp
@@ -0,0 +1,156 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A template for a MediaSource encapsulating an audio/video input device
+//
+// NOTE: Sections of this code labeled "%%% TO BE WRITTEN %%%" are incomplete, and need to be written by the programmer
+// (depending on the features of the particular device).
+// Implementation
+
+#include "DeviceSource.hh"
+#include <GroupsockHelper.hh> // for "gettimeofday()"
+
// Factory function: the public way to create a "DeviceSource".
DeviceSource*
DeviceSource::createNew(UsageEnvironment& env,
			DeviceParameters params) {
  return new DeviceSource(env, params);
}

// Static members, shared by all instances:
// "eventTriggerId" is allocated lazily in the constructor (0 == not yet created).
EventTriggerId DeviceSource::eventTriggerId = 0;

// Counts live instances, so global device setup/teardown happens exactly once.
unsigned DeviceSource::referenceCount = 0;
+
DeviceSource::DeviceSource(UsageEnvironment& env,
			   DeviceParameters params)
  : FramedSource(env), fParams(params) {
  if (referenceCount == 0) {
    // Any global initialization of the device would be done here:
    //%%% TO BE WRITTEN %%%
  }
  ++referenceCount;

  // Any instance-specific initialization of the device would be done here:
  //%%% TO BE WRITTEN %%%

  // We arrange here for our "deliverFrame" member function to be called
  // whenever the next frame of data becomes available from the device.
  //
  // If the device can be accessed as a readable socket, then one easy way to do this is using a call to
  //     envir().taskScheduler().turnOnBackgroundReadHandling( ... )
  // (See examples of this call in the "liveMedia" directory.)
  //
  // If, however, the device *cannot* be accessed as a readable socket, then instead we can implement it using 'event triggers':
  // Create an 'event trigger' for this device (if it hasn't already been done):
  // (Note: "eventTriggerId" is a static member, so the trigger is shared by all instances.)
  if (eventTriggerId == 0) {
    eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
  }
}

DeviceSource::~DeviceSource() {
  // Any instance-specific 'destruction' (i.e., resetting) of the device would be done here:
  //%%% TO BE WRITTEN %%%

  --referenceCount;
  if (referenceCount == 0) {
    // Any global 'destruction' (i.e., resetting) of the device would be done here:
    //%%% TO BE WRITTEN %%%

    // Reclaim our 'event trigger'
    envir().taskScheduler().deleteEventTrigger(eventTriggerId);
    eventTriggerId = 0;
  }
}
+
// Called by our 'downstream' object when it wants the next frame.
// This is template code: the two "if (0 ...)" tests below are placeholders
// to be replaced with real device checks.
void DeviceSource::doGetNextFrame() {
  // This function is called (by our 'downstream' object) when it asks for new data.

  // Note: If, for some reason, the source device stops being readable (e.g., it gets closed), then you do the following:
  if (0 /* the source stops being readable */ /*%%% TO BE WRITTEN %%%*/) {
    handleClosure();
    return;
  }

  // If a new frame of data is immediately available to be delivered, then do this now:
  if (0 /* a new frame of data is immediately available to be delivered*/ /*%%% TO BE WRITTEN %%%*/) {
    deliverFrame();
  }

  // No new data is immediately available to be delivered.  We don't do anything more here.
  // Instead, our event trigger must be called (e.g., from a separate thread) when new data becomes available.
}
+
+void DeviceSource::deliverFrame0(void* clientData) {
+ ((DeviceSource*)clientData)->deliverFrame();
+}
+
void DeviceSource::deliverFrame() {
  // This function is called when new frame data is available from the device.
  // We deliver this data by copying it to the 'downstream' object, using the following parameters (class members):
  // 'in' parameters (these should *not* be modified by this function):
  //     fTo: The frame data is copied to this address.
  //         (Note that the variable "fTo" is *not* modified.  Instead,
  //          the frame data is copied to the address pointed to by "fTo".)
  //     fMaxSize: This is the maximum number of bytes that can be copied
  //         (If the actual frame is larger than this, then it should
  //          be truncated, and "fNumTruncatedBytes" set accordingly.)
  // 'out' parameters (these are modified by this function):
  //     fFrameSize: Should be set to the delivered frame size (<= fMaxSize).
  //     fNumTruncatedBytes: Should be set iff the delivered frame would have been
  //         bigger than "fMaxSize", in which case it's set to the number of bytes
  //         that have been omitted.
  //     fPresentationTime: Should be set to the frame's presentation time
  //         (seconds, microseconds).  This time must be aligned with 'wall-clock time' - i.e., the time that you would get
  //         by calling "gettimeofday()".
  //     fDurationInMicroseconds: Should be set to the frame's duration, if known.
  //         If, however, the device is a 'live source' (e.g., encoded from a camera or microphone), then we probably don't need
  //         to set this variable, because - in this case - data will never arrive 'early'.
  // Note the code below.

  if (!isCurrentlyAwaitingData()) return; // we're not ready for the data yet

  // Placeholder values: 0xDEADBEEF is a deliberately-invalid pointer, but is
  // never dereferenced because "newFrameSize" is 0 (so the memmove below copies
  // nothing).  Both must be replaced with real device data before use.
  u_int8_t* newFrameDataStart = (u_int8_t*)0xDEADBEEF; //%%% TO BE WRITTEN %%%
  unsigned newFrameSize = 0; //%%% TO BE WRITTEN %%%

  // Deliver the data here:
  if (newFrameSize > fMaxSize) {
    fFrameSize = fMaxSize;
    fNumTruncatedBytes = newFrameSize - fMaxSize;
  } else {
    fFrameSize = newFrameSize;
  }
  gettimeofday(&fPresentationTime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
  // If the device is *not* a 'live source' (e.g., it comes instead from a file or buffer), then set "fDurationInMicroseconds" here.
  memmove(fTo, newFrameDataStart, fFrameSize);

  // After delivering the data, inform the reader that it is now available:
  FramedSource::afterGetting(this);
}
+
+
+// The following code would be called to signal that a new frame of data has become available.
+// This (unlike other "LIVE555 Streaming Media" library code) may be called from a separate thread.
+// (Note, however, that "triggerEvent()" cannot be called with the same 'event trigger id' from different threads.
+// Also, if you want to have multiple device threads, each one using a different 'event trigger id', then you will need
+// to make "eventTriggerId" a non-static member variable of "DeviceSource".)
// Signals (possibly from another thread) that a new frame is available, by
// firing the device's event trigger; the task scheduler will then call
// "deliverFrame0()" (registered via "createEventTrigger()" in the constructor).
void signalNewFrameData() {
  TaskScheduler* ourScheduler = NULL; //%%% TO BE WRITTEN %%%
  DeviceSource* ourDevice  = NULL; //%%% TO BE WRITTEN %%%

  if (ourScheduler != NULL) { // sanity check
    ourScheduler->triggerEvent(DeviceSource::eventTriggerId, ourDevice);
  }
}
diff --git a/liveMedia/DigestAuthentication.cpp b/liveMedia/DigestAuthentication.cpp
new file mode 100644
index 0000000..1ccaf45
--- /dev/null
+++ b/liveMedia/DigestAuthentication.cpp
@@ -0,0 +1,173 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class used for digest authentication.
+// Implementation
+
+#include "DigestAuthentication.hh"
+#include "ourMD5.hh"
+#include <strDup.hh>
+#include <GroupsockHelper.hh> // for gettimeofday()
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
// Default constructor: all fields empty/NULL.
Authenticator::Authenticator() {
  assign(NULL, NULL, NULL, NULL, False);
}

// Constructs with a username+password only; realm and nonce are set later
// (via "setRealmAndNonce()" / "setRealmAndRandomNonce()").
// If "passwordIsMD5" is True, "password" is already in digested form
// (see "computeDigestResponse()").
Authenticator::Authenticator(char const* username, char const* password, Boolean passwordIsMD5) {
  assign(NULL, NULL, username, password, passwordIsMD5);
}

// Copy constructor: deep-copies all strings (via "assign()" -> "strDup()").
Authenticator::Authenticator(const Authenticator& orig) {
  assign(orig.realm(), orig.nonce(), orig.username(), orig.password(), orig.fPasswordIsMD5);
}
+
+Authenticator& Authenticator::operator=(const Authenticator& rightSide) {
+ if (&rightSide != this) {
+ reset();
+ assign(rightSide.realm(), rightSide.nonce(),
+ rightSide.username(), rightSide.password(), rightSide.fPasswordIsMD5);
+ }
+
+ return *this;
+}
+
// NOTE: despite the '<' spelling, this takes a *pointer* and is really an
// "is 'rightSide' newer/different than us?" test: True iff "rightSide" has a
// realm or nonce set, or our username/password is unset or differs from its.
Boolean Authenticator::operator<(const Authenticator* rightSide) {
  // Returns True if "rightSide" is 'newer' than us:
  if (rightSide != NULL && rightSide != this &&
      (rightSide->realm() != NULL || rightSide->nonce() != NULL ||
       username() == NULL || password() == NULL ||
       strcmp(rightSide->username(), username()) != 0 ||
       strcmp(rightSide->password(), password()) != 0)) {
    // NOTE(review): "rightSide->username()" / "rightSide->password()" are not
    // NULL-checked before the strcmp()s above - confirm callers guarantee
    // they are set.
    return True;
  }

  return False;
}
+
Authenticator::~Authenticator() {
  reset(); // frees all heap-allocated strings
}

// Clears all fields (realm, nonce, username, password), freeing their storage.
void Authenticator::reset() {
  resetRealmAndNonce();
  resetUsernameAndPassword();
}

// Replaces the current realm+nonce with deep copies of the given strings.
void Authenticator::setRealmAndNonce(char const* realm, char const* nonce) {
  resetRealmAndNonce();
  assignRealmAndNonce(realm, nonce);
}
+
+void Authenticator::setRealmAndRandomNonce(char const* realm) {
+ resetRealmAndNonce();
+
+ // Construct data to seed the random nonce:
+ struct {
+ struct timeval timestamp;
+ unsigned counter;
+ } seedData;
+ gettimeofday(&seedData.timestamp, NULL);
+ static unsigned counter = 0;
+ seedData.counter = ++counter;
+
+ // Use MD5 to compute a 'random' nonce from this seed data:
+ char nonceBuf[33];
+ our_MD5Data((unsigned char*)(&seedData), sizeof seedData, nonceBuf);
+
+ assignRealmAndNonce(realm, nonceBuf);
+}
+
// Replaces the current username+password with deep copies of the given strings.
// "passwordIsMD5" indicates that "password" is already in digested form
// (see "computeDigestResponse()").
void Authenticator::setUsernameAndPassword(char const* username,
					   char const* password,
					   Boolean passwordIsMD5) {
  resetUsernameAndPassword();
  assignUsernameAndPassword(username, password, passwordIsMD5);
}
+
+char const* Authenticator::computeDigestResponse(char const* cmd,
+ char const* url) const {
+ // The "response" field is computed as:
+ // md5(md5(<username>:<realm>:<password>):<nonce>:md5(<cmd>:<url>))
+ // or, if "fPasswordIsMD5" is True:
+ // md5(<password>:<nonce>:md5(<cmd>:<url>))
+ char ha1Buf[33];
+ if (fPasswordIsMD5) {
+ strncpy(ha1Buf, password(), 32);
+ ha1Buf[32] = '\0'; // just in case
+ } else {
+ unsigned const ha1DataLen = strlen(username()) + 1
+ + strlen(realm()) + 1 + strlen(password());
+ unsigned char* ha1Data = new unsigned char[ha1DataLen+1];
+ sprintf((char*)ha1Data, "%s:%s:%s", username(), realm(), password());
+ our_MD5Data(ha1Data, ha1DataLen, ha1Buf);
+ delete[] ha1Data;
+ }
+
+ unsigned const ha2DataLen = strlen(cmd) + 1 + strlen(url);
+ unsigned char* ha2Data = new unsigned char[ha2DataLen+1];
+ sprintf((char*)ha2Data, "%s:%s", cmd, url);
+ char ha2Buf[33];
+ our_MD5Data(ha2Data, ha2DataLen, ha2Buf);
+ delete[] ha2Data;
+
+ unsigned const digestDataLen
+ = 32 + 1 + strlen(nonce()) + 1 + 32;
+ unsigned char* digestData = new unsigned char[digestDataLen+1];
+ sprintf((char*)digestData, "%s:%s:%s",
+ ha1Buf, nonce(), ha2Buf);
+ char const* result = our_MD5Data(digestData, digestDataLen, NULL);
+ delete[] digestData;
+ return result;
+}
+
// Releases a string previously returned by "computeDigestResponse()":
void Authenticator::reclaimDigestResponse(char const* responseStr) const {
  delete[](char*)responseStr;
}

void Authenticator::resetRealmAndNonce() {
  delete[] fRealm; fRealm = NULL;
  delete[] fNonce; fNonce = NULL;
}

void Authenticator::resetUsernameAndPassword() {
  delete[] fUsername; fUsername = NULL;
  delete[] fPassword; fPassword = NULL;
  fPasswordIsMD5 = False;
}

// Deep-copies realm+nonce.  (NOTE(review): assumes "strDup(NULL)" returns
// NULL rather than crashing - confirm in "strDup.hh".)
void Authenticator::assignRealmAndNonce(char const* realm, char const* nonce) {
  fRealm = strDup(realm);
  fNonce = strDup(nonce);
}

// Deep-copies username+password, first mapping NULL to the empty string:
void Authenticator::assignUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5) {
  if (username == NULL) username = "";
  if (password == NULL) password = "";

  fUsername = strDup(username);
  fPassword = strDup(password);
  fPasswordIsMD5 = passwordIsMD5;
}

// Sets all fields at once (used by the constructors and "operator=()"):
void Authenticator::assign(char const* realm, char const* nonce,
			   char const* username, char const* password, Boolean passwordIsMD5) {
  assignRealmAndNonce(realm, nonce);
  assignUsernameAndPassword(username, password, passwordIsMD5);
}
diff --git a/liveMedia/EBMLNumber.cpp b/liveMedia/EBMLNumber.cpp
new file mode 100644
index 0000000..c8c1898
--- /dev/null
+++ b/liveMedia/EBMLNumber.cpp
@@ -0,0 +1,150 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// EBML numbers (ids and sizes)
+// Implementation
+
+#include "EBMLNumber.hh"
+
// "stripLeading1" is recorded here but not used by this class's own code;
// presumably the parsing code (elsewhere) uses it to decide whether the
// leading '1' marker bit is part of the value.  Subclasses pass False for IDs
// and True for data sizes.
EBMLNumber::EBMLNumber(Boolean stripLeading1)
  : stripLeading1(stripLeading1), len(0) {
}

EBMLNumber::~EBMLNumber() {
}
+
+char* EBMLNumber::hexString() const {
+ static char printBuf[2*EBML_NUMBER_MAX_LEN + 1];
+
+ char* to = printBuf;
+ for (unsigned i = 0; i < len; ++i) {
+ sprintf(to, "%02X", data[i]);
+ to += 2;
+ }
+
+ return printBuf;
+}
+
+u_int64_t EBMLNumber::val() const {
+ u_int64_t result = 0;
+
+ for (unsigned i = 0; i < len; ++i) {
+ result = result*256 + data[i];
+ }
+
+ return result;
+}
+
EBMLId::EBMLId()
  : EBMLNumber(False) { // IDs are constructed with "stripLeading1" False
}

EBMLId::~EBMLId() {
}

// Returns a human-readable name for this Matroska/EBML ID (debugging only).
// The returned strings are static literals; unknown IDs map to a sentinel.
char const* EBMLId::stringName() const {
  switch (val()) {
    case MATROSKA_ID_EBML: { return "EBML"; }
    case MATROSKA_ID_VOID: { return "Void"; }
    case MATROSKA_ID_CRC_32: { return "CRC-32"; }
    case MATROSKA_ID_SEGMENT: { return "Segment"; }
    case MATROSKA_ID_SEEK_HEAD: { return "Seek Head"; }
    case MATROSKA_ID_SEEK: { return "Seek"; }
    case MATROSKA_ID_SEEK_ID: { return "Seek ID"; }
    case MATROSKA_ID_SEEK_POSITION: { return "Seek Position"; }
    case MATROSKA_ID_INFO: { return "Segment Info"; }
    case MATROSKA_ID_SEGMENT_UID: { return "Segment UID"; }
    case MATROSKA_ID_DURATION: { return "Segment Duration"; }
    case MATROSKA_ID_TIMECODE_SCALE: { return "Timecode Scale"; }
    case MATROSKA_ID_DATE_UTC: { return "Date (UTC)"; }
    case MATROSKA_ID_TITLE: { return "Title"; }
    case MATROSKA_ID_MUXING_APP: { return "Muxing App"; }
    case MATROSKA_ID_WRITING_APP: { return "Writing App"; }
    case MATROSKA_ID_CLUSTER: { return "Cluster"; }
    case MATROSKA_ID_TIMECODE: { return "TimeCode"; }
    case MATROSKA_ID_POSITION: { return "Position"; }
    case MATROSKA_ID_PREV_SIZE: { return "Prev. Size"; }
    case MATROSKA_ID_SIMPLEBLOCK: { return "SimpleBlock"; }
    case MATROSKA_ID_BLOCK_GROUP: { return "Block Group"; }
    case MATROSKA_ID_BLOCK: { return "Block"; }
    case MATROSKA_ID_BLOCK_DURATION: { return "Block Duration"; }
    case MATROSKA_ID_REFERENCE_BLOCK: { return "Reference Block"; }
    case MATROSKA_ID_TRACKS: { return "Tracks"; }
    case MATROSKA_ID_TRACK_ENTRY: { return "Track Entry"; }
    case MATROSKA_ID_TRACK_NUMBER: { return "Track Number"; }
    case MATROSKA_ID_TRACK_UID: { return "Track UID"; }
    case MATROSKA_ID_TRACK_TYPE: { return "Track Type"; }
    case MATROSKA_ID_FLAG_ENABLED: { return "Flag Enabled"; }
    case MATROSKA_ID_FLAG_DEFAULT: { return "Flag Default"; }
    case MATROSKA_ID_FLAG_FORCED: { return "Flag Forced"; }
    case MATROSKA_ID_FLAG_LACING: { return "Flag Lacing"; }
    case MATROSKA_ID_MIN_CACHE: { return "Min Cache"; }
    case MATROSKA_ID_DEFAULT_DURATION: { return "Default Duration"; }
    case MATROSKA_ID_TRACK_TIMECODE_SCALE: { return "Track Timecode Scale"; }
    case MATROSKA_ID_MAX_BLOCK_ADDITION_ID: { return "Max Block Addition ID"; }
    case MATROSKA_ID_NAME: { return "Name"; }
    case MATROSKA_ID_LANGUAGE: { return "Language"; }
    case MATROSKA_ID_CODEC: { return "Codec ID"; }
    case MATROSKA_ID_CODEC_PRIVATE: { return "Codec Private"; }
    case MATROSKA_ID_CODEC_NAME: { return "Codec Name"; }
    case MATROSKA_ID_CODEC_DECODE_ALL: { return "Codec Decode All"; }
    case MATROSKA_ID_VIDEO: { return "Video Settings"; }
    case MATROSKA_ID_FLAG_INTERLACED: { return "Flag Interlaced"; }
    case MATROSKA_ID_PIXEL_WIDTH: { return "Pixel Width"; }
    case MATROSKA_ID_PIXEL_HEIGHT: { return "Pixel Height"; }
    case MATROSKA_ID_DISPLAY_WIDTH: { return "Display Width"; }
    case MATROSKA_ID_DISPLAY_HEIGHT: { return "Display Height"; }
    case MATROSKA_ID_DISPLAY_UNIT: { return "Display Unit"; }
    case MATROSKA_ID_AUDIO: { return "Audio Settings"; }
    case MATROSKA_ID_SAMPLING_FREQUENCY: { return "Sampling Frequency"; }
    case MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY: { return "Output Sampling Frequency"; }
    case MATROSKA_ID_CHANNELS: { return "Channels"; }
    case MATROSKA_ID_BIT_DEPTH: { return "Bit Depth"; }
    case MATROSKA_ID_CONTENT_ENCODINGS: { return "Content Encodings"; }
    case MATROSKA_ID_CONTENT_ENCODING: { return "Content Encoding"; }
    case MATROSKA_ID_CONTENT_COMPRESSION: { return "Content Compression"; }
    case MATROSKA_ID_CONTENT_COMP_ALGO: { return "Content Compression Algorithm"; }
    case MATROSKA_ID_CONTENT_COMP_SETTINGS: { return "Content Compression Settings"; }
    case MATROSKA_ID_CONTENT_ENCRYPTION: { return "Content Encryption"; }
    case MATROSKA_ID_ATTACHMENTS: { return "Attachments"; }
    case MATROSKA_ID_ATTACHED_FILE: { return "Attached File"; }
    case MATROSKA_ID_FILE_DESCRIPTION: { return "File Description"; }
    case MATROSKA_ID_FILE_NAME: { return "File Name"; }
    case MATROSKA_ID_FILE_MIME_TYPE: { return "File MIME Type"; }
    case MATROSKA_ID_FILE_DATA: { return "File Data"; }
    case MATROSKA_ID_FILE_UID: { return "File UID"; }
    case MATROSKA_ID_CUES: { return "Cues"; }
    case MATROSKA_ID_CUE_POINT: { return "Cue Point"; }
    case MATROSKA_ID_CUE_TIME: { return "Cue Time"; }
    case MATROSKA_ID_CUE_TRACK_POSITIONS: { return "Cue Track Positions"; }
    case MATROSKA_ID_CUE_TRACK: { return "Cue Track"; }
    case MATROSKA_ID_CUE_CLUSTER_POSITION: { return "Cue Cluster Position"; }
    case MATROSKA_ID_CUE_BLOCK_NUMBER: { return "Cue Block Number"; }
    case MATROSKA_ID_TAGS: { return "Tags"; }
    case MATROSKA_ID_SEEK_PRE_ROLL: { return "SeekPreRoll"; }
    case MATROSKA_ID_CODEC_DELAY: { return "CodecDelay"; }
    case MATROSKA_ID_DISCARD_PADDING: { return "DiscardPadding"; }
    default: { return "*****unknown*****"; }
  }
}

EBMLDataSize::EBMLDataSize()
  : EBMLNumber(True) { // data sizes are constructed with "stripLeading1" True
}

EBMLDataSize::~EBMLDataSize() {
}
diff --git a/liveMedia/EBMLNumber.hh b/liveMedia/EBMLNumber.hh
new file mode 100644
index 0000000..2029da3
--- /dev/null
+++ b/liveMedia/EBMLNumber.hh
@@ -0,0 +1,144 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// EBML numbers (ids and sizes)
+// C++ header
+
+#ifndef _EBML_NUMBER_HH
+#define _EBML_NUMBER_HH
+
+#include "NetCommon.h"
+#include "Boolean.hh"
+#include <stdio.h>
+
+#define EBML_NUMBER_MAX_LEN 8
+
// Base class for a variable-length EBML number (at most EBML_NUMBER_MAX_LEN
// == 8 bytes).  The raw bytes live in "data[0..len-1]"; "val()" interprets
// them as a big-endian integer.  Fields are public - presumably filled in
// directly by the parsing code (which is not in this file).
class EBMLNumber {
public:
  EBMLNumber(Boolean stripLeading1 = True);
  virtual ~EBMLNumber();

  u_int64_t val() const;
  char* hexString() const; // used for debugging; returns a shared static buffer
  Boolean operator==(u_int64_t arg2) const { return val() == arg2; }
  Boolean operator!=(u_int64_t arg2) const { return !(*this == arg2); }

public:
  Boolean stripLeading1; // False for IDs, True for sizes (see subclasses); not used by val() itself
  unsigned len;          // number of valid bytes in "data"
  u_int8_t data[EBML_NUMBER_MAX_LEN];
};
+
+// Definitions of some Matroska/EBML IDs (including the ones that we check for):
+#define MATROSKA_ID_EBML 0x1A45DFA3
+#define MATROSKA_ID_VOID 0xEC
+#define MATROSKA_ID_CRC_32 0xBF
+#define MATROSKA_ID_SEGMENT 0x18538067
+#define MATROSKA_ID_SEEK_HEAD 0x114D9B74
+#define MATROSKA_ID_SEEK 0x4DBB
+#define MATROSKA_ID_SEEK_ID 0x53AB
+#define MATROSKA_ID_SEEK_POSITION 0x53AC
+#define MATROSKA_ID_INFO 0x1549A966
+#define MATROSKA_ID_SEGMENT_UID 0x73A4
+#define MATROSKA_ID_TIMECODE_SCALE 0x2AD7B1
+#define MATROSKA_ID_DURATION 0x4489
+#define MATROSKA_ID_DATE_UTC 0x4461
+#define MATROSKA_ID_TITLE 0x7BA9
+#define MATROSKA_ID_MUXING_APP 0x4D80
+#define MATROSKA_ID_WRITING_APP 0x5741
+#define MATROSKA_ID_CLUSTER 0x1F43B675
+#define MATROSKA_ID_TIMECODE 0xE7
+#define MATROSKA_ID_POSITION 0xA7
+#define MATROSKA_ID_PREV_SIZE 0xAB
+#define MATROSKA_ID_SIMPLEBLOCK 0xA3
+#define MATROSKA_ID_BLOCK_GROUP 0xA0
+#define MATROSKA_ID_BLOCK 0xA1
+#define MATROSKA_ID_BLOCK_DURATION 0x9B
+#define MATROSKA_ID_REFERENCE_BLOCK 0xFB
+#define MATROSKA_ID_TRACKS 0x1654AE6B
+#define MATROSKA_ID_TRACK_ENTRY 0xAE
+#define MATROSKA_ID_TRACK_NUMBER 0xD7
+#define MATROSKA_ID_TRACK_UID 0x73C5
+#define MATROSKA_ID_TRACK_TYPE 0x83
+#define MATROSKA_ID_FLAG_ENABLED 0xB9
+#define MATROSKA_ID_FLAG_DEFAULT 0x88
+#define MATROSKA_ID_FLAG_FORCED 0x55AA
+#define MATROSKA_ID_FLAG_LACING 0x9C
+#define MATROSKA_ID_MIN_CACHE 0x6DE7
+#define MATROSKA_ID_DEFAULT_DURATION 0x23E383
+#define MATROSKA_ID_TRACK_TIMECODE_SCALE 0x23314F
+#define MATROSKA_ID_MAX_BLOCK_ADDITION_ID 0x55EE
+#define MATROSKA_ID_NAME 0x536E
+#define MATROSKA_ID_LANGUAGE 0x22B59C
+#define MATROSKA_ID_CODEC 0x86
+#define MATROSKA_ID_CODEC_PRIVATE 0x63A2
+#define MATROSKA_ID_CODEC_NAME 0x258688
+#define MATROSKA_ID_CODEC_DECODE_ALL 0xAA
+#define MATROSKA_ID_VIDEO 0xE0
+#define MATROSKA_ID_FLAG_INTERLACED 0x9A
+#define MATROSKA_ID_PIXEL_WIDTH 0xB0
+#define MATROSKA_ID_PIXEL_HEIGHT 0xBA
+#define MATROSKA_ID_DISPLAY_WIDTH 0x54B0
+#define MATROSKA_ID_DISPLAY_HEIGHT 0x54BA
+#define MATROSKA_ID_DISPLAY_UNIT 0x54B2
+#define MATROSKA_ID_AUDIO 0xE1
+#define MATROSKA_ID_SAMPLING_FREQUENCY 0xB5
+#define MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY 0x78B5
+#define MATROSKA_ID_CHANNELS 0x9F
+#define MATROSKA_ID_BIT_DEPTH 0x6264
+#define MATROSKA_ID_CONTENT_ENCODINGS 0x6D80
+#define MATROSKA_ID_CONTENT_ENCODING 0x6240
+#define MATROSKA_ID_CONTENT_COMPRESSION 0x5034
+#define MATROSKA_ID_CONTENT_COMP_ALGO 0x4254
+#define MATROSKA_ID_CONTENT_COMP_SETTINGS 0x4255
+#define MATROSKA_ID_CONTENT_ENCRYPTION 0x5035
+#define MATROSKA_ID_ATTACHMENTS 0x1941A469
+#define MATROSKA_ID_ATTACHED_FILE 0x61A7
+#define MATROSKA_ID_FILE_DESCRIPTION 0x467E
+#define MATROSKA_ID_FILE_NAME 0x466E
+#define MATROSKA_ID_FILE_MIME_TYPE 0x4660
+#define MATROSKA_ID_FILE_DATA 0x465C
+#define MATROSKA_ID_FILE_UID 0x46AE
+#define MATROSKA_ID_CUES 0x1C53BB6B
+#define MATROSKA_ID_CUE_POINT 0xBB
+#define MATROSKA_ID_CUE_TIME 0xB3
+#define MATROSKA_ID_CUE_TRACK_POSITIONS 0xB7
+#define MATROSKA_ID_CUE_TRACK 0xF7
+#define MATROSKA_ID_CUE_CLUSTER_POSITION 0xF1
+#define MATROSKA_ID_CUE_BLOCK_NUMBER 0x5378
+#define MATROSKA_ID_TAGS 0x1254C367
+#define MATROSKA_ID_SEEK_PRE_ROLL 0x56BB
+#define MATROSKA_ID_CODEC_DELAY 0x56AA
+#define MATROSKA_ID_DISCARD_PADDING 0x75A2
+#define MATROSKA_ID_COLOR_SPACE 0x2EB524
+#define MATROSKA_ID_PRIMARIES 0x55BB
+
// An EBML element ID (e.g., one of the MATROSKA_ID_* values above);
// constructed with "stripLeading1" False.
class EBMLId: public EBMLNumber {
public:
  EBMLId();
  virtual ~EBMLId();

  char const* stringName() const; // used for debugging
};

// An EBML data size; constructed with "stripLeading1" True.
class EBMLDataSize: public EBMLNumber {
public:
  EBMLDataSize();
  virtual ~EBMLDataSize();
};
+
+#endif
diff --git a/liveMedia/FileServerMediaSubsession.cpp b/liveMedia/FileServerMediaSubsession.cpp
new file mode 100644
index 0000000..0eefd8e
--- /dev/null
+++ b/liveMedia/FileServerMediaSubsession.cpp
@@ -0,0 +1,34 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a file.
+// Implementation
+
+#include "FileServerMediaSubsession.hh"
+
FileServerMediaSubsession
::FileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
			    Boolean reuseFirstSource)
  : OnDemandServerMediaSubsession(env, reuseFirstSource),
    fFileSize(0) {
  fFileName = strDup(fileName); // deep copy; released in the destructor
}

FileServerMediaSubsession::~FileServerMediaSubsession() {
  // "fFileName" is declared "char const*", so cast away const to free our copy:
  delete[] (char*)fFileName;
}
diff --git a/liveMedia/FileSink.cpp b/liveMedia/FileSink.cpp
new file mode 100644
index 0000000..8efd90a
--- /dev/null
+++ b/liveMedia/FileSink.cpp
@@ -0,0 +1,151 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// File sinks
+// Implementation
+
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+#include <io.h>
+#include <fcntl.h>
+#endif
+#include "FileSink.hh"
+#include "GroupsockHelper.hh"
+#include "OutputFile.hh"
+
+////////// FileSink //////////
+
// "fid" is the (already-open) output file, or NULL in 'one file per frame'
// mode, in which case "perFrameFileNamePrefix" is non-NULL and each frame's
// file is opened on the fly in "addData()".
FileSink::FileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize,
		   char const* perFrameFileNamePrefix)
  : MediaSink(env), fOutFid(fid), fBufferSize(bufferSize), fSamePresentationTimeCounter(0) {
  fBuffer = new unsigned char[bufferSize];
  if (perFrameFileNamePrefix != NULL) {
    fPerFrameFileNamePrefix = strDup(perFrameFileNamePrefix);
    // +100 leaves room for the "-<sec>.<usec>[-<counter>]" suffix built in "addData()":
    fPerFrameFileNameBuffer = new char[strlen(perFrameFileNamePrefix) + 100];
  } else {
    fPerFrameFileNamePrefix = NULL;
    fPerFrameFileNameBuffer = NULL;
  }
  // Start with an 'impossible' previous presentation time (~0 seconds), so the
  // first frame won't be mistaken for a repeat in "addData()":
  fPrevPresentationTime.tv_sec = ~0; fPrevPresentationTime.tv_usec = 0;
}

FileSink::~FileSink() {
  delete[] fPerFrameFileNameBuffer;
  delete[] fPerFrameFileNamePrefix;
  delete[] fBuffer;
  if (fOutFid != NULL) fclose(fOutFid);
}
+
+FileSink* FileSink::createNew(UsageEnvironment& env, char const* fileName,
+ unsigned bufferSize, Boolean oneFilePerFrame) {
+ do {
+ FILE* fid;
+ char const* perFrameFileNamePrefix;
+ if (oneFilePerFrame) {
+ // Create the fid for each frame
+ fid = NULL;
+ perFrameFileNamePrefix = fileName;
+ } else {
+ // Normal case: create the fid once
+ fid = OpenOutputFile(env, fileName);
+ if (fid == NULL) break;
+ perFrameFileNamePrefix = NULL;
+ }
+
+ return new FileSink(env, fid, bufferSize, perFrameFileNamePrefix);
+ } while (0);
+
+ return NULL;
+}
+
// Asks our upstream source for the next frame; delivery completes in
// "afterGettingFrame()" (or "onSourceClosure()" if the source closes).
Boolean FileSink::continuePlaying() {
  if (fSource == NULL) return False;

  fSource->getNextFrame(fBuffer, fBufferSize,
			afterGettingFrame, this,
			onSourceClosure, this);

  return True;
}

// Static trampoline: forwards to the member-function version below.
void FileSink::afterGettingFrame(void* clientData, unsigned frameSize,
				 unsigned numTruncatedBytes,
				 struct timeval presentationTime,
				 unsigned /*durationInMicroseconds*/) {
  FileSink* sink = (FileSink*)clientData;
  sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
}
+
+void FileSink::addData(unsigned char const* data, unsigned dataSize,
+ struct timeval presentationTime) {
+ if (fPerFrameFileNameBuffer != NULL && fOutFid == NULL) {
+ // Special case: Open a new file on-the-fly for this frame
+ if (presentationTime.tv_usec == fPrevPresentationTime.tv_usec &&
+ presentationTime.tv_sec == fPrevPresentationTime.tv_sec) {
+ // The presentation time is unchanged from the previous frame, so we add a 'counter'
+ // suffix to the file name, to distinguish them:
+ sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu-%u", fPerFrameFileNamePrefix,
+ presentationTime.tv_sec, presentationTime.tv_usec, ++fSamePresentationTimeCounter);
+ } else {
+ sprintf(fPerFrameFileNameBuffer, "%s-%lu.%06lu", fPerFrameFileNamePrefix,
+ presentationTime.tv_sec, presentationTime.tv_usec);
+ fPrevPresentationTime = presentationTime; // for next time
+ fSamePresentationTimeCounter = 0; // for next time
+ }
+ fOutFid = OpenOutputFile(envir(), fPerFrameFileNameBuffer);
+ }
+
+ // Write to our file:
+#ifdef TEST_LOSS
+ static unsigned const framesPerPacket = 10;
+ static unsigned const frameCount = 0;
+ static Boolean const packetIsLost;
+ if ((frameCount++)%framesPerPacket == 0) {
+ packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
+ }
+
+ if (!packetIsLost)
+#endif
+ if (fOutFid != NULL && data != NULL) {
+ fwrite(data, 1, dataSize, fOutFid);
+ }
+}
+
// Completion handler for "getNextFrame()": warns about truncation, writes the
// frame, then (unless the output file has failed) requests the next frame.
void FileSink::afterGettingFrame(unsigned frameSize,
				 unsigned numTruncatedBytes,
				 struct timeval presentationTime) {
  if (numTruncatedBytes > 0) {
    envir() << "FileSink::afterGettingFrame(): The input frame data was too large for our buffer size ("
	    << fBufferSize << ").  "
            << numTruncatedBytes << " bytes of trailing data was dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call to at least "
            << fBufferSize + numTruncatedBytes << "\n";
  }
  addData(fBuffer, frameSize, presentationTime);

  // fflush() failing (or a NULL fid) means the output file has failed/closed:
  if (fOutFid == NULL || fflush(fOutFid) == EOF) {
    // The output file has closed.  Handle this the same way as if the input source had closed:
    if (fSource != NULL) fSource->stopGettingFrames();
    onSourceClosure();
    return;
  }

  // In 'one file per frame' mode, close this frame's file ("addData()" will open the next one):
  if (fPerFrameFileNameBuffer != NULL) {
    if (fOutFid != NULL) { fclose(fOutFid); fOutFid = NULL; }
  }

  // Then try getting the next frame:
  continuePlaying();
}
diff --git a/liveMedia/FramedFileSource.cpp b/liveMedia/FramedFileSource.cpp
new file mode 100644
index 0000000..9f0f033
--- /dev/null
+++ b/liveMedia/FramedFileSource.cpp
@@ -0,0 +1,30 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Framed File Sources
+// Implementation
+
+#include "FramedFileSource.hh"
+
+////////// FramedFileSource //////////
+
+// Abstract base for framed sources that read from an open file;
+// it merely records the FILE* for subclasses to use.
+FramedFileSource::FramedFileSource(UsageEnvironment& env, FILE* fid)
+  : FramedSource(env), fFid(fid) {
+}
+
+FramedFileSource::~FramedFileSource() {
+  // NOTE(review): fFid is not closed here; presumably the subclass or the
+  // creator of the FILE* owns it — confirm before relying on this.
+}
diff --git a/liveMedia/FramedFilter.cpp b/liveMedia/FramedFilter.cpp
new file mode 100644
index 0000000..4b64372
--- /dev/null
+++ b/liveMedia/FramedFilter.cpp
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Framed Filters
+// Implementation
+
+#include "FramedFilter.hh"
+
+////////// FramedFilter //////////
+#include <string.h>
+
+// Disconnect from our input source: stop its delivery first, then forget it.
+void FramedFilter::detachInputSource() {
+  if (fInputSource == NULL) return; // already detached
+
+  fInputSource->stopGettingFrames();
+  reassignInputSource(NULL);
+}
+
+// Construct a filter that reads its data from "inputSource".
+FramedFilter::FramedFilter(UsageEnvironment& env,
+                           FramedSource* inputSource)
+  : FramedSource(env),
+    fInputSource(inputSource) {
+}
+
+FramedFilter::~FramedFilter() {
+  // We own our input source (unless it was detached earlier), so close it:
+  Medium::close(fInputSource);
+}
+
+// Default implementations of needed virtual functions. These merely
+// call the same function in the input source - i.e., act like a 'null filter
+
+// 'Null filter' default: report our input source's MIME type
+// (or an empty string if no input source is attached).
+char const* FramedFilter::MIMEtype() const {
+  return fInputSource == NULL ? "" : fInputSource->MIMEtype();
+}
+
+// 'Null filter' default: forward the getAttributes() call to our input source.
+void FramedFilter::getAttributes() const {
+  if (fInputSource != NULL) fInputSource->getAttributes();
+}
+
+// Stop our own delivery, then tell our input source to stop as well.
+void FramedFilter::doStopGettingFrames() {
+  FramedSource::doStopGettingFrames();
+  if (fInputSource != NULL) fInputSource->stopGettingFrames();
+}
diff --git a/liveMedia/FramedSource.cpp b/liveMedia/FramedSource.cpp
new file mode 100644
index 0000000..f572492
--- /dev/null
+++ b/liveMedia/FramedSource.cpp
@@ -0,0 +1,126 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Framed Sources
+// Implementation
+
+#include "FramedSource.hh"
+#include <stdlib.h>
+
+////////// FramedSource //////////
+
+// Base class for media sources that deliver data one frame at a time.
+// No read is pending initially, and no callbacks are registered.
+FramedSource::FramedSource(UsageEnvironment& env)
+  : MediaSource(env),
+    fAfterGettingFunc(NULL), fAfterGettingClientData(NULL),
+    fOnCloseFunc(NULL), fOnCloseClientData(NULL),
+    fIsCurrentlyAwaitingData(False) {
+  fPresentationTime.tv_sec = fPresentationTime.tv_usec = 0; // initially
+}
+
+FramedSource::~FramedSource() {
+}
+
+Boolean FramedSource::isFramedSource() const {
+  return True; // used by lookupByName() to type-check a looked-up source
+}
+
+// Look up a previously-named media source, and verify that it is in fact a
+// "FramedSource".  On success, sets "resultSource" and returns True; on
+// failure, leaves "resultSource" NULL (with an error message set in "env").
+Boolean FramedSource::lookupByName(UsageEnvironment& env, char const* sourceName,
+                                   FramedSource*& resultSource) {
+  resultSource = NULL; // default: lookup failed
+
+  MediaSource* mediaSource;
+  if (!MediaSource::lookupByName(env, sourceName, mediaSource)) return False;
+  if (!mediaSource->isFramedSource()) {
+    env.setResultMsg(sourceName, " is not a framed source");
+    return False;
+  }
+
+  resultSource = (FramedSource*)mediaSource;
+  return True;
+}
+
+// Request this source's next frame, to be delivered into "to" (at most
+// "maxSize" bytes).  "afterGettingFunc" is invoked once the frame arrives;
+// "onCloseFunc" is invoked instead if the source closes first.  Only one
+// read may be outstanding at a time.
+void FramedSource::getNextFrame(unsigned char* to, unsigned maxSize,
+                                afterGettingFunc* afterGettingFunc,
+                                void* afterGettingClientData,
+                                onCloseFunc* onCloseFunc,
+                                void* onCloseClientData) {
+  // Make sure we're not already being read:
+  if (fIsCurrentlyAwaitingData) {
+    envir() << "FramedSource[" << this << "]::getNextFrame(): attempting to read more than once at the same time!\n";
+    envir().internalError();
+  }
+
+  // Record the read parameters and callbacks for the subclass to use:
+  fTo = to;
+  fMaxSize = maxSize;
+  fNumTruncatedBytes = 0; // by default; could be changed by doGetNextFrame()
+  fDurationInMicroseconds = 0; // by default; could be changed by doGetNextFrame()
+  fAfterGettingFunc = afterGettingFunc;
+  fAfterGettingClientData = afterGettingClientData;
+  fOnCloseFunc = onCloseFunc;
+  fOnCloseClientData = onCloseClientData;
+  fIsCurrentlyAwaitingData = True;
+
+  // Let the subclass do the actual frame delivery:
+  doGetNextFrame();
+}
+
+// Static helper: subclasses call this once a frame has been delivered,
+// to invoke the client's 'after getting' callback (if any).
+void FramedSource::afterGetting(FramedSource* source) {
+  source->nextTask() = NULL;
+  source->fIsCurrentlyAwaitingData = False;
+  // indicates that we can be read again
+  // Note that this needs to be done here, in case the "fAfterFunc"
+  // called below tries to read another frame (which it usually will)
+
+  if (source->fAfterGettingFunc != NULL) {
+    (*(source->fAfterGettingFunc))(source->fAfterGettingClientData,
+                                   source->fFrameSize, source->fNumTruncatedBytes,
+                                   source->fPresentationTime,
+                                   source->fDurationInMicroseconds);
+  }
+}
+
+// Static wrapper, so that handleClosure() can be used as a callback:
+void FramedSource::handleClosure(void* clientData) {
+  FramedSource* source = (FramedSource*)clientData;
+  source->handleClosure();
+}
+
+// Called when this source has closed (no more frames will be delivered):
+void FramedSource::handleClosure() {
+  fIsCurrentlyAwaitingData = False; // because we got a close instead
+  if (fOnCloseFunc != NULL) {
+    (*fOnCloseFunc)(fOnCloseClientData);
+  }
+}
+
+// Cancel any pending read: clear our callbacks, then let the subclass
+// perform any specialized cleanup (via doStopGettingFrames()).
+void FramedSource::stopGettingFrames() {
+  fIsCurrentlyAwaitingData = False; // indicates that we can be read again
+  fAfterGettingFunc = NULL;
+  fOnCloseFunc = NULL;
+
+  // Perform any specialized action now:
+  doStopGettingFrames();
+}
+
+void FramedSource::doStopGettingFrames() {
+  // Default implementation: Do nothing except cancel any pending 'delivery' task:
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+  // Subclasses may wish to redefine this function.
+}
+
+unsigned FramedSource::maxFrameSize() const {
+  // By default, this source has no maximum frame size.
+  return 0;
+}
diff --git a/liveMedia/GSMAudioRTPSink.cpp b/liveMedia/GSMAudioRTPSink.cpp
new file mode 100644
index 0000000..ac6ae99
--- /dev/null
+++ b/liveMedia/GSMAudioRTPSink.cpp
@@ -0,0 +1,40 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for GSM audio
+// Implementation
+
+#include "GSMAudioRTPSink.hh"
+
+// GSM audio uses static RTP payload type 3, with an 8000 Hz timestamp
+// frequency (per the RTP audio/video profile):
+GSMAudioRTPSink::GSMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
+  : AudioRTPSink(env, RTPgs, 3, 8000, "GSM") {
+}
+
+GSMAudioRTPSink::~GSMAudioRTPSink() {
+}
+
+GSMAudioRTPSink*
+GSMAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
+  return new GSMAudioRTPSink(env, RTPgs);
+}
+
+Boolean GSMAudioRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+                                 unsigned /*numBytesInFrame*/) const {
+  // Allow at most 5 frames in a single packet:
+  return numFramesUsedSoFar() < 5;
+}
diff --git a/liveMedia/GenericMediaServer.cpp b/liveMedia/GenericMediaServer.cpp
new file mode 100644
index 0000000..0210a74
--- /dev/null
+++ b/liveMedia/GenericMediaServer.cpp
@@ -0,0 +1,405 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic media server class, used to implement a RTSP server, and any other server that uses
+// "ServerMediaSession" objects to describe media to be served.
+// Implementation
+
+#include "GenericMediaServer.hh"
+#include <GroupsockHelper.hh>
+#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
+#define snprintf _snprintf
+#endif
+
+////////// GenericMediaServer implementation //////////
+
+// Register a "ServerMediaSession", keyed by its stream name.  Any existing
+// session with the same name is removed (and possibly deleted) first.
+void GenericMediaServer::addServerMediaSession(ServerMediaSession* serverMediaSession) {
+  if (serverMediaSession == NULL) return;
+
+  char const* sessionName = serverMediaSession->streamName();
+  if (sessionName == NULL) sessionName = "";
+  removeServerMediaSession(sessionName); // in case an existing "ServerMediaSession" with this name already exists
+
+  fServerMediaSessions->Add(sessionName, (void*)serverMediaSession);
+}
+
+// Look up a "ServerMediaSession" by stream name.  Subclasses may redefine
+// this (e.g., to create sessions on demand); the default is a table lookup.
+ServerMediaSession* GenericMediaServer
+::lookupServerMediaSession(char const* streamName, Boolean /*isFirstLookupInSession*/) {
+  // Default implementation:
+  return (ServerMediaSession*)(fServerMediaSessions->Lookup(streamName));
+}
+
+// Unregister a "ServerMediaSession".  It is closed immediately if no client
+// session still references it; otherwise it is flagged for deletion once the
+// last reference goes away.
+void GenericMediaServer::removeServerMediaSession(ServerMediaSession* serverMediaSession) {
+  if (serverMediaSession == NULL) return;
+
+  fServerMediaSessions->Remove(serverMediaSession->streamName());
+  if (serverMediaSession->referenceCount() == 0) {
+    Medium::close(serverMediaSession);
+  } else {
+    serverMediaSession->deleteWhenUnreferenced() = True;
+  }
+}
+
+// Convenience overload: unregister by stream name.
+void GenericMediaServer::removeServerMediaSession(char const* streamName) {
+  removeServerMediaSession(GenericMediaServer::lookupServerMediaSession(streamName));
+}
+
+// Delete every client session that is currently streaming from the given
+// "ServerMediaSession".
+void GenericMediaServer::closeAllClientSessionsForServerMediaSession(ServerMediaSession* serverMediaSession) {
+  if (serverMediaSession == NULL) return;
+
+  HashTable::Iterator* iter = HashTable::Iterator::create(*fClientSessions);
+  GenericMediaServer::ClientSession* clientSession;
+  char const* key; // dummy
+  while ((clientSession = (GenericMediaServer::ClientSession*)(iter->next(key))) != NULL) {
+    if (clientSession->fOurServerMediaSession == serverMediaSession) {
+      // NOTE(review): deleting the session removes it from "fClientSessions"
+      // while we iterate; presumably the iterator tolerates removal of the
+      // current entry — confirm against the HashTable implementation.
+      delete clientSession;
+    }
+  }
+  delete iter;
+}
+
+void GenericMediaServer::closeAllClientSessionsForServerMediaSession(char const* streamName) {
+  closeAllClientSessionsForServerMediaSession(lookupServerMediaSession(streamName));
+}
+
+// Close all client sessions using the session, then unregister it:
+void GenericMediaServer::deleteServerMediaSession(ServerMediaSession* serverMediaSession) {
+  if (serverMediaSession == NULL) return;
+
+  closeAllClientSessionsForServerMediaSession(serverMediaSession);
+  removeServerMediaSession(serverMediaSession);
+}
+
+void GenericMediaServer::deleteServerMediaSession(char const* streamName) {
+  deleteServerMediaSession(lookupServerMediaSession(streamName));
+}
+
+// Construct the server around an already-set-up listening socket (see
+// setUpOurSocket()).  "reclamationSeconds" is the client-inactivity timeout
+// (0 disables reclamation; see ClientSession::noteLiveness()).
+GenericMediaServer
+::GenericMediaServer(UsageEnvironment& env, int ourSocket, Port ourPort,
+                     unsigned reclamationSeconds)
+  : Medium(env),
+    fServerSocket(ourSocket), fServerPort(ourPort), fReclamationSeconds(reclamationSeconds),
+    fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)),
+    fClientConnections(HashTable::create(ONE_WORD_HASH_KEYS)),
+    fClientSessions(HashTable::create(STRING_HASH_KEYS)),
+    fPreviousClientSessionId(0)
+{
+  ignoreSigPipeOnSocket(fServerSocket); // so that clients on the same host that are killed don't also kill us
+
+  // Arrange to handle connections from others:
+  env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket, incomingConnectionHandler, this);
+}
+
+GenericMediaServer::~GenericMediaServer() {
+  // Turn off background read handling:
+  envir().taskScheduler().turnOffBackgroundReadHandling(fServerSocket);
+  ::closeSocket(fServerSocket);
+}
+
+void GenericMediaServer::cleanup() {
+  // This member function must be called in the destructor of any subclass of
+  // "GenericMediaServer".  (We don't call this in the destructor of "GenericMediaServer" itself,
+  // because by that time, the subclass destructor will already have been called, and this may
+  // affect (break) the destruction of the "ClientSession" and "ClientConnection" objects, which
+  // themselves will have been subclassed.)
+
+  // Close all client session objects:
+  GenericMediaServer::ClientSession* clientSession;
+  while ((clientSession = (GenericMediaServer::ClientSession*)fClientSessions->getFirst()) != NULL) {
+    delete clientSession; // its destructor removes it from "fClientSessions"
+  }
+  delete fClientSessions;
+
+  // Close all client connection objects:
+  GenericMediaServer::ClientConnection* connection;
+  while ((connection = (GenericMediaServer::ClientConnection*)fClientConnections->getFirst()) != NULL) {
+    delete connection; // its destructor removes it from "fClientConnections"
+  }
+  delete fClientConnections;
+
+  // Delete all server media sessions
+  ServerMediaSession* serverMediaSession;
+  while ((serverMediaSession = (ServerMediaSession*)fServerMediaSessions->getFirst()) != NULL) {
+    removeServerMediaSession(serverMediaSession); // will delete it, because it no longer has any 'client session' objects using it
+  }
+  delete fServerMediaSessions;
+}
+
+#define LISTEN_BACKLOG_SIZE 20
+
+// Create, configure, and start listening on the server's TCP socket.
+// If "ourPort" is 0, a port is chosen by the OS and returned via "ourPort".
+// Returns the socket number, or -1 on failure (with the error in "env").
+int GenericMediaServer::setUpOurSocket(UsageEnvironment& env, Port& ourPort) {
+  int ourSocket = -1;
+
+  do {
+    // The following statement is enabled by default.
+    // Don't disable it (by defining ALLOW_SERVER_PORT_REUSE) unless you know what you're doing.
+#if !defined(ALLOW_SERVER_PORT_REUSE) && !defined(ALLOW_RTSP_SERVER_PORT_REUSE)
+    // ALLOW_RTSP_SERVER_PORT_REUSE is for backwards-compatibility #####
+    NoReuse dummy(env); // Don't use this socket if there's already a local server using it
+#endif
+
+    ourSocket = setupStreamSocket(env, ourPort, True, True);
+    if (ourSocket < 0) break;
+
+    // Make sure we have a big send buffer:
+    if (!increaseSendBufferTo(env, ourSocket, 50*1024)) break;
+
+    // Allow multiple simultaneous connections:
+    if (listen(ourSocket, LISTEN_BACKLOG_SIZE) < 0) {
+      env.setResultErrMsg("listen() failed: ");
+      break;
+    }
+
+    if (ourPort.num() == 0) {
+      // bind() will have chosen a port for us; return it also:
+      if (!getSourcePort(env, ourSocket, ourPort)) break;
+    }
+
+    return ourSocket;
+  } while (0);
+
+  // An error occurred; clean up the (partially-set-up) socket, if any:
+  if (ourSocket != -1) ::closeSocket(ourSocket);
+  return -1;
+}
+
+// Static wrapper, so this can be registered as a background-read callback:
+void GenericMediaServer::incomingConnectionHandler(void* instance, int /*mask*/) {
+  GenericMediaServer* server = (GenericMediaServer*)instance;
+  server->incomingConnectionHandler();
+}
+void GenericMediaServer::incomingConnectionHandler() {
+  incomingConnectionHandlerOnSocket(fServerSocket);
+}
+
+// Accept a new TCP connection on "serverSocket", configure the resulting
+// client socket, and create a "ClientConnection" object to handle it.
+void GenericMediaServer::incomingConnectionHandlerOnSocket(int serverSocket) {
+  struct sockaddr_in clientAddr;
+  SOCKLEN_T clientAddrLen = sizeof clientAddr;
+  int clientSocket = accept(serverSocket, (struct sockaddr*)&clientAddr, &clientAddrLen);
+  if (clientSocket < 0) {
+    int err = envir().getErrno();
+    if (err != EWOULDBLOCK) {
+      // (EWOULDBLOCK is expected for a non-blocking socket; ignore it silently)
+      envir().setResultErrMsg("accept() failed: ");
+    }
+    return;
+  }
+  ignoreSigPipeOnSocket(clientSocket); // so that clients on the same host that are killed don't also kill us
+  makeSocketNonBlocking(clientSocket);
+  increaseSendBufferTo(envir(), clientSocket, 50*1024);
+
+#ifdef DEBUG
+  envir() << "accept()ed connection from " << AddressString(clientAddr).val() << "\n";
+#endif
+
+  // Create a new object for handling this connection:
+  (void)createNewClientConnection(clientSocket, clientAddr);
+}
+
+
+////////// GenericMediaServer::ClientConnection implementation //////////
+
+// Wrap an accepted client socket: register ourself with the server, and
+// arrange for incomingRequestHandler() to run when request bytes arrive.
+GenericMediaServer::ClientConnection
+::ClientConnection(GenericMediaServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
+  : fOurServer(ourServer), fOurSocket(clientSocket), fClientAddr(clientAddr) {
+  // Add ourself to our 'client connections' table:
+  fOurServer.fClientConnections->Add((char const*)this, this);
+
+  // Arrange to handle incoming requests:
+  resetRequestBuffer();
+  envir().taskScheduler()
+    .setBackgroundHandling(fOurSocket, SOCKET_READABLE|SOCKET_EXCEPTION, incomingRequestHandler, this);
+}
+
+GenericMediaServer::ClientConnection::~ClientConnection() {
+  // Remove ourself from the server's 'client connections' hash table before we go:
+  fOurServer.fClientConnections->Remove((char const*)this);
+
+  closeSockets();
+}
+
+void GenericMediaServer::ClientConnection::closeSockets() {
+  // Turn off background handling on our socket:
+  envir().taskScheduler().disableBackgroundHandling(fOurSocket);
+  if (fOurSocket>= 0) ::closeSocket(fOurSocket);
+
+  fOurSocket = -1; // so a second call is a no-op
+}
+
+// Static wrapper, so this can be registered as a background-read callback:
+void GenericMediaServer::ClientConnection::incomingRequestHandler(void* instance, int /*mask*/) {
+  ClientConnection* connection = (ClientConnection*)instance;
+  connection->incomingRequestHandler();
+}
+
+// Read whatever request bytes are available into "fRequestBuffer", then let
+// the (subclass-defined) handleRequestBytes() parse them.
+void GenericMediaServer::ClientConnection::incomingRequestHandler() {
+  struct sockaddr_in dummy; // 'from' address, meaningless in this case
+
+  int bytesRead = readSocket(envir(), fOurSocket, &fRequestBuffer[fRequestBytesAlreadySeen], fRequestBufferBytesLeft, dummy);
+  handleRequestBytes(bytesRead);
+}
+
+// Mark the request buffer as empty again:
+void GenericMediaServer::ClientConnection::resetRequestBuffer() {
+  fRequestBytesAlreadySeen = 0;
+  fRequestBufferBytesLeft = sizeof fRequestBuffer;
+}
+
+
+////////// GenericMediaServer::ClientSession implementation //////////
+
+// A per-client streaming session, identified by a 32-bit session id.
+// noteLiveness() (called here) also schedules the inactivity-timeout task.
+GenericMediaServer::ClientSession
+::ClientSession(GenericMediaServer& ourServer, u_int32_t sessionId)
+  : fOurServer(ourServer), fOurSessionId(sessionId), fOurServerMediaSession(NULL),
+    fLivenessCheckTask(NULL) {
+  noteLiveness();
+}
+
+GenericMediaServer::ClientSession::~ClientSession() {
+  // Turn off any liveness checking:
+  envir().taskScheduler().unscheduleDelayedTask(fLivenessCheckTask);
+
+  // Remove ourself from the server's 'client sessions' hash table before we go.
+  // (Use snprintf() - as the other session-id formatting in this file does -
+  //  rather than sprintf(), to make the bounds of the write explicit.)
+  char sessionIdStr[8+1];
+  snprintf(sessionIdStr, sizeof sessionIdStr, "%08X", fOurSessionId);
+  fOurServer.fClientSessions->Remove(sessionIdStr);
+
+  if (fOurServerMediaSession != NULL) {
+    // Release our reference to the server media session; if we were its last
+    // user and it was flagged for deletion-when-unreferenced, remove it now:
+    fOurServerMediaSession->decrementReferenceCount();
+    if (fOurServerMediaSession->referenceCount() == 0
+        && fOurServerMediaSession->deleteWhenUnreferenced()) {
+      fOurServer.removeServerMediaSession(fOurServerMediaSession);
+      fOurServerMediaSession = NULL;
+    }
+  }
+}
+
+// Record that this client is still active, and (re)start the inactivity
+// timer (if the server has a nonzero reclamation timeout).
+void GenericMediaServer::ClientSession::noteLiveness() {
+#ifdef DEBUG
+  char const* streamName
+    = (fOurServerMediaSession == NULL) ? "???" : fOurServerMediaSession->streamName();
+  fprintf(stderr, "Client session (id \"%08X\", stream name \"%s\"): Liveness indication\n",
+	  fOurSessionId, streamName);
+#endif
+  if (fOurServerMediaSession != NULL) fOurServerMediaSession->noteLiveness();
+
+  if (fOurServer.fReclamationSeconds > 0) {
+    envir().taskScheduler().rescheduleDelayedTask(fLivenessCheckTask,
+						  fOurServer.fReclamationSeconds*1000000,
+						  (TaskFunc*)livenessTimeoutTask, this);
+  }
+}
+
+// Static wrapper, for use as a callback:
+void GenericMediaServer::ClientSession::noteClientLiveness(ClientSession* clientSession) {
+  clientSession->noteLiveness();
+}
+
+void GenericMediaServer::ClientSession::livenessTimeoutTask(ClientSession* clientSession) {
+  // If this gets called, the client session is assumed to have timed out, so delete it:
+#ifdef DEBUG
+  char const* streamName
+    = (clientSession->fOurServerMediaSession == NULL) ? "???" : clientSession->fOurServerMediaSession->streamName();
+  fprintf(stderr, "Client session (id \"%08X\", stream name \"%s\") has timed out (due to inactivity)\n",
+	  clientSession->fOurSessionId, streamName);
+#endif
+  clientSession->fLivenessCheckTask = NULL; // already fired; don't unschedule it again
+  delete clientSession;
+}
+
+// Create a new "ClientSession" with a freshly-chosen, unique session id,
+// and register it in our 'client sessions' table.
+GenericMediaServer::ClientSession* GenericMediaServer::createNewClientSessionWithId() {
+  u_int32_t sessionId;
+  char sessionIdStr[8+1];
+
+  // Choose a random (unused) 32-bit integer for the session id
+  // (it will be encoded as a 8-digit hex number).  (We avoid choosing session id 0,
+  // because that has a special use by some servers.  Similarly, we avoid choosing the same
+  // session id twice in a row.)
+  do {
+    sessionId = (u_int32_t)our_random32();
+    snprintf(sessionIdStr, sizeof sessionIdStr, "%08X", sessionId);
+  } while (sessionId == 0 || sessionId == fPreviousClientSessionId
+	   || lookupClientSession(sessionIdStr) != NULL);
+  fPreviousClientSessionId = sessionId;
+
+  // createNewClientSession() is subclass-defined, and may return NULL:
+  ClientSession* clientSession = createNewClientSession(sessionId);
+  if (clientSession != NULL) fClientSessions->Add(sessionIdStr, clientSession);
+
+  return clientSession;
+}
+
+// Look up a client session by its numeric id (formatted as 8 hex digits,
+// the same key format used when the session was added):
+GenericMediaServer::ClientSession*
+GenericMediaServer::lookupClientSession(u_int32_t sessionId) {
+  char sessionIdStr[8+1];
+  snprintf(sessionIdStr, sizeof sessionIdStr, "%08X", sessionId);
+  return lookupClientSession(sessionIdStr);
+}
+
+GenericMediaServer::ClientSession*
+GenericMediaServer::lookupClientSession(char const* sessionIdStr) {
+  return (GenericMediaServer::ClientSession*)fClientSessions->Lookup(sessionIdStr);
+}
+
+
+////////// ServerMediaSessionIterator implementation //////////
+
+// Iterate over all of a server's registered "ServerMediaSession"s.
+GenericMediaServer::ServerMediaSessionIterator
+::ServerMediaSessionIterator(GenericMediaServer& server)
+  : fOurIterator((server.fServerMediaSessions == NULL)
+		 ? NULL : HashTable::Iterator::create(*server.fServerMediaSessions)) {
+}
+
+GenericMediaServer::ServerMediaSessionIterator::~ServerMediaSessionIterator() {
+  delete fOurIterator;
+}
+
+// Return the next session, or NULL when the iteration is exhausted:
+ServerMediaSession* GenericMediaServer::ServerMediaSessionIterator::next() {
+  if (fOurIterator == NULL) return NULL;
+
+  char const* key; // dummy
+  return (ServerMediaSession*)(fOurIterator->next(key));
+}
+
+
+////////// UserAuthenticationDatabase implementation //////////
+
+// A simple username->password table, with an associated authentication realm.
+// If "passwordsAreMD5" is True, stored passwords are MD5 hashes rather than
+// plaintext (interpretation is up to the caller).
+UserAuthenticationDatabase::UserAuthenticationDatabase(char const* realm,
+						       Boolean passwordsAreMD5)
+  : fTable(HashTable::create(STRING_HASH_KEYS)),
+    fRealm(strDup(realm == NULL ? "LIVE555 Streaming Media" : realm)),
+    fPasswordsAreMD5(passwordsAreMD5) {
+}
+
+UserAuthenticationDatabase::~UserAuthenticationDatabase() {
+  delete[] fRealm;
+
+  // Delete the allocated 'password' strings that we stored in the table, and then the table itself:
+  char* password;
+  while ((password = (char*)fTable->RemoveNext()) != NULL) {
+    delete[] password;
+  }
+  delete fTable;
+}
+
+// Add (or replace) the record for "username"; we store our own copy of
+// "password", and free any previously-stored one.
+void UserAuthenticationDatabase::addUserRecord(char const* username,
+					       char const* password) {
+  char* oldPassword = (char*)fTable->Add(username, (void*)(strDup(password)));
+  delete[] oldPassword; // if any
+}
+
+void UserAuthenticationDatabase::removeUserRecord(char const* username) {
+  char* password = (char*)(fTable->Lookup(username));
+  fTable->Remove(username);
+  delete[] password; // we allocated it in addUserRecord()
+}
+
+// Returns the stored password for "username", or NULL if there is no record.
+char const* UserAuthenticationDatabase::lookupPassword(char const* username) {
+  return (char const*)(fTable->Lookup(username));
+}
diff --git a/liveMedia/H261VideoRTPSource.cpp b/liveMedia/H261VideoRTPSource.cpp
new file mode 100644
index 0000000..0544ea5
--- /dev/null
+++ b/liveMedia/H261VideoRTPSource.cpp
@@ -0,0 +1,67 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.261 Video RTP Sources
+// Implementation
+
+#include "H261VideoRTPSource.hh"
+
+H261VideoRTPSource*
+H261VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+			      unsigned char rtpPayloadFormat,
+			      unsigned rtpTimestampFrequency) {
+  return new H261VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+				rtpTimestampFrequency);
+}
+
+// The last-seen 4-byte H.261 special header is kept in "fLastSpecialHeader"
+// (initially 0), for clients that want to inspect it.
+H261VideoRTPSource
+::H261VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+		     unsigned char rtpPayloadFormat,
+		     unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs,
+			 rtpPayloadFormat, rtpTimestampFrequency),
+    fLastSpecialHeader(0) {
+}
+
+H261VideoRTPSource::~H261VideoRTPSource() {
+}
+
+// Parse the 4-byte H.261 video-specific header at the front of each packet.
+// Returns False (drop the packet) if the packet is too small; otherwise
+// records the header (big-endian) in "fLastSpecialHeader" and reports its
+// size via "resultSpecialHeaderSize".
+Boolean H261VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  // There's a 4-byte video-specific header
+  if (packet->dataSize() < 4) return False;
+
+  unsigned char* headerStart = packet->data();
+  fLastSpecialHeader
+    = (headerStart[0]<<24)|(headerStart[1]<<16)|(headerStart[2]<<8)|headerStart[3];
+
+#ifdef DELIVER_COMPLETE_FRAMES
+  fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame;
+  // whether the *previous* packet ended a frame
+
+  // The RTP "M" (marker) bit indicates the last fragment of a frame:
+  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+#endif
+
+  resultSpecialHeaderSize = 4;
+  return True;
+}
+
+char const* H261VideoRTPSource::MIMEtype() const {
+  return "video/H261";
+}
diff --git a/liveMedia/H263plusVideoFileServerMediaSubsession.cpp b/liveMedia/H263plusVideoFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..71c7d32
--- /dev/null
+++ b/liveMedia/H263plusVideoFileServerMediaSubsession.cpp
@@ -0,0 +1,64 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a H263 video file.
+// Implementation
+
+// Author: Bernhard Feiten. // Based on MPEG4VideoFileServerMediaSubsession
+// Updated by Ross FInlayson (December 2007)
+
+#include "H263plusVideoFileServerMediaSubsession.hh"
+#include "H263plusVideoRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+#include "H263plusVideoStreamFramer.hh"
+
+// A unicast, on-demand subsession serving H.263+ video from a file.
+H263plusVideoFileServerMediaSubsession*
+H263plusVideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
+                                                  char const* fileName,
+                                                  Boolean reuseFirstSource) {
+  return new H263plusVideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+H263plusVideoFileServerMediaSubsession
+::H263plusVideoFileServerMediaSubsession(UsageEnvironment& env,
+                                         char const* fileName,
+                                         Boolean reuseFirstSource)
+  : FileServerMediaSubsession(env, fileName, reuseFirstSource) {
+}
+
+H263plusVideoFileServerMediaSubsession::~H263plusVideoFileServerMediaSubsession() {
+}
+
+// Open the file and wrap it in a framer that parses H.263+ frames from the
+// raw byte stream.  Returns NULL if the file can't be opened.
+FramedSource* H263plusVideoFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+  estBitrate = 500; // kbps, estimate ??
+
+  // Create the video source:
+  ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
+  if (fileSource == NULL) return NULL;
+  fFileSize = fileSource->fileSize();
+
+  // Create a framer for the Video Elementary Stream:
+  return H263plusVideoStreamFramer::createNew(envir(), fileSource);
+}
+
+// Create the RTP sink that packetizes the framed H.263+ stream:
+RTPSink* H263plusVideoFileServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock,
+                                                                  unsigned char rtpPayloadTypeIfDynamic,
+                                                                  FramedSource* /*inputSource*/) {
+  return H263plusVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+}
diff --git a/liveMedia/H263plusVideoRTPSink.cpp b/liveMedia/H263plusVideoRTPSink.cpp
new file mode 100644
index 0000000..595177c
--- /dev/null
+++ b/liveMedia/H263plusVideoRTPSink.cpp
@@ -0,0 +1,91 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.263+ video (RFC 4629)
+// Implementation
+
+#include "H263plusVideoRTPSink.hh"
+
// Constructor (protected; use createNew() instead).  Registers the RTP
// payload format name "H263-1998" (RFC 4629) with the "VideoRTPSink" base.
H263plusVideoRTPSink
::H263plusVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
		       unsigned char rtpPayloadFormat,
		       u_int32_t rtpTimestampFrequency)
  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "H263-1998") {
}
+
// Destructor: nothing beyond the base-class cleanup.
H263plusVideoRTPSink::~H263plusVideoRTPSink() {
}
+
+H263plusVideoRTPSink*
+H263plusVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency) {
+ return new H263plusVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
+}
+
// We never aggregate: each outgoing RTP packet carries (a fragment of)
// exactly one frame.
Boolean H263plusVideoRTPSink
::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
				 unsigned /*numBytesInFrame*/) const {
  // A packet can contain only one frame
  return False;
}
+
+void H263plusVideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+ if (fragmentationOffset == 0) {
+ // This packet contains the first (or only) fragment of the frame.
+ // Set the 'P' bit in the special header:
+ unsigned short specialHeader = 0x0400;
+
+ // Also, reuse the first two bytes of the payload for this special
+ // header. (They should both have been zero.)
+ if (numBytesInFrame < 2) {
+ envir() << "H263plusVideoRTPSink::doSpecialFrameHandling(): bad frame size "
+ << numBytesInFrame << "\n";
+ return;
+ }
+ if (frameStart[0] != 0 || frameStart[1] != 0) {
+ envir() << "H263plusVideoRTPSink::doSpecialFrameHandling(): unexpected non-zero first two bytes!\n";
+ }
+ frameStart[0] = specialHeader>>8;
+ frameStart[1] = (unsigned char)specialHeader;
+ } else {
+ unsigned short specialHeader = 0;
+ setSpecialHeaderBytes((unsigned char*)&specialHeader, 2);
+ }
+
+ if (numRemainingBytes == 0) {
+ // This packet contains the last (or only) fragment of the frame.
+ // Set the RTP 'M' ('marker') bit:
+ setMarkerBit();
+ }
+
+ // Also set the RTP timestamp:
+ setTimestamp(framePresentationTime);
+}
+
+
+unsigned H263plusVideoRTPSink::specialHeaderSize() const {
+ // There's a 2-byte special video header. However, if we're the first
+ // (or only) fragment of a frame, then we reuse the first 2 bytes of
+ // the payload instead.
+ return (curFragmentationOffset() == 0) ? 0 : 2;
+}
diff --git a/liveMedia/H263plusVideoRTPSource.cpp b/liveMedia/H263plusVideoRTPSource.cpp
new file mode 100644
index 0000000..c5fc2ba
--- /dev/null
+++ b/liveMedia/H263plusVideoRTPSource.cpp
@@ -0,0 +1,106 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.263+ Video RTP Sources
+// Implementation
+
+#include "H263plusVideoRTPSource.hh"
+
+H263plusVideoRTPSource*
+H263plusVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+ return new H263plusVideoRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency);
+}
+
// Constructor (protected; use createNew() instead).  Starts with an empty
// record of saved special-header bytes.
H263plusVideoRTPSource
::H263plusVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
			 unsigned char rtpPayloadFormat,
			 unsigned rtpTimestampFrequency)
  : MultiFramedRTPSource(env, RTPgs,
			 rtpPayloadFormat, rtpTimestampFrequency),
  fNumSpecialHeaders(0), fSpecialHeaderBytesLength(0) {
}
+
// Destructor: nothing beyond the base-class cleanup.
H263plusVideoRTPSource::~H263plusVideoRTPSource() {
}
+
// Parse the RFC 4629 payload header at the front of "packet".  On success,
// returns True and sets "resultSpecialHeaderSize" to the number of header
// bytes to strip from the payload; returns False if the packet is too
// short for the header it advertises.
Boolean H263plusVideoRTPSource
::processSpecialHeader(BufferedPacket* packet,
		       unsigned& resultSpecialHeaderSize) {
  unsigned char* headerStart = packet->data();
  unsigned packetSize = packet->dataSize();

  // The H.263+ payload header is at least 2 bytes in size.
  // Extract the known fields from the first 2 bytes:
  unsigned expectedHeaderSize = 2;
  if (packetSize < expectedHeaderSize) return False;

  //unsigned char RR = headerStart[0]>>3;
  Boolean P = (headerStart[0]&0x4) != 0; // picture start (frame begins here)
  Boolean V = (headerStart[0]&0x2) != 0; // a VRC byte follows
  unsigned char PLEN = ((headerStart[0]&0x1)<<5)|(headerStart[1]>>3); // extra picture header length
  //unsigned char PEBIT = headerStart[1]&0x7;

  if (V) {
    // There's an extra VRC byte at the end of the header:
    ++expectedHeaderSize;
    if (packetSize < expectedHeaderSize) return False;
  }

  if (PLEN > 0) {
    // There's an extra picture header at the end:
    expectedHeaderSize += PLEN;
    if (packetSize < expectedHeaderSize) return False;
  }

  fCurrentPacketBeginsFrame = P;
  if (fCurrentPacketBeginsFrame) {
    // A new frame starts here, so restart the saved-header accounting:
    fNumSpecialHeaders = fSpecialHeaderBytesLength = 0;
  }

  // Make a copy of the special header bytes, in case a reader
  // can use them:
  unsigned bytesAvailable
    = SPECIAL_HEADER_BUFFER_SIZE - fSpecialHeaderBytesLength - 1;
  if (expectedHeaderSize <= bytesAvailable) {
    // Store a length byte, then the header bytes themselves:
    fSpecialHeaderBytes[fSpecialHeaderBytesLength++] = expectedHeaderSize;
    for (unsigned i = 0; i < expectedHeaderSize; ++i) {
      fSpecialHeaderBytes[fSpecialHeaderBytesLength++] = headerStart[i];
    }
    fPacketSizes[fNumSpecialHeaders++] = packetSize;
  }

  if (P) {
    // Prepend two zero bytes to the start of the payload proper.
    // Hack: Do this by shrinking this special header by 2 bytes:
    // (this restores the two zero start-code bytes the sender overwrote)
    expectedHeaderSize -= 2;
    headerStart[expectedHeaderSize] = 0;
    headerStart[expectedHeaderSize+1] = 0;
  }

  // The RTP "M" (marker) bit indicates the last fragment of a frame:
  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();

  resultSpecialHeaderSize = expectedHeaderSize;
  return True;
}
+
// The MIME type for this RTP payload format (RFC 4629):
char const* H263plusVideoRTPSource::MIMEtype() const {
  return "video/H263-1998";
}
diff --git a/liveMedia/H263plusVideoStreamFramer.cpp b/liveMedia/H263plusVideoStreamFramer.cpp
new file mode 100644
index 0000000..d5649ef
--- /dev/null
+++ b/liveMedia/H263plusVideoStreamFramer.cpp
@@ -0,0 +1,129 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Author Bernhard Feiten
+// A filter that breaks up an H.263plus video stream into frames.
+//
+
+#include "H263plusVideoStreamFramer.hh"
+#include "H263plusVideoStreamParser.hh"
+
+#include <string.h>
+#include <GroupsockHelper.hh>
+
+
+///////////////////////////////////////////////////////////////////////////////
+////////// H263plusVideoStreamFramer implementation //////////
+//public///////////////////////////////////////////////////////////////////////
+H263plusVideoStreamFramer* H263plusVideoStreamFramer::createNew(
+ UsageEnvironment& env,
+ FramedSource* inputSource)
+{
+ // Need to add source type checking here??? #####
+ H263plusVideoStreamFramer* fr;
+ fr = new H263plusVideoStreamFramer(env, inputSource);
+ return fr;
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
// Constructor.  When "createParser" is False, no parser is created and
// fParser is left NULL (for use by subclasses that supply their own).
H263plusVideoStreamFramer::H263plusVideoStreamFramer(
                              UsageEnvironment& env,
                              FramedSource* inputSource,
                              Boolean createParser)
  : FramedFilter(env, inputSource),
    fFrameRate(0.0), // until we learn otherwise
    fPictureEndMarker(False)
{
  // Use the current wallclock time as the base 'presentation time':
  gettimeofday(&fPresentationTimeBase, NULL);
  fParser = createParser ? new H263plusVideoStreamParser(this, inputSource) : NULL;
}
+
+///////////////////////////////////////////////////////////////////////////////
// Destructor: we own the parser (if one was created), so delete it.
H263plusVideoStreamFramer::~H263plusVideoStreamFramer()
{
  delete fParser;
}
+
+
+///////////////////////////////////////////////////////////////////////////////
// Called when our downstream reader wants the next frame: tell the parser
// where to deliver it (fTo/fMaxSize were set by the base class), then
// start/resume parsing.
// NOTE(review): assumes fParser != NULL, i.e. construction with
// createParser == True — confirm against subclass usage.
void H263plusVideoStreamFramer::doGetNextFrame()
{
  fParser->registerReadInterest(fTo, fMaxSize);
  continueReadProcessing();
}
+
+
+///////////////////////////////////////////////////////////////////////////////
// RTTI-style type query used elsewhere in the library.
Boolean H263plusVideoStreamFramer::isH263plusVideoStreamFramer() const
{
  return True;
}
+
+///////////////////////////////////////////////////////////////////////////////
+void H263plusVideoStreamFramer::continueReadProcessing(
+ void* clientData,
+ unsigned char* /*ptr*/, unsigned /*size*/,
+ struct timeval /*presentationTime*/)
+{
+ H263plusVideoStreamFramer* framer = (H263plusVideoStreamFramer*)clientData;
+ framer->continueReadProcessing();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+void H263plusVideoStreamFramer::continueReadProcessing()
+{
+ unsigned acquiredFrameSize;
+
+ u_int64_t frameDuration; // in ms
+
+ acquiredFrameSize = fParser->parse(frameDuration);
+// Calculate some average bitrate information (to be adapted)
+// avgBitrate = (totalBytes * 8 * H263_TIMESCALE) / totalDuration;
+
+ if (acquiredFrameSize > 0) {
+ // We were able to acquire a frame from the input.
+ // It has already been copied to the reader's space.
+ fFrameSize = acquiredFrameSize;
+// fNumTruncatedBytes = fParser->numTruncatedBytes(); // not needed so far
+
+ fFrameRate = frameDuration == 0 ? 0.0 : 1000./(long)frameDuration;
+
+ // Compute "fPresentationTime"
+ if (acquiredFrameSize == 5) // first frame
+ fPresentationTime = fPresentationTimeBase;
+ else
+ fPresentationTime.tv_usec += (long) frameDuration*1000;
+
+ while (fPresentationTime.tv_usec >= 1000000) {
+ fPresentationTime.tv_usec -= 1000000;
+ ++fPresentationTime.tv_sec;
+ }
+
+ // Compute "fDurationInMicroseconds"
+ fDurationInMicroseconds = (unsigned int) frameDuration*1000;;
+
+ // Call our own 'after getting' function. Because we're not a 'leaf'
+ // source, we can call this directly, without risking infinite recursion.
+ afterGetting(this);
+ } else {
+ // We were unable to parse a complete frame from the input, because:
+ // - we had to read more data from the source stream, or
+ // - the source stream has ended.
+ }
+}
diff --git a/liveMedia/H263plusVideoStreamParser.cpp b/liveMedia/H263plusVideoStreamParser.cpp
new file mode 100644
index 0000000..b27a6a1
--- /dev/null
+++ b/liveMedia/H263plusVideoStreamParser.cpp
@@ -0,0 +1,859 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Author Bernhard Feiten
+// A filter that breaks up an H.263plus video stream into frames.
+// Based on MPEG4IP/mp4creator/h263.c
+
+#include "H263plusVideoStreamParser.hh"
+#include "H263plusVideoStreamFramer.hh"
+//#include <string.h>
+//#include "GroupsockHelper.hh"
+
+
// Constructor: attach to "inputSource" via the StreamParser base class,
// arranging for H263plusVideoStreamFramer::continueReadProcessing to be
// called on "usingSource" when new input arrives, and for the source's
// closure handler to run at end-of-stream.
H263plusVideoStreamParser::H263plusVideoStreamParser(
                              H263plusVideoStreamFramer* usingSource,
                              FramedSource* inputSource)
   : StreamParser(inputSource,
                  FramedSource::handleClosure,
                  usingSource,
                  &H263plusVideoStreamFramer::continueReadProcessing,
                  usingSource),
   fUsingSource(usingSource),
   fnextTR(0),
   fcurrentPT(0)
{
   // Zero all parsing state: the start-code state machine, the current and
   // next frame info, the bitrate context, and the saved header bytes:
   memset(fStates, 0, sizeof(fStates));
   memset(&fNextInfo, 0, sizeof(fNextInfo));
   memset(&fCurrentInfo, 0, sizeof(fCurrentInfo));
   memset(&fMaxBitrateCtx, 0, sizeof(fMaxBitrateCtx));
   memset(fNextHeader,0, H263_REQUIRE_HEADER_SIZE_BYTES);
}
+
+///////////////////////////////////////////////////////////////////////////////
// Destructor: no resources of our own to release.
H263plusVideoStreamParser::~H263plusVideoStreamParser()
{
}
+
+///////////////////////////////////////////////////////////////////////////////
// Rewind both the base-class input position and our own output pointer /
// truncation count to the values saved by the last setParseState() call.
void H263plusVideoStreamParser::restoreSavedParserState()
{
   StreamParser::restoreSavedParserState();
   fTo = fSavedTo;
   fNumTruncatedBytes = fSavedNumTruncatedBytes;
}
+
+///////////////////////////////////////////////////////////////////////////////
// Checkpoint the current parse position (output pointer, truncation count,
// and the base class's input position) so a later restoreSavedParserState()
// can rewind to it.
void H263plusVideoStreamParser::setParseState()
{
   fSavedTo = fTo;
   fSavedNumTruncatedBytes = fNumTruncatedBytes;
   saveParserState(); // Needed for the parsing process in StreamParser
}
+
+
+///////////////////////////////////////////////////////////////////////////////
// Record where the next parsed frame should be delivered ("to") and how
// much room is available there ("maxSize"); resets truncation counters.
void H263plusVideoStreamParser::registerReadInterest(
                                   unsigned char* to,
                                   unsigned maxSize)
{
   fStartOfFrame = fTo = fSavedTo = to;
   fLimit = to + maxSize;
   fMaxSize = maxSize;
   fNumTruncatedBytes = fSavedNumTruncatedBytes = 0;
}
+
+///////////////////////////////////////////////////////////////////////////////
+// parse() , derived from H263Creator of MPEG4IP, h263.c
// Acquire (up to) one H.263+ frame from the input stream, copying it to
// the destination registered via registerReadInterest().  Returns the
// frame's size in bytes (0 if no complete frame was available) and sets
// "currentDuration" to the frame's duration in milliseconds.
unsigned H263plusVideoStreamParser::parse(u_int64_t & currentDuration)
{

// u_int8_t       frameBuffer[H263_BUFFER_SIZE]; // The input buffer
   // Pointer which tells LoadNextH263Object where to read data to
// u_int8_t*      pFrameBuffer = fTo + H263_REQUIRE_HEADER_SIZE_BYTES;
   u_int32_t      frameSize; // The current frame size
   // Pointer to receive address of the header data
// u_int8_t*      pCurrentHeader;// = pFrameBuffer;
// u_int64_t      currentDuration; // The current frame's duration
   u_int8_t       trDifference; // The current TR difference
   // The previous TR difference
// u_int8_t       prevTrDifference = H263_BASIC_FRAME_RATE;
// u_int64_t      totalDuration = 0;// Duration accumulator
// u_int64_t      avgBitrate; // Average bitrate
// u_int64_t      totalBytes = 0; // Size accumulator


   try // The get data routines of the class FramedFilter returns an error when
   {   // the buffer is empty. This occurs at the beginning and at the end of the file.
      fCurrentInfo = fNextInfo;

      // Parse 1 frame
      // For the first time, only the first frame's header is returned.
      // The second time the full first frame is returned
      frameSize = parseH263Frame();

      currentDuration = 0;
      if ((frameSize > 0)){
         // We were able to acquire a frame from the input.

         // Parse the returned frame header (if any)
         if (!ParseShortHeader(fTo, &fNextInfo)) {
#ifdef DEBUG
            fprintf(stderr,"H263plusVideoStreamParser: Fatal error\n");
#endif
         }

         trDifference = GetTRDifference(fNextInfo.tr, fCurrentInfo.tr);

         // calculate the current frame duration
         currentDuration = CalculateDuration(trDifference);

         // Accumulate the frame's size and duration for avgBitrate calculation
         //totalDuration += currentDuration;
         //totalBytes += frameSize;
         //  If needed, recalculate bitrate information
         //  if (h263Bitrates)
         //GetMaxBitrate(&fMaxBitrateCtx, frameSize, prevTrDifference);
         //prevTrDifference = trDifference;

         setParseState(); // Needed for the parsing process in StreamParser
      }
   } catch (int /*e*/) {
#ifdef DEBUG
      fprintf(stderr, "H263plusVideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
#endif
      // StreamParser throws when it runs out of buffered input data:
      frameSize=0;
   }

   return frameSize;
}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// parseH263Frame derived from LoadNextH263Object of MPEG4IP
+// - service routine that reads a single frame from the input file.
+// It shall fill the input buffer with data up until - and including - the
+// next start code and shall report back both the number of bytes read and a
+// pointer to the next start code. The first call to this function shall only
+// yield a pointer with 0 data bytes and the last call to this function shall
+// only yield data bytes with a NULL pointer as the next header.
+//
+// TODO: This function only supports valid bit streams. Upon error, it fails
+// without the possibility to recover. A Better idea would be to skip frames
+// until a parsable frame is read from the file.
+//
+// Parameters:
+// ppNextHeader - output parameter that upon return points to the location
+// of the next frame's head in the buffer.
+// This pointer shall be NULL for the last frame read.
+// Returns the total number of bytes read.
+// Uses the FramedSource instantiated by the constructor.
+///////////////////////////////////////////////////////////////////////////////
+int H263plusVideoStreamParser::parseH263Frame( )
+{
+ char row = 0;
+ u_int8_t * bufferIndex = fTo;
+ // The buffer end which will allow the loop to leave place for
+ // the additionalBytesNeeded
+ u_int8_t * bufferEnd = fTo + fMaxSize - ADDITIONAL_BYTES_NEEDED - 1;
+
+ memcpy(fTo, fNextHeader, H263_REQUIRE_HEADER_SIZE_BYTES);
+ bufferIndex += H263_REQUIRE_HEADER_SIZE_BYTES;
+
+
+ // The state table and the following loop implements a state machine enabling
+ // us to read bytes from the file until (and inclusing) the requested
+ // start code (00 00 8X) is found
+
+ // Initialize the states array, if it hasn't been initialized yet...
+ if (!fStates[0][0]) {
+ // One 00 was read
+ fStates[0][0] = 1;
+ // Two sequential 0x00 ware read
+ fStates[1][0] = fStates[2][0] = 2;
+ // A full start code was read
+ fStates[2][128] = fStates[2][129] = fStates[2][130] = fStates[2][131] = -1;
+ }
+
+ // Read data from file into the output buffer until either a start code
+ // is found, or the end of file has been reached.
+ do {
+ *bufferIndex = get1Byte();
+ } while ((bufferIndex < bufferEnd) && // We have place in the buffer
+ ((row = fStates[(unsigned char)row][*(bufferIndex++)]) != -1)); // Start code was not found
+
+ if (row != -1) {
+ fprintf(stderr, "%s: Buffer too small (%u)\n",
+ "h263reader:", bufferEnd - fTo + ADDITIONAL_BYTES_NEEDED);
+ return 0;
+ }
+
+ // Cool ... now we have a start code
+ // Now we just have to read the additionalBytesNeeded
+ getBytes(bufferIndex, ADDITIONAL_BYTES_NEEDED);
+ memcpy(fNextHeader, bufferIndex - H263_STARTCODE_SIZE_BYTES, H263_REQUIRE_HEADER_SIZE_BYTES);
+
+ int sz = bufferIndex - fTo - H263_STARTCODE_SIZE_BYTES;
+
+ if (sz == 5) // first frame
+ memcpy(fTo, fTo+H263_REQUIRE_HEADER_SIZE_BYTES, H263_REQUIRE_HEADER_SIZE_BYTES);
+
+ return sz;
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// ParseShortHeader - service routine that accepts a buffer containing a frame
+// header and extracts relevant codec information from it.
+//
+// NOTE: the first bit in the following comments is 0 (zero).
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | PSC (Picture Start Code=22 bits) | (TR=8 bits) | >
+// |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0| |1 0>
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// < (PTYPE=13 bits) |
+// <. . .|(FMT)|Z|. . . .|
+// +-+-+-+-+-+-+-+-+-+-+-+
+// -> PTYPE.FMT contains a width/height identification
+// -> PTYPE.Z is 1 for P-Frames, 0 for I-Frames
+// Note: When FMT is 111, there is an extended PTYPE...
+//
+// Inputs:
+// headerBuffer - pointer to the current header buffer
+// outputInfoStruct - pointer to the structure receiving the data
+// Outputs:
+// This function returns a structure of important codec-specific
+// information (The Temporal Reference bits, width & height of the current
+// frame and the sync - or "frame type" - bit. It reports success or
+// failure to the calling function.
+////////////////////////////////////////////////////////////////////////////////
// Extract the Temporal Reference, picture dimensions, and frame-type bit
// from the (short, non-PLUSPTYPE) picture header in "headerBuffer",
// storing them in "*outputInfoStruct".  Returns false for unsupported
// (FMT == 111, i.e. extended PTYPE) or invalid formats.
bool H263plusVideoStreamParser::ParseShortHeader(
                                   u_int8_t *headerBuffer,
                                   H263INFO *outputInfoStruct)
{
   u_int8_t fmt = 0;
   // Extract temporal reference (TR) from the buffer (bits 22-29 inclusive)
   outputInfoStruct->tr = (headerBuffer[2] << 6) & 0xC0;  // 2 LS bits out of the 3rd byte
   outputInfoStruct->tr |= (headerBuffer[3] >> 2) & 0x3F; // 6 MS bits out of the 4th byte
   // Extract the FMT part of PTYPE from the buffer (bits 35-37 inclusive)
   fmt = (headerBuffer[4] >> 2) & 0x07; // bits 3-5 out of the 5th byte
   // If PTYPE is not supported, return a failure notice to the calling function
   // FIXME: PLUSPTYPE is not supported
   if (fmt == 0x07) {
      return false;
   }
   // If PTYPE is supported, calculate the current width and height according to
   // a predefined table
   if (!GetWidthAndHeight(fmt, &(outputInfoStruct->width),
                          &(outputInfoStruct->height))) {
      return false;
   }
   // Extract the frame-type bit, which is the 9th bit of PTYPE (bit 38)
   outputInfoStruct->isSyncFrame = !(headerBuffer[4] & 0x02);

   return true;
}
+
+////////////////////////////////////////////////////////////////////////////////
+// GetMaxBitrate- service routine that accepts frame information and
+// derives bitrate information from it. This function uses a sliding window
+// technique to calculate the maximum bitrates in any window of 1 second
+// inside the file.
+// The sliding window is implemented with a table of bitrates for the last
+// second (30 entries - one entry per TR unit).
+//
+// Inputs:
+// ctx - context for this function
+// frameSize - the size of the current frame in bytes
+// frameTRDiff - the "duration" of the frame in TR units
+// Outputs:
+// This function returns the up-to-date maximum bitrate
+////////////////////////////////////////////////////////////////////////////////
+void H263plusVideoStreamParser::GetMaxBitrate( MaxBitrate_CTX *ctx,
+ u_int32_t frameSize,
+ u_int8_t frameTRDiff)
+{
+ if (frameTRDiff == 0)
+ return;
+
+ // Calculate the current frame's bitrate as bits per TR unit (round the result
+ // upwards)
+ u_int32_t frameBitrate = frameSize * 8 / frameTRDiff + 1;
+
+ // for each TRdiff received,
+ while (frameTRDiff--) {
+ // Subtract the oldest bitrate entry from the current bitrate
+ ctx->windowBitrate -= ctx->bitrateTable[ctx->tableIndex];
+ // Update the oldest bitrate entry with the current frame's bitrate
+ ctx->bitrateTable[ctx->tableIndex] = frameBitrate;
+ // Add the current frame's bitrate to the current bitrate
+ ctx->windowBitrate += frameBitrate;
+ // Check if we have a new maximum bitrate
+ if (ctx->windowBitrate > ctx->maxBitrate) {
+ ctx->maxBitrate = ctx->windowBitrate;
+ }
+ // Advance the table index
+ // Wrapping around the bitrateTable size
+ ctx->tableIndex = (ctx->tableIndex + 1) %
+ ( sizeof(ctx->bitrateTable) / sizeof(ctx->bitrateTable[0]) );
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CalculateDuration - service routine that calculates the current frame's
+// duration in milli-seconds using it's duration in TR units.
+// - In order not to accumulate the calculation error, we are using the TR
+// duration to calculate the current and the next frame's presentation time in
+// milli-seconds.
+//
+// Inputs: trDiff - The current frame's duration in TR units
+// Return: The current frame's duration in milli-seconds
+////////////////////////////////////////////////////////////////////////////////
// Compute the current frame's duration (in milliseconds) from its duration
// in TR units.  Rather than converting each frame's trDiff independently
// (which would accumulate rounding error), we track cumulative presentation
// times and return their difference: 1001/H263_BASIC_FRAME_RATE ms per TR unit.
u_int64_t H263plusVideoStreamParser::CalculateDuration(u_int8_t trDiff)
{
  u_int64_t        nextPT;          // The next frame's presentation time in milli-seconds
  u_int64_t        duration;        // The current frame's duration in milli-seconds

  fnextTR += trDiff;
  // Calculate the next frame's presentation time, in milli-seconds
  nextPT = (fnextTR * 1001) / H263_BASIC_FRAME_RATE;
  // The frame's duration is the difference between the next presentation
  // time and the current presentation time.
  duration = nextPT - fcurrentPT;
  // "Remember" the next presentation time for the next time this function is called
  fcurrentPT = nextPT;

  return duration;
}
+
+////////////////////////////////////////////////////////////////////////////////
+bool H263plusVideoStreamParser::GetWidthAndHeight( u_int8_t fmt,
+ u_int16_t *width,
+ u_int16_t *height)
+{
+ // The 'fmt' corresponds to bits 5-7 of the PTYPE
+ static struct {
+ u_int16_t width;
+ u_int16_t height;
+ } const dimensionsTable[8] = {
+ { 0, 0 }, // 000 - 0 - forbidden, generates an error
+ { 128, 96 }, // 001 - 1 - Sub QCIF
+ { 176, 144 }, // 010 - 2 - QCIF
+ { 352, 288 }, // 011 - 3 - CIF
+ { 704, 576 }, // 100 - 4 - 4CIF
+ { 1409, 1152 }, // 101 - 5 - 16CIF
+ { 0, 0 }, // 110 - 6 - reserved, generates an error
+ { 0, 0 } // 111 - 7 - extended, not supported by profile 0
+ };
+
+ if (fmt > 7)
+ return false;
+
+ *width = dimensionsTable[fmt].width;
+ *height = dimensionsTable[fmt].height;
+
+ if (*width == 0)
+ return false;
+
+ return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+u_int8_t H263plusVideoStreamParser::GetTRDifference(
+ u_int8_t nextTR,
+ u_int8_t currentTR)
+{
+ if (currentTR > nextTR) {
+ // Wrap around 255...
+ return nextTR + (256 - currentTR);
+ } else {
+ return nextTR - currentTR;
+ }
+}
+
+
+
+
+
+
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+// this is the h263.c file of MPEG4IP mp4creator
+/*
+#include "mp4creator.h"
+
+// Default timescale for H.263 (1000ms)
+#define H263_TIMESCALE 1000
+// Default H263 frame rate (30fps)
+#define H263_BASIC_FRAME_RATE 30
+
+// Minimum number of bytes needed to parse an H263 header
+#define H263_REQUIRE_HEADER_SIZE_BYTES 5
+// Number of bytes the start code requires
+#define H263_STARTCODE_SIZE_BYTES 3
+// This is the input buffer's size. It should contain
+// 1 frame with the following start code
+#define H263_BUFFER_SIZE 256 * 1024
+// The default max different (in %) betwqeen max and average bitrates
+#define H263_DEFAULT_CBR_TOLERANCE 10
+
+// The following structure holds information extracted from each frame's header:
+typedef struct _H263INFO {
+ u_int8_t tr; // Temporal Reference, used in duration calculation
+ u_int16_t width; // Width of the picture
+ u_int16_t height; // Height of the picture
+ bool isSyncFrame; // Frame type (true = I frame = "sync" frame)
+} H263INFO;
+
+// Context for the GetMaxBitrate function
+typedef struct _MaxBitrate_CTX {
+ u_int32_t bitrateTable[H263_BASIC_FRAME_RATE];// Window of 1 second
+ u_int32_t windowBitrate; // The bitrate of the current window
+ u_int32_t maxBitrate; // The up-to-date maximum bitrate
+ u_int32_t tableIndex; // The next TR unit to update
+} MaxBitrate_CTX;
+
+// Forward declarations:
+static int LoadNextH263Object( FILE *inputFileHandle,
+ u_int8_t *frameBuffer,
+ u_int32_t *frameBufferSize,
+ u_int32_t additionalBytesNeeded,
+ u_int8_t **ppNextHeader);
+
+static bool ParseShortHeader( u_int8_t *headerBuffer,
+ H263INFO *outputInfoStruct);
+
+static u_int8_t GetTRDifference(u_int8_t nextTR,
+ u_int8_t currentTR);
+
+static void GetMaxBitrate( MaxBitrate_CTX *ctx,
+ u_int32_t frameSize,
+ u_int8_t frameTRDiff);
+
+static MP4Duration CalculateDuration(u_int8_t trDiff);
+
+static bool GetWidthAndHeight( u_int8_t fmt,
+ u_int16_t *width,
+ u_int16_t *height);
+
+static char states[3][256];
+/ *
+ * H263Creator - Main function
+ * Inputs:
+ * outputFileHandle - The handle of the output file
+ * inputFileHandle - The handle of the input file
+ * Codec-specific parameters:
+ * H263Level - H.263 Level used for this track
+ * H263Profile - H.263 Profile used for this track
+ * H263Bitrates - A Parameter indicating whether the function
+ * should calculate H263 bitrates or not.
+ * cbrTolerance - CBR tolerance indicates when to set the
+ * average bitrate.
+ * Outputs:
+ * This function returns either the track ID of the newly added track upon
+ * success or a predefined value representing an erroneous state.
+ * /
+MP4TrackId H263Creator(MP4FileHandle outputFileHandle,
+ FILE* inputFileHandle,
+ u_int8_t h263Profile,
+ u_int8_t h263Level,
+ bool h263Bitrates,
+ u_int8_t cbrTolerance)
+{
+ H263INFO nextInfo; // Holds information about the next frame
+ H263INFO currentInfo;// Holds information about the current frame
+ MaxBitrate_CTX maxBitrateCtx;// Context for the GetMaxBitrate function
+ memset(&nextInfo, 0, sizeof(nextInfo));
+ memset(¤tInfo, 0, sizeof(currentInfo));
+ memset(&maxBitrateCtx, 0, sizeof(maxBitrateCtx));
+ memset(states, 0, sizeof(states));
+ u_int8_t frameBuffer[H263_BUFFER_SIZE]; // The input buffer
+ // Pointer which tells LoadNextH263Object where to read data to
+ u_int8_t* pFrameBuffer = frameBuffer + H263_REQUIRE_HEADER_SIZE_BYTES;
+ u_int32_t frameSize; // The current frame size
+ // Pointer to receive address of the header data
+ u_int8_t* pCurrentHeader = pFrameBuffer;
+ MP4Duration currentDuration; // The current frame's duration
+ u_int8_t trDifference; // The current TR difference
+ // The previous TR difference
+ u_int8_t prevTrDifference = H263_BASIC_FRAME_RATE;
+ MP4Duration totalDuration = 0;// Duration accumulator
+ MP4Duration avgBitrate; // Average bitrate
+ u_int64_t totalBytes = 0; // Size accumulator
+ MP4TrackId trackId = MP4_INVALID_TRACK_ID; // Our MP4 track
+ bool stay = true; // loop flag
+
+ while (stay) {
+ currentInfo = nextInfo;
+ memmove(frameBuffer, pCurrentHeader, H263_REQUIRE_HEADER_SIZE_BYTES);
+ frameSize = H263_BUFFER_SIZE - H263_REQUIRE_HEADER_SIZE_BYTES;
+ // Read 1 frame and the next frame's header from the file.
+ // For the first frame, only the first frame's header is returned.
+ // For the last frame, only the last frame's data is returned.
+ if (! LoadNextH263Object(inputFileHandle, pFrameBuffer, &frameSize,
+ H263_REQUIRE_HEADER_SIZE_BYTES - H263_STARTCODE_SIZE_BYTES,
+ &pCurrentHeader))
+ break; // Fatal error ...
+
+ if (pCurrentHeader) {
+ // Parse the returned frame header (if any)
+ if (!ParseShortHeader(pCurrentHeader, &nextInfo))
+ break; // Fatal error
+ trDifference = GetTRDifference(nextInfo.tr, currentInfo.tr);
+ } else {
+ // This is the last frame ... we have to fake the trDifference ...
+ trDifference = 1;
+ // No header data has been read at this iteration, so we have to manually
+ // add the frame's header we read at the previous iteration.
+ // Note that LoadNextH263Object returns the number of bytes read, which
+ // are the current frame's data and the next frame's header
+ frameSize += H263_REQUIRE_HEADER_SIZE_BYTES;
+ // There is no need for the next iteration ...
+ stay = false;
+ }
+
+ // If this is the first iteration ...
+ if (currentInfo.width == 0) {
+ // If we have more data than just the header
+ if ((frameSize > H263_REQUIRE_HEADER_SIZE_BYTES) ||
+ !pCurrentHeader) // Or no header at all
+ break; // Fatal error
+ else
+ continue; // We have only the first frame's header ...
+ }
+
+ if (trackId == MP4_INVALID_TRACK_ID) {
+ // If a track has not been added yet, add the track to the file.
+ trackId = MP4AddH263VideoTrack(outputFileHandle, H263_TIMESCALE,
+ 0, currentInfo.width, currentInfo.height,
+ h263Level, h263Profile, 0, 0);
+ if (trackId == MP4_INVALID_TRACK_ID)
+ break; // Fatal error
+ }
+
+ // calculate the current frame duration
+ currentDuration = CalculateDuration(trDifference);
+ // Write the current frame to the file.
+ if (!MP4WriteSample(outputFileHandle, trackId, frameBuffer, frameSize,
+ currentDuration, 0, currentInfo.isSyncFrame))
+ break; // Fatal error
+
+ // Accumulate the frame's size and duration for avgBitrate calculation
+ totalDuration += currentDuration;
+ totalBytes += frameSize;
+ // If needed, recalculate bitrate information
+ if (h263Bitrates)
+ GetMaxBitrate(&maxBitrateCtx, frameSize, prevTrDifference);
+ prevTrDifference = trDifference;
+ } // while (stay)
+
+ // If this is the last frame,
+ if (!stay) {
+ // If needed and possible, update bitrate information in the file
+ if (h263Bitrates && totalDuration) {
+ avgBitrate = (totalBytes * 8 * H263_TIMESCALE) / totalDuration;
+ if (cbrTolerance == 0)
+ cbrTolerance = H263_DEFAULT_CBR_TOLERANCE;
+ // Same as: if (maxBitrate / avgBitrate > (cbrTolerance + 100) / 100.0)
+ if (maxBitrateCtx.maxBitrate * 100 > (cbrTolerance + 100) * avgBitrate)
+ avgBitrate = 0;
+ MP4SetH263Bitrates(outputFileHandle, trackId,
+ avgBitrate, maxBitrateCtx.maxBitrate);
+ }
+ // Return the newly added track ID
+ return trackId;
+ }
+
+ // If we got to here... something went wrong ...
+ fprintf(stderr,
+ "%s: Could not parse input file, invalid video stream?\n", ProgName);
+ // Upon failure, delete the newly added track if it has been added
+ if (trackId != MP4_INVALID_TRACK_ID) {
+ MP4DeleteTrack(outputFileHandle, trackId);
+ }
+ return MP4_INVALID_TRACK_ID;
+}
+
+/ *
+ * LoadNextH263Object - service routine that reads a single frame from the input
+ * file. It shall fill the input buffer with data up until - and including - the
+ * next start code and shall report back both the number of bytes read and a
+ * pointer to the next start code. The first call to this function shall only
+ * yield a pointer with 0 data bytes and the last call to this function shall
+ * only yield data bytes with a NULL pointer as the next header.
+ *
+ * TODO: This function only supports valid bit streams. Upon error, it fails
+ * without the possibility to recover. A Better idea would be to skip frames
+ * until a parsable frame is read from the file.
+ *
+ * Parameters:
+ * inputFileHandle - The handle of the input file
+ * frameBuffer - buffer where to place read data
+ * frameBufferSize - in/out parameter indicating the size of the buffer on
+ * entry and the number of bytes copied to the buffer upon
+ * return
+ * additionalBytesNeeded - indicates how many additional bytes are to be read
+ * from the next frame's header (over the 3 bytes that
+ * are already read).
+ * NOTE: This number MUST be > 0
+ * ppNextHeader - output parameter that upon return points to the location
+ * of the next frame's head in the buffer
+ * Outputs:
+ * This function returns two pieces of information:
+ * 1. The total number of bytes read.
+ * 2. A Pointer to the header of the next frame. This pointer shall be NULL
+ * for the last frame read.
+ * /
+static int LoadNextH263Object( FILE *inputFileHandle,
+ u_int8_t *frameBuffer,
+ u_int32_t *frameBufferSize,
+ u_int32_t additionalBytesNeeded,
+ u_int8_t **ppNextHeader)
+{
+ // This table and the following loop implements a state machine enabling
+  // us to read bytes from the file until (and including) the requested
+ // start code (00 00 8X) is found
+ char row = 0;
+ u_int8_t *bufferStart = frameBuffer;
+ // The buffer end which will allow the loop to leave place for
+ // the additionalBytesNeeded
+ u_int8_t *bufferEnd = frameBuffer + *frameBufferSize -
+ additionalBytesNeeded - 1;
+
+ // Initialize the states array, if it hasn't been initialized yet...
+ if (!states[0][0]) {
+ // One 00 was read
+ states[0][0] = 1;
+ // Two sequential 0x00 ware read
+ states[1][0] = states[2][0] = 2;
+ // A full start code was read
+ states[2][128] = states[2][129] = states[2][130] = states[2][131] = -1;
+ }
+
+ // Read data from file into the output buffer until either a start code
+ // is found, or the end of file has been reached.
+ do {
+ if (fread(frameBuffer, 1, 1, inputFileHandle) != 1){
+ // EOF or other error before we got a start code
+ *ppNextHeader = NULL;
+ *frameBufferSize = frameBuffer - bufferStart;
+ return 1;
+ }
+ } while ((frameBuffer < bufferEnd) && // We have place in the buffer
+ ((row = states[row][*(frameBuffer++)]) != -1)); // Start code was not found
+ if (row != -1) {
+ fprintf(stderr, "%s: Buffer too small (%u)\n",
+ ProgName, bufferEnd - bufferStart + additionalBytesNeeded);
+ return 0;
+ }
+
+ // Cool ... now we have a start code
+ *ppNextHeader = frameBuffer - H263_STARTCODE_SIZE_BYTES;
+ *frameBufferSize = frameBuffer - bufferStart + additionalBytesNeeded;
+
+ // Now we just have to read the additionalBytesNeeded
+ if(fread(frameBuffer, additionalBytesNeeded, 1, inputFileHandle) != 1) {
+ /// We got a start code but can't read additionalBytesNeeded ... that's a fatal error
+ fprintf(stderr, "%s: Invalid H263 bitstream\n", ProgName);
+ return 0;
+ }
+
+ return 1;
+}
+
+
+/ *
+ * ParseShortHeader - service routine that accepts a buffer containing a frame
+ * header and extracts relevant codec information from it.
+ *
+ * NOTE: the first bit in the following commnets is 0 (zero).
+ *
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | PSC (Picture Start Code=22 bits) | (TR=8 bits) | >
+ * |0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0| |1 0>
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * < (PTYPE=13 bits) |
+ * <. . .|(FMT)|Z|. . . .|
+ * +-+-+-+-+-+-+-+-+-+-+-+
+ * -> PTYPE.FMT contains a width/height identification
+ * -> PTYPE.Z is 1 for P-Frames, 0 for I-Frames
+ * Note: When FMT is 111, there is an extended PTYPE...
+ *
+ * Inputs:
+ * headerBuffer - pointer to the current header buffer
+ * outputInfoStruct - pointer to the structure receiving the data
+ * Outputs:
+ * This function returns a structure of important codec-specific
+ * information (The Temporal Reference bits, width & height of the current
+ * frame and the sync - or "frame type" - bit. It reports success or
+ * failure to the calling function.
+ * /
+static bool ParseShortHeader( u_int8_t *headerBuffer,
+ H263INFO *outputInfoStruct)
+{
+ u_int8_t fmt = 0;
+ // Extract temporal reference (TR) from the buffer (bits 22-29 inclusive)
+ outputInfoStruct->tr = (headerBuffer[2] << 6) & 0xC0; // 2 LS bits out of the 3rd byte
+ outputInfoStruct->tr |= (headerBuffer[3] >> 2) & 0x3F; // 6 MS bits out of the 4th byte
+ // Extract the FMT part of PTYPE from the buffer (bits 35-37 inclusive)
+ fmt = (headerBuffer[4] >> 2) & 0x07; // bits 3-5 ouf of the 5th byte
+ // If PTYPE is not supported, return a failure notice to the calling function
+ // FIXME: PLUSPTYPE is not supported
+ if (fmt == 0x07) {
+ return false;
+ }
+ // If PTYPE is supported, calculate the current width and height according to
+ // a predefined table
+ if (!GetWidthAndHeight(fmt, &(outputInfoStruct->width),
+ &(outputInfoStruct->height))) {
+ return false;
+ }
+ // Extract the frame-type bit, which is the 9th bit of PTYPE (bit 38)
+ outputInfoStruct->isSyncFrame = !(headerBuffer[4] & 0x02);
+
+ return true;
+}
+
+/ *
+ * GetMaxBitrate- service routine that accepts frame information and
+ * derives bitrate information from it. This function uses a sliding window
+ * technique to calculate the maximum bitrates in any window of 1 second
+ * inside the file.
+ * The sliding window is implemented with a table of bitrates for the last
+ * second (30 entries - one entry per TR unit).
+ *
+ * Inputs:
+ * ctx - context for this function
+ * frameSize - the size of the current frame in bytes
+ * frameTRDiff - the "duration" of the frame in TR units
+ * Outputs:
+ * This function returns the up-to-date maximum bitrate
+ * /
+static void GetMaxBitrate( MaxBitrate_CTX *ctx,
+ u_int32_t frameSize,
+ u_int8_t frameTRDiff)
+{
+ if (frameTRDiff == 0)
+ return;
+
+ // Calculate the current frame's bitrate as bits per TR unit (round the result
+ // upwards)
+ u_int32_t frameBitrate = frameSize * 8 / frameTRDiff + 1;
+
+ // for each TRdiff received,
+ while (frameTRDiff--) {
+ // Subtract the oldest bitrate entry from the current bitrate
+ ctx->windowBitrate -= ctx->bitrateTable[ctx->tableIndex];
+ // Update the oldest bitrate entry with the current frame's bitrate
+ ctx->bitrateTable[ctx->tableIndex] = frameBitrate;
+ // Add the current frame's bitrate to the current bitrate
+ ctx->windowBitrate += frameBitrate;
+ // Check if we have a new maximum bitrate
+ if (ctx->windowBitrate > ctx->maxBitrate) {
+ ctx->maxBitrate = ctx->windowBitrate;
+ }
+ // Advance the table index
+ ctx->tableIndex = (ctx->tableIndex + 1) %
+ // Wrapping around the bitrateTable size
+ ( sizeof(ctx->bitrateTable) / sizeof(ctx->bitrateTable[0]) );
+ }
+}
+
+/ *
+ * CalculateDuration - service routine that calculates the current frame's
+ * duration in milli-seconds using it's duration in TR units.
+ * - In order not to accumulate the calculation error, we are using the TR
+ * duration to calculate the current and the next frame's presentation time in
+ * milli-seconds.
+ *
+ * Inputs:
+ * trDiff - The current frame's duration in TR units
+ * Outputs:
+ * The current frame's duration in milli-seconds
+ * /
+static MP4Duration CalculateDuration(u_int8_t trDiff)
+{
+  static u_int32_t nextTR = 0; // The next frame's presentation time in TR units
+  static MP4Duration currentPT = 0; // The current frame's presentation time in milli-seconds
+ MP4Duration nextPT; // The next frame's presentation time in milli-seconds
+ MP4Duration duration; // The current frame's duration in milli-seconds
+
+ nextTR += trDiff;
+ // Calculate the next frame's presentation time, in milli-seconds
+ nextPT = (nextTR * 1001) / H263_BASIC_FRAME_RATE;
+ // The frame's duration is the difference between the next presentation
+ // time and the current presentation time.
+ duration = nextPT - currentPT;
+ // "Remember" the next presentation time for the next time this function is
+ // called
+ currentPT = nextPT;
+
+ return duration;
+}
+
+static bool GetWidthAndHeight( u_int8_t fmt,
+ u_int16_t *width,
+ u_int16_t *height)
+{
+ // The 'fmt' corresponds to bits 5-7 of the PTYPE
+ static struct {
+ u_int16_t width;
+ u_int16_t height;
+ } const dimensionsTable[8] = {
+ { 0, 0 }, // 000 - 0 - forbidden, generates an error
+ { 128, 96 }, // 001 - 1 - Sub QCIF
+ { 176, 144 }, // 010 - 2 - QCIF
+ { 352, 288 }, // 011 - 3 - CIF
+ { 704, 576 }, // 100 - 4 - 4CIF
+    { 1408, 1152 },  // 101 - 5 - 16CIF
+ { 0, 0 }, // 110 - 6 - reserved, generates an error
+ { 0, 0 } // 111 - 7 - extended, not supported by profile 0
+ };
+
+ if (fmt > 7)
+ return false;
+
+ *width = dimensionsTable[fmt].width;
+ *height = dimensionsTable[fmt].height;
+
+ if (*width == 0)
+ return false;
+
+ return true;
+}
+
+static u_int8_t GetTRDifference(u_int8_t nextTR,
+ u_int8_t currentTR)
+{
+ if (currentTR > nextTR) {
+ // Wrap around 255...
+ return nextTR + (256 - currentTR);
+ } else {
+ return nextTR - currentTR;
+ }
+}
+
+*/
+
diff --git a/liveMedia/H263plusVideoStreamParser.hh b/liveMedia/H263plusVideoStreamParser.hh
new file mode 100644
index 0000000..3dd44df
--- /dev/null
+++ b/liveMedia/H263plusVideoStreamParser.hh
@@ -0,0 +1,127 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an H263 video stream into frames.
+// derived from MPEG4IP h263.c
+// Author Benhard Feiten
+
+#ifndef _H263PLUS_VIDEO_STREAM_PARSER_HH
+#define _H263PLUS_VIDEO_STREAM_PARSER_HH
+
+#ifndef _STREAM_PARSER_HH
+#include "StreamParser.hh"
+#endif
+
+
// Default timescale for H.263 (1000 ticks per second, i.e. milliseconds)
#define H263_TIMESCALE 1000

// Default H263 frame rate (30fps)
#define H263_BASIC_FRAME_RATE 30

// Minimum number of bytes needed to parse an H263 header
#define H263_REQUIRE_HEADER_SIZE_BYTES 5

// Number of bytes the start code requires
#define H263_STARTCODE_SIZE_BYTES 3

// This is the input buffer's size. It should contain
// 1 frame with the following start code.
// (Parenthesized so the macro expands safely inside larger expressions,
// e.g. "x % H263_BUFFER_SIZE".)
#define H263_BUFFER_SIZE (256 * 1024)

// additionalBytesNeeded - indicates how many additional bytes are to be read
// from the next frame's header (over the 3 bytes that are already read).
// (Parenthesized: the unparenthesized difference would misparse when the
// macro is used as an operand, e.g. "2 * ADDITIONAL_BYTES_NEEDED".)
#define ADDITIONAL_BYTES_NEEDED (H263_REQUIRE_HEADER_SIZE_BYTES - H263_STARTCODE_SIZE_BYTES)

// The default max difference (in %) between max and average bitrates
#define H263_DEFAULT_CBR_TOLERANCE 10
+
+
+
// The following structure holds information extracted from each frame's header:
typedef struct _H263INFO {
  u_int8_t tr;       // Temporal Reference (8-bit field), used in duration calculation
  u_int16_t width;   // Width of the picture, in pixels
  u_int16_t height;  // Height of the picture, in pixels
  bool isSyncFrame;  // Frame type (true = I frame = "sync" frame)
} H263INFO;

// Sliding-window state used to track the maximum bitrate seen over any
// 1-second window of the stream (one table entry per TR unit):
typedef struct _MaxBitrate_CTX {
  u_int32_t bitrateTable[H263_BASIC_FRAME_RATE]; // Window of 1 second
  u_int32_t windowBitrate; // The bitrate of the current window
  u_int32_t maxBitrate;    // The up-to-date maximum bitrate
  u_int32_t tableIndex;    // The next TR unit to update
} MaxBitrate_CTX;
+
+
// A parser that breaks up an H.263(+) elementary video stream into individual
// frames, tracking per-frame duration (via the Temporal Reference field) and
// bitrate statistics as it goes. Derived from MPEG4IP's h263.c.
class H263plusVideoStreamParser : public StreamParser {

public:
  H263plusVideoStreamParser( class H263plusVideoStreamFramer* usingSource,
                             FramedSource* inputSource);

  virtual ~H263plusVideoStreamParser();

  // Records the buffer ("to", capacity "maxSize") that the next parsed frame
  // should be delivered into.
  void registerReadInterest(unsigned char* to, unsigned maxSize);

  unsigned parse(u_int64_t & currentDuration); // returns the size of the frame that was acquired, or 0 if none
  unsigned numTruncatedBytes() const { return fNumTruncatedBytes; } // The number of truncated bytes (if any)


protected:
// H263plusVideoStreamFramer* usingSource() {
//   return (H263plusVideoStreamFramer*)fUsingSource;
// }
  void setParseState();

// void setParseState(H263plusParseState parseState);


private:
  // Helpers ported from MPEG4IP's h263.c (a commented-out copy of the
  // reference implementation is kept in the corresponding .cpp file):
  int parseH263Frame( );
  bool ParseShortHeader(u_int8_t *headerBuffer, H263INFO *outputInfoStruct);
  void GetMaxBitrate( MaxBitrate_CTX *ctx, u_int32_t frameSize, u_int8_t frameTRDiff);
  u_int64_t CalculateDuration(u_int8_t trDiff);
  bool GetWidthAndHeight( u_int8_t fmt, u_int16_t *width, u_int16_t *height);
  u_int8_t GetTRDifference( u_int8_t nextTR, u_int8_t currentTR);

  virtual void restoreSavedParserState();

protected:
  class H263plusVideoStreamFramer* fUsingSource; // the framer we feed (non-owning)

  unsigned char* fTo;           // destination buffer registered by registerReadInterest()
  unsigned fMaxSize;            // capacity of fTo
  unsigned char* fStartOfFrame; // start of the frame currently being parsed
  unsigned char* fSavedTo;      // saved copy of fTo, for restoreSavedParserState()
  unsigned char* fLimit;        // end of the destination buffer
  unsigned fNumTruncatedBytes;      // bytes dropped because the destination was full
  unsigned fSavedNumTruncatedBytes; // saved copy, for restoreSavedParserState()

private:
  H263INFO fNextInfo;            // Holds information about the next frame
  H263INFO fCurrentInfo;         // Holds information about the current frame
  MaxBitrate_CTX fMaxBitrateCtx; // Context for the GetMaxBitrate function
  // State-machine transition table used to locate start codes
  // (cf. the "states" table in the commented-out reference code):
  char fStates[3][256];
  // Look-ahead copy of the next frame's header bytes:
  u_int8_t fNextHeader[H263_REQUIRE_HEADER_SIZE_BYTES];

  u_int32_t fnextTR;    // The next frame's presentation time in TR units
  u_int64_t fcurrentPT; // The current frame's presentation time in milli-seconds

};
+
+#endif
diff --git a/liveMedia/H264VideoFileServerMediaSubsession.cpp b/liveMedia/H264VideoFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..cd56af1
--- /dev/null
+++ b/liveMedia/H264VideoFileServerMediaSubsession.cpp
@@ -0,0 +1,120 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a H264 video file.
+// Implementation
+
+#include "H264VideoFileServerMediaSubsession.hh"
+#include "H264VideoRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+#include "H264VideoStreamFramer.hh"
+
+H264VideoFileServerMediaSubsession*
+H264VideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* fileName,
+ Boolean reuseFirstSource) {
+ return new H264VideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
// Constructor: delegates to "FileServerMediaSubsession". The aux SDP line is
// computed lazily (see "getAuxSDPLine()" below), so it starts out NULL.
H264VideoFileServerMediaSubsession::H264VideoFileServerMediaSubsession(UsageEnvironment& env,
                                                                       char const* fileName, Boolean reuseFirstSource)
  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
    fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
}

H264VideoFileServerMediaSubsession::~H264VideoFileServerMediaSubsession() {
  delete[] fAuxSDPLine; // allocated via strDup() in checkForAuxSDPLine1()
}
+
+static void afterPlayingDummy(void* clientData) {
+ H264VideoFileServerMediaSubsession* subsess = (H264VideoFileServerMediaSubsession*)clientData;
+ subsess->afterPlayingDummy1();
+}
+
// Called when the dummy 'startPlaying()' (begun in "getAuxSDPLine()") reaches
// end-of-stream before an aux SDP line was obtained.
void H264VideoFileServerMediaSubsession::afterPlayingDummy1() {
  // Unschedule any pending 'checking' task:
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
  // Signal the event loop that we're done:
  setDoneFlag();
}
+
+static void checkForAuxSDPLine(void* clientData) {
+ H264VideoFileServerMediaSubsession* subsess = (H264VideoFileServerMediaSubsession*)clientData;
+ subsess->checkForAuxSDPLine1();
+}
+
// Polls (every 100ms) until the dummy sink has produced an aux SDP line, then
// signals the (nested) event loop run by "getAuxSDPLine()" to stop.
void H264VideoFileServerMediaSubsession::checkForAuxSDPLine1() {
  nextTask() = NULL; // this task is now running; clear the scheduled handle

  char const* dasl;
  if (fAuxSDPLine != NULL) {
    // Signal the event loop that we're done:
    setDoneFlag();
  } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
    // The sink has now seen SPS/PPS; take a private copy of its SDP line:
    fAuxSDPLine = strDup(dasl);
    fDummyRTPSink = NULL;

    // Signal the event loop that we're done:
    setDoneFlag();
  } else if (!fDoneFlag) {
    // try again after a brief delay:
    int uSecsToDelay = 100000; // 100 ms
    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
			      (TaskFunc*)checkForAuxSDPLine, this);
  }
}
+
// Returns the stream's "a=fmtp:" SDP line, computing it on first use by
// briefly 'playing' the file into "rtpSink" until SPS/PPS are discovered.
// May return NULL if the file ends before the parameters appear.
char const* H264VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
  if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)

  if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
    // Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
    // until we start reading the file.  This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
    // and we need to start reading data from our file until this changes.
    fDummyRTPSink = rtpSink;

    // Start reading the file:
    fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);

    // Check whether the sink's 'auxSDPLine()' is ready:
    checkForAuxSDPLine(this);
  }

  // Run a (nested) event loop until "fDoneFlag" is set, either because the
  // aux SDP line became available or because playback finished first:
  envir().taskScheduler().doEventLoop(&fDoneFlag);

  return fAuxSDPLine;
}
+
+FramedSource* H264VideoFileServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+ estBitrate = 500; // kbps, estimate
+
+ // Create the video source:
+ ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
+ if (fileSource == NULL) return NULL;
+ fFileSize = fileSource->fileSize();
+
+ // Create a framer for the Video Elementary Stream:
+ return H264VideoStreamFramer::createNew(envir(), fileSource);
+}
+
+RTPSink* H264VideoFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* /*inputSource*/) {
+ return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+}
diff --git a/liveMedia/H264VideoFileSink.cpp b/liveMedia/H264VideoFileSink.cpp
new file mode 100644
index 0000000..5af2870
--- /dev/null
+++ b/liveMedia/H264VideoFileSink.cpp
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.264 Video File sinks
+// Implementation
+
+#include "H264VideoFileSink.hh"
+#include "OutputFile.hh"
+
////////// H264VideoFileSink //////////

// Constructor: delegates to the common H.264/H.265 file-sink implementation.
// The two trailing NULLs are presumably the H.265-only parameter-set strings
// (cf. the single H.264 "sPropParameterSetsStr") — confirm against
// H264or5VideoFileSink's declaration.
H264VideoFileSink
::H264VideoFileSink(UsageEnvironment& env, FILE* fid,
		    char const* sPropParameterSetsStr,
		    unsigned bufferSize, char const* perFrameFileNamePrefix)
  : H264or5VideoFileSink(env, fid, bufferSize, perFrameFileNamePrefix,
			 sPropParameterSetsStr, NULL, NULL) {
}

H264VideoFileSink::~H264VideoFileSink() {
}
+
+H264VideoFileSink*
+H264VideoFileSink::createNew(UsageEnvironment& env, char const* fileName,
+ char const* sPropParameterSetsStr,
+ unsigned bufferSize, Boolean oneFilePerFrame) {
+ do {
+ FILE* fid;
+ char const* perFrameFileNamePrefix;
+ if (oneFilePerFrame) {
+ // Create the fid for each frame
+ fid = NULL;
+ perFrameFileNamePrefix = fileName;
+ } else {
+ // Normal case: create the fid once
+ fid = OpenOutputFile(env, fileName);
+ if (fid == NULL) break;
+ perFrameFileNamePrefix = NULL;
+ }
+
+ return new H264VideoFileSink(env, fid, sPropParameterSetsStr, bufferSize, perFrameFileNamePrefix);
+ } while (0);
+
+ return NULL;
+}
diff --git a/liveMedia/H264VideoRTPSink.cpp b/liveMedia/H264VideoRTPSink.cpp
new file mode 100644
index 0000000..82fb90b
--- /dev/null
+++ b/liveMedia/H264VideoRTPSink.cpp
@@ -0,0 +1,131 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.264 video (RFC 3984)
+// Implementation
+
+#include "H264VideoRTPSink.hh"
+#include "H264VideoStreamFramer.hh"
+#include "Base64.hh"
+#include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()"
+
+////////// H264VideoRTPSink implementation //////////
+
// Constructor: delegates to the shared H.264/H.265 sink implementation.
// The leading "264" selects H.264-specific behavior in the base class; the
// NULL/0 pair fills the slot that the H.265 variant uses for the VPS
// (consistent with the "getVPSandSPSandPPS()" call in "auxSDPLine()" below).
H264VideoRTPSink
::H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
		   u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize)
  : H264or5VideoRTPSink(264, env, RTPgs, rtpPayloadFormat,
			NULL, 0, sps, spsSize, pps, ppsSize) {
}

H264VideoRTPSink::~H264VideoRTPSink() {
}
+
// Factory: create a sink without out-of-band SPS/PPS; they are obtained later
// from the framer source when needed (see "auxSDPLine()" below).
H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
  return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat);
}

// Factory: create a sink with explicit SPS and PPS NAL units (raw bytes):
H264VideoRTPSink* H264VideoRTPSink
::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
	    u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize) {
  return new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
}
+
+H264VideoRTPSink* H264VideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ char const* sPropParameterSetsStr) {
+ u_int8_t* sps = NULL; unsigned spsSize = 0;
+ u_int8_t* pps = NULL; unsigned ppsSize = 0;
+
+ unsigned numSPropRecords;
+ SPropRecord* sPropRecords = parseSPropParameterSets(sPropParameterSetsStr, numSPropRecords);
+ for (unsigned i = 0; i < numSPropRecords; ++i) {
+ if (sPropRecords[i].sPropLength == 0) continue; // bad data
+ u_int8_t nal_unit_type = (sPropRecords[i].sPropBytes[0])&0x1F;
+ if (nal_unit_type == 7/*SPS*/) {
+ sps = sPropRecords[i].sPropBytes;
+ spsSize = sPropRecords[i].sPropLength;
+ } else if (nal_unit_type == 8/*PPS*/) {
+ pps = sPropRecords[i].sPropBytes;
+ ppsSize = sPropRecords[i].sPropLength;
+ }
+ }
+
+ H264VideoRTPSink* result
+ = new H264VideoRTPSink(env, RTPgs, rtpPayloadFormat, sps, spsSize, pps, ppsSize);
+ delete[] sPropRecords;
+
+ return result;
+}
+
+Boolean H264VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+ // Our source must be an appropriate framer:
+ return source.isH264VideoStreamFramer();
+}
+
// Builds (and caches in "fFmtpSDPLine") the "a=fmtp:" SDP line for this
// stream, containing "profile-level-id" and the Base64-encoded
// "sprop-parameter-sets" (RFC 6184). Returns NULL while SPS/PPS are unknown.
char const* H264VideoRTPSink::auxSDPLine() {
  // Generate a new "a=fmtp:" line each time, using our SPS and PPS (if we have them),
  // otherwise parameters from our framer source (in case they've changed since the last time that
  // we were called):
  H264or5VideoStreamFramer* framerSource = NULL;
  u_int8_t* vpsDummy = NULL; unsigned vpsDummySize = 0; // H.265-only; unused here
  u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize;
  u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize;
  if (sps == NULL || pps == NULL) {
    // We need to get SPS and PPS from our framer source:
    if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source)
    framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
    if (framerSource == NULL) return NULL; // we don't yet have a source

    framerSource->getVPSandSPSandPPS(vpsDummy, vpsDummySize, sps, spsSize, pps, ppsSize);
    if (sps == NULL || pps == NULL) return NULL; // our source isn't ready
  }

  // Set up the "a=fmtp:" SDP line for this stream.
  // "profile-level-id" comes from the raw SPS payload, so first strip any
  // emulation-prevention bytes from a temporary copy:
  u_int8_t* spsWEB = new u_int8_t[spsSize]; // "WEB" means "Without Emulation Bytes"
  unsigned spsWEBSize = removeH264or5EmulationBytes(spsWEB, spsSize, sps, spsSize);
  if (spsWEBSize < 4) { // Bad SPS size => assume our source isn't ready
    delete[] spsWEB;
    return NULL;
  }
  // profile_idc, constraint flags, and level_idc are bytes 1..3 of the SPS:
  u_int32_t profileLevelId = (spsWEB[1]<<16) | (spsWEB[2]<<8) | spsWEB[3];
  delete[] spsWEB;

  // The parameter sets themselves are carried Base64-encoded:
  char* sps_base64 = base64Encode((char*)sps, spsSize);
  char* pps_base64 = base64Encode((char*)pps, ppsSize);

  char const* fmtpFmt =
    "a=fmtp:%d packetization-mode=1"
    ";profile-level-id=%06X"
    ";sprop-parameter-sets=%s,%s\r\n";
  // Upper bound on the formatted length: strlen(fmtpFmt) already counts the
  // "%d"/"%06X"/"%s" placeholder characters, so adding the maximum expanded
  // sizes below always leaves room (including the trailing NUL):
  unsigned fmtpFmtSize = strlen(fmtpFmt)
    + 3 /* max char len */
    + 6 /* 3 bytes in hex */
    + strlen(sps_base64) + strlen(pps_base64);
  char* fmtp = new char[fmtpFmtSize];
  sprintf(fmtp, fmtpFmt,
	  rtpPayloadType(),
	  profileLevelId,
	  sps_base64, pps_base64);

  delete[] sps_base64;
  delete[] pps_base64;

  // Replace any previously cached line with the new one:
  delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp;
  return fFmtpSDPLine;
}
diff --git a/liveMedia/H264VideoRTPSource.cpp b/liveMedia/H264VideoRTPSource.cpp
new file mode 100644
index 0000000..787b109
--- /dev/null
+++ b/liveMedia/H264VideoRTPSource.cpp
@@ -0,0 +1,199 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.264 Video RTP Sources
+// Implementation
+
+#include "H264VideoRTPSource.hh"
+#include "Base64.hh"
+
+////////// H264BufferedPacket and H264BufferedPacketFactory //////////
+
+// A "BufferedPacket" subclass that knows how to split an H.264 aggregation
+// packet (STAP/MTAP) into its individual enclosed NAL units, by consulting
+// the NAL-unit type recorded by its "H264VideoRTPSource".
+class H264BufferedPacket: public BufferedPacket {
+public:
+  H264BufferedPacket(H264VideoRTPSource& ourSource);
+  virtual ~H264BufferedPacket();
+
+private: // redefined virtual functions
+  // Returns the size of the next NAL unit within the packet, and advances
+  // "framePtr" past any per-unit header bytes (size field, DOND, TS offset).
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+                                         unsigned dataSize);
+private:
+  H264VideoRTPSource& fOurSource; // so we can read "fCurPacketNALUnitType"
+};
+
+// Factory used by "MultiFramedRTPSource" to create our packet subclass:
+class H264BufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+///////// H264VideoRTPSource implementation ////////
+
+// Public factory function: creates a new RTP source for receiving H.264 video.
+H264VideoRTPSource*
+H264VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+                              unsigned char rtpPayloadFormat,
+                              unsigned rtpTimestampFrequency) {
+  return new H264VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+                                rtpTimestampFrequency);
+}
+
+// Protected constructor; installs our packet factory so that incoming packets
+// are buffered as "H264BufferedPacket"s (which can de-aggregate STAP/MTAP):
+H264VideoRTPSource
+::H264VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+                     unsigned char rtpPayloadFormat,
+                     unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
+                         new H264BufferedPacketFactory) {
+}
+
+H264VideoRTPSource::~H264VideoRTPSource() {
+}
+
+// Examines the start of the RTP payload for the special packetization headers
+// defined by RFC 6184 (STAP, MTAP, FU), and reports - via
+// "resultSpecialHeaderSize" - how many leading bytes the caller should skip.
+// Returns False if the packet is too short to be valid.
+Boolean H264VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+                       unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+  unsigned numBytesToSkip;
+
+  // Check the 'nal_unit_type' for special 'aggregation' or 'fragmentation' packets:
+  if (packetSize < 1) return False;
+  fCurPacketNALUnitType = (headerStart[0]&0x1F); // low 5 bits of the first payload byte
+  switch (fCurPacketNALUnitType) {
+    case 24: { // STAP-A
+      numBytesToSkip = 1; // discard the type byte
+      break;
+    }
+    case 25: case 26: case 27: { // STAP-B, MTAP16, or MTAP24
+      numBytesToSkip = 3; // discard the type byte, and the initial DON
+      break;
+    }
+    case 28: case 29: { // FU-A or FU-B
+      // For these NALUs, the first two bytes are the FU indicator and the FU header.
+      // If the start bit is set, we reconstruct the original NAL header into byte 1:
+      if (packetSize < 2) return False;
+      unsigned char startBit = headerStart[1]&0x80; // S bit: first fragment of the NAL unit
+      unsigned char endBit = headerStart[1]&0x40;   // E bit: last fragment of the NAL unit
+      if (startBit) {
+        fCurrentPacketBeginsFrame = True;
+
+        // Rebuild the original NAL header from the FU indicator's F/NRI bits
+        // plus the FU header's type bits, then skip just the indicator byte:
+        headerStart[1] = (headerStart[0]&0xE0)|(headerStart[1]&0x1F);
+        numBytesToSkip = 1;
+      } else {
+        // The start bit is not set, so we skip both the FU indicator and header:
+        fCurrentPacketBeginsFrame = False;
+        numBytesToSkip = 2;
+      }
+      fCurrentPacketCompletesFrame = (endBit != 0);
+      break;
+    }
+    default: {
+      // This packet contains one complete NAL unit:
+      fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame = True;
+      numBytesToSkip = 0;
+      break;
+    }
+  }
+
+  resultSpecialHeaderSize = numBytesToSkip;
+  return True;
+}
+
+// The MIME type of the media carried by this source (constant for H.264):
+char const* H264VideoRTPSource::MIMEtype() const {
+  return "video/H264";
+}
+
+// Parses a comma-separated, Base64-encoded "sprop-parameter-sets" string (as
+// found in an SDP "a=fmtp:" line) into an array of binary "SPropRecord"s.
+// On return, "numSPropRecords" holds the array length.  Returns NULL (with
+// "numSPropRecords" == 0) if the input is NULL.  The caller is responsible
+// for delete[]ing the returned array.
+SPropRecord* parseSPropParameterSets(char const* sPropParameterSetsStr,
+                                     // result parameter:
+                                     unsigned& numSPropRecords) {
+  // Make a copy of the input string, so we can replace the commas with '\0's:
+  char* inStr = strDup(sPropParameterSetsStr);
+  if (inStr == NULL) {
+    numSPropRecords = 0;
+    return NULL;
+  }
+
+  // Count the number of commas (and thus the number of parameter sets):
+  numSPropRecords = 1;
+  char* s;
+  for (s = inStr; *s != '\0'; ++s) {
+    if (*s == ',') {
+      ++numSPropRecords;
+      *s = '\0'; // split the copy in place at each comma
+    }
+  }
+
+  // Allocate and fill in the result array:
+  SPropRecord* resultArray = new SPropRecord[numSPropRecords];
+  s = inStr;
+  for (unsigned i = 0; i < numSPropRecords; ++i) {
+    resultArray[i].sPropBytes = base64Decode(s, resultArray[i].sPropLength);
+    s += strlen(s) + 1; // advance past this (now NUL-terminated) segment
+  }
+
+  delete[] inStr;
+  return resultArray;
+}
+
+
+////////// H264BufferedPacket and H264BufferedPacketFactory implementation //////////
+
+H264BufferedPacket::H264BufferedPacket(H264VideoRTPSource& ourSource)
+  : fOurSource(ourSource) {
+}
+
+H264BufferedPacket::~H264BufferedPacket() {
+}
+
+// Returns the size of the next NAL unit enclosed in this packet, advancing
+// "framePtr" past the per-unit header bytes for aggregation packets.
+// For a malformed (too-short) aggregation unit, returns 0; otherwise the
+// result is clamped to the remaining data size.
+unsigned H264BufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  unsigned resultNALUSize = 0; // if an error occurs
+
+  switch (fOurSource.fCurPacketNALUnitType) {
+    case 24: case 25: { // STAP-A or STAP-B
+      // The first two bytes are NALU size:
+      if (dataSize < 2) break;
+      resultNALUSize = (framePtr[0]<<8)|framePtr[1]; // 16-bit big-endian size
+      framePtr += 2;
+      break;
+    }
+    case 26: { // MTAP16
+      // The first two bytes are NALU size.  The next three are the DOND and TS offset:
+      if (dataSize < 5) break;
+      resultNALUSize = (framePtr[0]<<8)|framePtr[1];
+      framePtr += 5;
+      break;
+    }
+    case 27: { // MTAP24
+      // The first two bytes are NALU size.  The next four are the DOND and TS offset:
+      if (dataSize < 6) break;
+      resultNALUSize = (framePtr[0]<<8)|framePtr[1];
+      framePtr += 6;
+      break;
+    }
+    default: {
+      // Common case: We use the entire packet data:
+      return dataSize;
+    }
+  }
+
+  return (resultNALUSize <= dataSize) ? resultNALUSize : dataSize;
+}
+
+// Factory hook: wrap each incoming packet in our H.264-aware subclass.
+BufferedPacket* H264BufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+  return new H264BufferedPacket((H264VideoRTPSource&)(*ourSource));
+}
diff --git a/liveMedia/H264VideoStreamDiscreteFramer.cpp b/liveMedia/H264VideoStreamDiscreteFramer.cpp
new file mode 100644
index 0000000..90e62dd
--- /dev/null
+++ b/liveMedia/H264VideoStreamDiscreteFramer.cpp
@@ -0,0 +1,46 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "H264VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "H264VideoStreamFramer".
+// Implementation
+
+#include "H264VideoStreamDiscreteFramer.hh"
+
+// Public factory function: creates a framer that accepts complete, discrete
+// H.264 NAL units (no start codes) as input.
+H264VideoStreamDiscreteFramer*
+H264VideoStreamDiscreteFramer
+::createNew(UsageEnvironment& env, FramedSource* inputSource,
+            Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters) {
+  return new H264VideoStreamDiscreteFramer(env, inputSource,
+                                           includeStartCodeInOutput, insertAccessUnitDelimiters);
+}
+
+// Protected constructor; 264 selects H.264 behavior in the shared base class:
+H264VideoStreamDiscreteFramer
+::H264VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource,
+                                Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters)
+  : H264or5VideoStreamDiscreteFramer(264, env, inputSource,
+                                     includeStartCodeInOutput, insertAccessUnitDelimiters) {
+}
+
+H264VideoStreamDiscreteFramer::~H264VideoStreamDiscreteFramer() {
+}
+
+// RTTI-style query used elsewhere in the library to identify H.264 framers:
+Boolean H264VideoStreamDiscreteFramer::isH264VideoStreamFramer() const {
+  return True;
+}
diff --git a/liveMedia/H264VideoStreamFramer.cpp b/liveMedia/H264VideoStreamFramer.cpp
new file mode 100644
index 0000000..5ed3cc9
--- /dev/null
+++ b/liveMedia/H264VideoStreamFramer.cpp
@@ -0,0 +1,42 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up a H.264 Video Elementary Stream into NAL units.
+// Implementation
+
+#include "H264VideoStreamFramer.hh"
+
+// Public factory function: creates a framer that parses an H.264 Elementary
+// Stream byte stream into NAL units (note the True => "create a parser"):
+H264VideoStreamFramer* H264VideoStreamFramer
+::createNew(UsageEnvironment& env, FramedSource* inputSource,
+            Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters) {
+  return new H264VideoStreamFramer(env, inputSource, True,
+                                   includeStartCodeInOutput, insertAccessUnitDelimiters);
+}
+
+// Protected constructor; 264 selects H.264 behavior in the shared base class:
+H264VideoStreamFramer
+::H264VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser,
+                        Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters)
+  : H264or5VideoStreamFramer(264, env, inputSource, createParser,
+                             includeStartCodeInOutput, insertAccessUnitDelimiters) {
+}
+
+H264VideoStreamFramer::~H264VideoStreamFramer() {
+}
+
+// RTTI-style query used elsewhere in the library to identify H.264 framers:
+Boolean H264VideoStreamFramer::isH264VideoStreamFramer() const {
+  return True;
+}
diff --git a/liveMedia/H264or5VideoFileSink.cpp b/liveMedia/H264or5VideoFileSink.cpp
new file mode 100644
index 0000000..082285a
--- /dev/null
+++ b/liveMedia/H264or5VideoFileSink.cpp
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.264 or H.265 Video File sinks
+// Implementation
+
+#include "H264or5VideoFileSink.hh"
+#include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()"
+
+////////// H264or5VideoFileSink //////////
+
+// Constructor: records up to three "sprop" (parameter-set) strings - e.g.,
+// VPS/SPS/PPS for H.265, or SPS/PPS for H.264 - so that they can be written
+// to the output file ahead of the first video frame.
+H264or5VideoFileSink
+::H264or5VideoFileSink(UsageEnvironment& env, FILE* fid,
+                       unsigned bufferSize, char const* perFrameFileNamePrefix,
+                       char const* sPropParameterSetsStr1,
+                       char const* sPropParameterSetsStr2,
+                       char const* sPropParameterSetsStr3)
+  : FileSink(env, fid, bufferSize, perFrameFileNamePrefix),
+    fHaveWrittenFirstFrame(False) {
+  // strDup() copies each string (and maps NULL -> NULL); we own the copies:
+  fSPropParameterSetsStr[0] = strDup(sPropParameterSetsStr1);
+  fSPropParameterSetsStr[1] = strDup(sPropParameterSetsStr2);
+  fSPropParameterSetsStr[2] = strDup(sPropParameterSetsStr3);
+}
+
+H264or5VideoFileSink::~H264or5VideoFileSink() {
+  for (unsigned j = 0; j < 3; ++j) delete[] (char*)fSPropParameterSetsStr[j];
+}
+
+// Writes one received NAL unit to the file, preceded by a 4-byte Annex-B
+// start code.  On the very first frame, first decodes and writes any stored
+// "sprop" parameter-set NAL units (each with its own start code).
+void H264or5VideoFileSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) {
+  unsigned char const start_code[4] = {0x00, 0x00, 0x00, 0x01};
+
+  if (!fHaveWrittenFirstFrame) {
+    // If we have NAL units encoded in "sprop parameter strings", prepend these to the file:
+    for (unsigned j = 0; j < 3; ++j) {
+      unsigned numSPropRecords;
+      SPropRecord* sPropRecords
+        = parseSPropParameterSets(fSPropParameterSetsStr[j], numSPropRecords);
+      for (unsigned i = 0; i < numSPropRecords; ++i) {
+        // Skip the start code for empty records; zero-length addData() is a no-op:
+        if (sPropRecords[i].sPropLength > 0) addData(start_code, 4, presentationTime);
+        addData(sPropRecords[i].sPropBytes, sPropRecords[i].sPropLength, presentationTime);
+      }
+      delete[] sPropRecords;
+    }
+    fHaveWrittenFirstFrame = True; // for next time
+  }
+
+  // Write the input data to the file, with the start code in front:
+  addData(start_code, 4, presentationTime);
+
+  // Call the parent class to complete the normal file write with the input data:
+  FileSink::afterGettingFrame(frameSize, numTruncatedBytes, presentationTime);
+}
diff --git a/liveMedia/H264or5VideoRTPSink.cpp b/liveMedia/H264or5VideoRTPSink.cpp
new file mode 100644
index 0000000..c5568b3
--- /dev/null
+++ b/liveMedia/H264or5VideoRTPSink.cpp
@@ -0,0 +1,299 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.264 or H.265 video
+// Implementation
+
+#include "H264or5VideoRTPSink.hh"
+#include "H264or5VideoStreamFramer.hh"
+
+////////// H264or5Fragmenter definition //////////
+
+// Because of the idiosyncrasies of the H.264 RTP payload format, we implement
+// "H264or5VideoRTPSink" using a separate "H264or5Fragmenter" class that delivers,
+// to the "H264or5VideoRTPSink", only fragments that will fit within an outgoing
+// RTP packet. I.e., we implement fragmentation in this separate "H264or5Fragmenter"
+// class, rather than in "H264or5VideoRTPSink".
+// (Note: This class should be used only by "H264or5VideoRTPSink", or a subclass.)
+
+// A filter that buffers one NAL unit at a time from its input source and
+// delivers it downstream either whole (if it fits in one RTP packet) or as a
+// sequence of FU (fragmentation unit) packets.  "fHNumber" (264 or 265)
+// selects the H.264 (RFC 6184) vs. H.265 (RFC 7798) FU header layout.
+class H264or5Fragmenter: public FramedFilter {
+public:
+  H264or5Fragmenter(int hNumber, UsageEnvironment& env, FramedSource* inputSource,
+                    unsigned inputBufferMax, unsigned maxOutputPacketSize);
+  virtual ~H264or5Fragmenter();
+
+  // True iff the fragment most recently delivered ended (or was all of) a NAL unit;
+  // the sink uses this to decide whether to set the RTP 'M' (marker) bit:
+  Boolean lastFragmentCompletedNALUnit() const { return fLastFragmentCompletedNALUnit; }
+
+private: // redefined virtual functions:
+  virtual void doGetNextFrame();
+  virtual void doStopGettingFrames();
+
+private:
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+                                unsigned numTruncatedBytes,
+                                struct timeval presentationTime,
+                                unsigned durationInMicroseconds);
+  void afterGettingFrame1(unsigned frameSize,
+                          unsigned numTruncatedBytes,
+                          struct timeval presentationTime,
+                          unsigned durationInMicroseconds);
+  void reset();
+
+private:
+  int fHNumber;                        // 264 or 265
+  unsigned fInputBufferSize;           // inputBufferMax + 1 (byte 0 is header scratch space)
+  unsigned fMaxOutputPacketSize;       // payload budget of one outgoing RTP packet
+  unsigned char* fInputBuffer;         // holds the NAL unit currently being fragmented
+  unsigned fNumValidDataBytes;         // 1 + size of the buffered NAL unit (1 => buffer empty)
+  unsigned fCurDataOffset;             // next input-buffer offset to deliver from
+  unsigned fSaveNumTruncatedBytes;     // truncation count, reported with the final fragment
+  Boolean fLastFragmentCompletedNALUnit;
+};
+
+
+////////// H264or5VideoRTPSink implementation //////////
+
+// Constructor: stashes private copies of any VPS/SPS/PPS NAL units supplied
+// by the caller (NULL pointers mean "not provided"; they may later be fetched
+// from the upstream framer instead).  The 90000 Hz clock and "H264"/"H265"
+// payload format name follow the respective RTP payload specs.
+H264or5VideoRTPSink
+::H264or5VideoRTPSink(int hNumber,
+                      UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+                      u_int8_t const* vps, unsigned vpsSize,
+                      u_int8_t const* sps, unsigned spsSize,
+                      u_int8_t const* pps, unsigned ppsSize)
+  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, hNumber == 264 ? "H264" : "H265"),
+    fHNumber(hNumber), fOurFragmenter(NULL), fFmtpSDPLine(NULL) {
+  if (vps != NULL) {
+    fVPSSize = vpsSize;
+    fVPS = new u_int8_t[fVPSSize];
+    memmove(fVPS, vps, fVPSSize);
+  } else {
+    fVPSSize = 0;
+    fVPS = NULL;
+  }
+  if (sps != NULL) {
+    fSPSSize = spsSize;
+    fSPS = new u_int8_t[fSPSSize];
+    memmove(fSPS, sps, fSPSSize);
+  } else {
+    fSPSSize = 0;
+    fSPS = NULL;
+  }
+  if (pps != NULL) {
+    fPPSSize = ppsSize;
+    fPPS = new u_int8_t[fPPSSize];
+    memmove(fPPS, pps, fPPSSize);
+  } else {
+    fPPSSize = 0;
+    fPPS = NULL;
+  }
+}
+
+// Destructor: must stop playing *before* the base-class destructor runs,
+// because stopping requires our fragmenter, which we also destroy here.
+H264or5VideoRTPSink::~H264or5VideoRTPSink() {
+  fSource = fOurFragmenter; // hack: in case "fSource" had gotten set to NULL before we were called
+  delete[] fFmtpSDPLine;
+  delete[] fVPS; delete[] fSPS; delete[] fPPS;
+  stopPlaying(); // call this now, because we won't have our 'fragmenter' when the base class destructor calls it later.
+
+  // Close our 'fragmenter' as well:
+  Medium::close(fOurFragmenter);
+  fSource = NULL; // for the base class destructor, which gets called next
+}
+
+// Interposes our 'fragmenter' between the upstream source and this sink, so
+// that this sink only ever sees packet-sized fragments, then resumes normal
+// multi-framed RTP streaming.
+Boolean H264or5VideoRTPSink::continuePlaying() {
+  // First, check whether we have a 'fragmenter' class set up yet.
+  // If not, create it now:
+  if (fOurFragmenter == NULL) {
+    fOurFragmenter = new H264or5Fragmenter(fHNumber, envir(), fSource, OutPacketBuffer::maxSize,
+                                           ourMaxPacketSize() - 12/*RTP hdr size*/);
+  } else {
+    // Reuse the existing fragmenter, but point it at the (possibly new) source:
+    fOurFragmenter->reassignInputSource(fSource);
+  }
+  fSource = fOurFragmenter;
+
+  // Then call the parent class's implementation:
+  return MultiFramedRTPSink::continuePlaying();
+}
+
+// Sets per-packet RTP header fields (marker bit and timestamp) for the
+// fragment that was just packed.  The frame data itself is ignored here.
+void H264or5VideoRTPSink::doSpecialFrameHandling(unsigned /*fragmentationOffset*/,
+                                                 unsigned char* /*frameStart*/,
+                                                 unsigned /*numBytesInFrame*/,
+                                                 struct timeval framePresentationTime,
+                                                 unsigned /*numRemainingBytes*/) {
+  // Set the RTP 'M' (marker) bit iff
+  // 1/ The most recently delivered fragment was the end of (or the only fragment of) an NAL unit, and
+  // 2/ This NAL unit was the last NAL unit of an 'access unit' (i.e. video frame).
+  if (fOurFragmenter != NULL) {
+    H264or5VideoStreamFramer* framerSource
+      = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
+    // This relies on our fragmenter's source being a "H264or5VideoStreamFramer".
+    if (((H264or5Fragmenter*)fOurFragmenter)->lastFragmentCompletedNALUnit()
+        && framerSource != NULL && framerSource->pictureEndMarker()) {
+      setMarkerBit();
+      framerSource->pictureEndMarker() = False; // consume the end-of-picture flag
+    }
+  }
+
+  setTimestamp(framePresentationTime);
+}
+
+// Each outgoing RTP packet carries (a fragment of) exactly one NAL unit, so a
+// new frame may never begin partway through a packet:
+Boolean H264or5VideoRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+                                 unsigned /*numBytesInFrame*/) const {
+  return False;
+}
+
+
+////////// H264or5Fragmenter implementation //////////
+
+// Constructor: allocates the NAL-unit staging buffer.  One extra byte is
+// reserved at offset 0 as scratch space for synthesizing FU headers in front
+// of the payload data (see doGetNextFrame()).
+H264or5Fragmenter::H264or5Fragmenter(int hNumber,
+                                     UsageEnvironment& env, FramedSource* inputSource,
+                                     unsigned inputBufferMax, unsigned maxOutputPacketSize)
+  : FramedFilter(env, inputSource),
+    fHNumber(hNumber),
+    fInputBufferSize(inputBufferMax+1), fMaxOutputPacketSize(maxOutputPacketSize) {
+  fInputBuffer = new unsigned char[fInputBufferSize];
+  reset();
+}
+
+H264or5Fragmenter::~H264or5Fragmenter() {
+  delete[] fInputBuffer;
+  detachInputSource(); // so that the subsequent ~FramedFilter() doesn't delete it
+}
+
+// Delivers the next packet-sized chunk downstream.  If the staging buffer is
+// empty, first reads a fresh NAL unit from the input source (delivery then
+// resumes from afterGettingFrame1()).  Otherwise delivers either the whole
+// NAL unit, the first FU fragment, or a continuation/final FU fragment.
+void H264or5Fragmenter::doGetNextFrame() {
+  if (fNumValidDataBytes == 1) {
+    // We have no NAL unit data currently in the buffer.  Read a new one:
+    // (The read lands at offset 1, keeping byte 0 free for an FU header.)
+    fInputSource->getNextFrame(&fInputBuffer[1], fInputBufferSize - 1,
+                               afterGettingFrame, this,
+                               FramedSource::handleClosure, this);
+  } else {
+    // We have NAL unit data in the buffer.  There are three cases to consider:
+    // 1. There is a new NAL unit in the buffer, and it's small enough to deliver
+    //    to the RTP sink (as is).
+    // 2. There is a new NAL unit in the buffer, but it's too large to deliver to
+    //    the RTP sink in its entirety.  Deliver the first fragment of this data,
+    //    as a FU packet, with one extra preceding header byte (for the "FU header").
+    // 3. There is a NAL unit in the buffer, and we've already delivered some
+    //    fragment(s) of this.  Deliver the next fragment of this data,
+    //    as a FU packet, with two (H.264) or three (H.265) extra preceding header bytes
+    //    (for the "NAL header" and the "FU header").
+
+    if (fMaxSize < fMaxOutputPacketSize) { // shouldn't happen
+      envir() << "H264or5Fragmenter::doGetNextFrame(): fMaxSize ("
+              << fMaxSize << ") is smaller than expected\n";
+    } else {
+      // Never write more than one RTP packet's worth at a time:
+      fMaxSize = fMaxOutputPacketSize;
+    }
+
+    fLastFragmentCompletedNALUnit = True; // by default
+    if (fCurDataOffset == 1) { // case 1 or 2
+      if (fNumValidDataBytes - 1 <= fMaxSize) { // case 1
+        memmove(fTo, &fInputBuffer[1], fNumValidDataBytes - 1);
+        fFrameSize = fNumValidDataBytes - 1;
+        fCurDataOffset = fNumValidDataBytes;
+      } else { // case 2
+        // We need to send the NAL unit data as FU packets.  Deliver the first
+        // packet now.  Note that we add "NAL header" and "FU header" bytes to the front
+        // of the packet (overwriting the existing "NAL header").
+        if (fHNumber == 264) {
+          fInputBuffer[0] = (fInputBuffer[1] & 0xE0) | 28; // FU indicator
+          fInputBuffer[1] = 0x80 | (fInputBuffer[1] & 0x1F); // FU header (with S bit)
+        } else { // 265
+          u_int8_t nal_unit_type = (fInputBuffer[1]&0x7E)>>1;
+          fInputBuffer[0] = (fInputBuffer[1] & 0x81) | (49<<1); // Payload header (1st byte)
+          fInputBuffer[1] = fInputBuffer[2]; // Payload header (2nd byte)
+          fInputBuffer[2] = 0x80 | nal_unit_type; // FU header (with S bit)
+        }
+        memmove(fTo, fInputBuffer, fMaxSize);
+        fFrameSize = fMaxSize;
+        // -1: the synthesized header byte at offset 0 doesn't consume payload:
+        fCurDataOffset += fMaxSize - 1;
+        fLastFragmentCompletedNALUnit = False;
+      }
+    } else { // case 3
+      // We are sending this NAL unit data as FU packets.  We've already sent the
+      // first packet (fragment).  Now, send the next fragment.  Note that we add
+      // "NAL header" and "FU header" bytes to the front.  (We reuse these bytes that
+      // we already sent for the first fragment, but clear the S bit, and add the E
+      // bit if this is the last fragment.)
+      unsigned numExtraHeaderBytes;
+      if (fHNumber == 264) {
+        fInputBuffer[fCurDataOffset-2] = fInputBuffer[0]; // FU indicator
+        fInputBuffer[fCurDataOffset-1] = fInputBuffer[1]&~0x80; // FU header (no S bit)
+        numExtraHeaderBytes = 2;
+      } else { // 265
+        fInputBuffer[fCurDataOffset-3] = fInputBuffer[0]; // Payload header (1st byte)
+        fInputBuffer[fCurDataOffset-2] = fInputBuffer[1]; // Payload header (2nd byte)
+        fInputBuffer[fCurDataOffset-1] = fInputBuffer[2]&~0x80; // FU header (no S bit)
+        numExtraHeaderBytes = 3;
+      }
+      unsigned numBytesToSend = numExtraHeaderBytes + (fNumValidDataBytes - fCurDataOffset);
+      if (numBytesToSend > fMaxSize) {
+        // We can't send all of the remaining data this time:
+        numBytesToSend = fMaxSize;
+        fLastFragmentCompletedNALUnit = False;
+      } else {
+        // This is the last fragment:
+        fInputBuffer[fCurDataOffset-1] |= 0x40; // set the E bit in the FU header
+        // Report any input truncation only once, with the final fragment:
+        fNumTruncatedBytes = fSaveNumTruncatedBytes;
+      }
+      memmove(fTo, &fInputBuffer[fCurDataOffset-numExtraHeaderBytes], numBytesToSend);
+      fFrameSize = numBytesToSend;
+      fCurDataOffset += numBytesToSend - numExtraHeaderBytes;
+    }
+
+    if (fCurDataOffset >= fNumValidDataBytes) {
+      // We're done with this data.  Reset the pointers for receiving new data:
+      fNumValidDataBytes = fCurDataOffset = 1;
+    }
+
+    // Complete delivery to the client:
+    FramedSource::afterGetting(this);
+  }
+}
+
+void H264or5Fragmenter::doStopGettingFrames() {
+  // Make sure that we don't have any stale data fragments lying around, should we later resume:
+  reset();
+  FramedFilter::doStopGettingFrames();
+}
+
+// Static trampoline for the input source's completion callback:
+void H264or5Fragmenter::afterGettingFrame(void* clientData, unsigned frameSize,
+                                          unsigned numTruncatedBytes,
+                                          struct timeval presentationTime,
+                                          unsigned durationInMicroseconds) {
+  H264or5Fragmenter* fragmenter = (H264or5Fragmenter*)clientData;
+  fragmenter->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime,
+                                 durationInMicroseconds);
+}
+
+// Called when a fresh NAL unit has been read into the staging buffer; records
+// its metadata, then re-enters doGetNextFrame() to deliver from the buffer.
+void H264or5Fragmenter::afterGettingFrame1(unsigned frameSize,
+                                           unsigned numTruncatedBytes,
+                                           struct timeval presentationTime,
+                                           unsigned durationInMicroseconds) {
+  fNumValidDataBytes += frameSize; // buffer now holds 1 (scratch byte) + frameSize
+  fSaveNumTruncatedBytes = numTruncatedBytes;
+  fPresentationTime = presentationTime;
+  fDurationInMicroseconds = durationInMicroseconds;
+
+  // Deliver data to the client:
+  doGetNextFrame();
+}
+
+// Returns the fragmentation state to "buffer empty":
+void H264or5Fragmenter::reset() {
+  fNumValidDataBytes = fCurDataOffset = 1;
+  fSaveNumTruncatedBytes = 0;
+  fLastFragmentCompletedNALUnit = True;
+}
diff --git a/liveMedia/H264or5VideoStreamDiscreteFramer.cpp b/liveMedia/H264or5VideoStreamDiscreteFramer.cpp
new file mode 100644
index 0000000..d4d3bbf
--- /dev/null
+++ b/liveMedia/H264or5VideoStreamDiscreteFramer.cpp
@@ -0,0 +1,141 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "H264or5VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "H264or5VideoStreamFramer".
+// Implementation
+
+#include "H264or5VideoStreamDiscreteFramer.hh"
+
+// Constructor: same as the parsing framer, except that no stream parser is
+// created - input is assumed to arrive as complete, discrete NAL units.
+H264or5VideoStreamDiscreteFramer
+::H264or5VideoStreamDiscreteFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource,
+                                   Boolean includeStartCodeInOutput,
+                                   Boolean insertAccessUnitDelimiters)
+  : H264or5VideoStreamFramer(hNumber, env, inputSource, False/*don't create a parser*/,
+                             includeStartCodeInOutput, insertAccessUnitDelimiters) {
+}
+
+H264or5VideoStreamDiscreteFramer::~H264or5VideoStreamDiscreteFramer() {
+}
+
+// Delivers the next NAL unit downstream, optionally prefixed with an Annex-B
+// start code, and - when enabled - emits a synthetic "access unit delimiter"
+// NAL unit at each picture boundary instead of reading from the source.
+void H264or5VideoStreamDiscreteFramer::doGetNextFrame() {
+  if (fIncludeStartCodeInOutput) {
+    // Prepend a 4-byte 'start code' (0x00000001) to the output:
+    if (fMaxSize < 4) { // there's no space
+      fNumTruncatedBytes = 4 - fMaxSize;
+      handleClosure(); // treat an undersized client buffer as end-of-stream
+      return;
+    }
+    *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x01;
+    fMaxSize -= 4;
+  }
+
+  if (fInsertAccessUnitDelimiters && pictureEndMarker()) {
+    // Deliver an "access_unit_delimiter" NAL unit instead:
+    unsigned const audNALSize = fHNumber == 264 ? 2 : 3;
+
+    if (audNALSize > fMaxSize) { // there's no space
+      fNumTruncatedBytes = audNALSize - fMaxSize;
+      handleClosure();
+      return;
+    }
+
+    if (fHNumber == 264) {
+      *fTo++ = 9; // "Access unit delimiter" nal_unit_type
+      *fTo++ = 0xF0; // "primary_pic_type" (7); "rbsp_trailing_bits()"
+    } else { // H.265
+      *fTo++ = 35<<1; // "Access unit delimiter" nal_unit_type
+      *fTo++ = 0; // "nuh_layer_id" (0); "nuh_temporal_id_plus1" (0) (Is this correct??)
+      *fTo++ = 0x50; // "pic_type" (2); "rbsp_trailing_bits()" (Is this correct??)
+    }
+
+    fFrameSize = (fIncludeStartCodeInOutput ? 4: 0) + audNALSize;
+    pictureEndMarker() = False; // for next time
+    afterGetting(this); // complete delivery to the downstream object
+  } else {
+    // Normal case:
+    // Arrange to read data (which should be a complete H.264 or H.265 NAL unit)
+    // from our data source, directly into the client's input buffer.
+    // After reading this, we'll do some parsing on the frame.
+    fInputSource->getNextFrame(fTo, fMaxSize,
+                               afterGettingFrame, this,
+                               FramedSource::handleClosure, this);
+  }
+}
+
+// Static trampoline for the input source's completion callback:
+void H264or5VideoStreamDiscreteFramer
+::afterGettingFrame(void* clientData, unsigned frameSize,
+                    unsigned numTruncatedBytes,
+                    struct timeval presentationTime,
+                    unsigned durationInMicroseconds) {
+  H264or5VideoStreamDiscreteFramer* source = (H264or5VideoStreamDiscreteFramer*)clientData;
+  source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
+}
+
+// Post-processes each received NAL unit: warns about erroneous start codes,
+// saves copies of VPS/SPS/PPS units, updates the picture-end marker, and then
+// completes delivery to the downstream object.
+void H264or5VideoStreamDiscreteFramer
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+                     struct timeval presentationTime,
+                     unsigned durationInMicroseconds) {
+  // Get the "nal_unit_type", to see if this NAL unit is one that we want to save a copy of:
+  u_int8_t nal_unit_type;
+  if (fHNumber == 264 && frameSize >= 1) {
+    nal_unit_type = fTo[0]&0x1F; // H.264: low 5 bits of the first byte
+  } else if (fHNumber == 265 && frameSize >= 2) {
+    nal_unit_type = (fTo[0]&0x7E)>>1; // H.265: bits 1..6 of the first byte
+  } else {
+    // This is too short to be a valid NAL unit, so just assume a bogus nal_unit_type
+    nal_unit_type = 0xFF;
+  }
+
+  // Begin by checking for a (likely) common error: NAL units that (erroneously) begin with a
+  // 0x00000001 or 0x000001 'start code'.  (Those start codes should only be in byte-stream data;
+  // *not* data that consists of discrete NAL units.)
+  // Once again, to be clear: The NAL units that you feed to a "H264or5VideoStreamDiscreteFramer"
+  // MUST NOT include start codes.
+  if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && ((fTo[2] == 0 && fTo[3] == 1) || fTo[2] == 1)) {
+    envir() << "H264or5VideoStreamDiscreteFramer error: MPEG 'start code' seen in the input\n";
+  } else if (isVPS(nal_unit_type)) { // Video parameter set (VPS)
+    saveCopyOfVPS(fTo, frameSize);
+  } else if (isSPS(nal_unit_type)) { // Sequence parameter set (SPS)
+    saveCopyOfSPS(fTo, frameSize);
+  } else if (isPPS(nal_unit_type)) { // Picture parameter set (PPS)
+    saveCopyOfPPS(fTo, frameSize);
+  }
+
+  fPictureEndMarker = nalUnitEndsAccessUnit(nal_unit_type);
+
+  // Finally, complete delivery to the client:
+  // (If we wrote a start code in doGetNextFrame(), account for it here.)
+  fFrameSize = fIncludeStartCodeInOutput ? (4+frameSize) : frameSize;
+  fNumTruncatedBytes = numTruncatedBytes;
+  fPresentationTime = presentationTime;
+  fDurationInMicroseconds = durationInMicroseconds;
+  afterGetting(this);
+}
+
+Boolean H264or5VideoStreamDiscreteFramer::nalUnitEndsAccessUnit(u_int8_t nal_unit_type) {
+  // Check whether this NAL unit ends the current 'access unit' (basically, a video frame).
+  // Unfortunately, we can't do this reliably, because we don't yet know anything about the
+  // *next* NAL unit that we'll see.  So, we guess this as best as we can, by assuming that
+  // if this NAL unit is a VCL NAL unit, then it ends the current 'access unit'.
+  //
+  // This will be wrong if you are streaming multiple 'slices' per picture.  In that case,
+  // you can define a subclass that reimplements this virtual function to do the right thing.
+
+  return isVCL(nal_unit_type);
+}
diff --git a/liveMedia/H264or5VideoStreamFramer.cpp b/liveMedia/H264or5VideoStreamFramer.cpp
new file mode 100644
index 0000000..85ab433
--- /dev/null
+++ b/liveMedia/H264or5VideoStreamFramer.cpp
@@ -0,0 +1,1229 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up a H.264 or H.265 Video Elementary Stream into NAL units.
+// Implementation
+
+#include "H264or5VideoStreamFramer.hh"
+#include "MPEGVideoStreamParser.hh"
+#include "BitVector.hh"
+#include <GroupsockHelper.hh> // for "gettimeofday()"
+
+////////// H264or5VideoStreamParser definition //////////
+
// Parser that scans a H.264 or H.265 Elementary Stream, breaking it into NAL units,
// and extracting timing information (frame rate) from VPS/SPS NAL units as it goes.
class H264or5VideoStreamParser: public MPEGVideoStreamParser {
public:
  H264or5VideoStreamParser(int hNumber, H264or5VideoStreamFramer* usingSource,
                           FramedSource* inputSource, Boolean includeStartCodeInOutput);
  virtual ~H264or5VideoStreamParser();

private: // redefined virtual functions:
  virtual void flushInput();
  virtual unsigned parse();

private:
  // Downcast helper: our source is always a "H264or5VideoStreamFramer":
  H264or5VideoStreamFramer* usingSource() {
    return (H264or5VideoStreamFramer*)fUsingSource;
  }

  // NAL-unit classification.  The first four are delegated to our source object,
  // which knows whether we're parsing H.264 or H.265:
  Boolean isVPS(u_int8_t nal_unit_type) { return usingSource()->isVPS(nal_unit_type); }
  Boolean isSPS(u_int8_t nal_unit_type) { return usingSource()->isSPS(nal_unit_type); }
  Boolean isPPS(u_int8_t nal_unit_type) { return usingSource()->isPPS(nal_unit_type); }
  Boolean isVCL(u_int8_t nal_unit_type) { return usingSource()->isVCL(nal_unit_type); }
  Boolean isSEI(u_int8_t nal_unit_type);
  Boolean isEOF(u_int8_t nal_unit_type);
  Boolean usuallyBeginsAccessUnit(u_int8_t nal_unit_type);

  // Copies the most recently parsed NAL unit into "nalUnitCopy", stripping any
  // 'emulation prevention' bytes; sets "nalUnitCopySize" to the resulting size:
  void removeEmulationBytes(u_int8_t* nalUnitCopy, unsigned maxSize, unsigned& nalUnitCopySize);

  // Bitstream-syntax analysis routines (used to extract frame-timing information):
  void analyze_video_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale);
  void analyze_seq_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale);
  void profile_tier_level(BitVector& bv, unsigned max_sub_layers_minus1);
  void analyze_vui_parameters(BitVector& bv, unsigned& num_units_in_tick, unsigned& time_scale);
  void analyze_hrd_parameters(BitVector& bv);
  void analyze_sei_data(u_int8_t nal_unit_type);
  void analyze_sei_payload(unsigned payloadType, unsigned payloadSize, u_int8_t* payload);

private:
  int fHNumber; // 264 or 265
  unsigned fOutputStartCodeSize; // 4 if we prepend a start code to each output NAL unit, else 0
  Boolean fHaveSeenFirstStartCode, fHaveSeenFirstByteOfNALUnit;
  u_int8_t fFirstByteOfNALUnit;
  double fParsedFrameRate; // frame rate computed from VPS/SPS timing info (0.0 until known)
  // variables set & used in the specification:
  unsigned cpb_removal_delay_length_minus1, dpb_output_delay_length_minus1;
  Boolean CpbDpbDelaysPresentFlag, pic_struct_present_flag;
  double DeltaTfiDivisor;
};
+
+
+////////// H264or5VideoStreamFramer implementation //////////
+
+H264or5VideoStreamFramer
+::H264or5VideoStreamFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource,
+ Boolean createParser,
+ Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters)
+ : MPEGVideoStreamFramer(env, inputSource),
+ fHNumber(hNumber), fIncludeStartCodeInOutput(includeStartCodeInOutput),
+ fInsertAccessUnitDelimiters(insertAccessUnitDelimiters),
+ fLastSeenVPS(NULL), fLastSeenVPSSize(0),
+ fLastSeenSPS(NULL), fLastSeenSPSSize(0),
+ fLastSeenPPS(NULL), fLastSeenPPSSize(0) {
+ fParser = createParser
+ ? new H264or5VideoStreamParser(hNumber, this, inputSource, includeStartCodeInOutput)
+ : NULL;
+ fFrameRate = 25.0; // We assume a frame rate of 25 fps, unless we learn otherwise (from parsing a VPS or SPS NAL unit)
+}
+
+H264or5VideoStreamFramer::~H264or5VideoStreamFramer() {
+ delete[] fLastSeenPPS;
+ delete[] fLastSeenSPS;
+ delete[] fLastSeenVPS;
+}
+
+#define VPS_MAX_SIZE 1000 // larger than the largest possible VPS (Video Parameter Set) NAL unit
+
+void H264or5VideoStreamFramer::saveCopyOfVPS(u_int8_t* from, unsigned size) {
+ if (from == NULL) return;
+ delete[] fLastSeenVPS;
+ fLastSeenVPS = new u_int8_t[size];
+ memmove(fLastSeenVPS, from, size);
+
+ fLastSeenVPSSize = size;
+}
+
+#define SPS_MAX_SIZE 1000 // larger than the largest possible SPS (Sequence Parameter Set) NAL unit
+
+void H264or5VideoStreamFramer::saveCopyOfSPS(u_int8_t* from, unsigned size) {
+ if (from == NULL) return;
+ delete[] fLastSeenSPS;
+ fLastSeenSPS = new u_int8_t[size];
+ memmove(fLastSeenSPS, from, size);
+
+ fLastSeenSPSSize = size;
+}
+
+void H264or5VideoStreamFramer::saveCopyOfPPS(u_int8_t* from, unsigned size) {
+ if (from == NULL) return;
+ delete[] fLastSeenPPS;
+ fLastSeenPPS = new u_int8_t[size];
+ memmove(fLastSeenPPS, from, size);
+
+ fLastSeenPPSSize = size;
+}
+
+void H264or5VideoStreamFramer::setPresentationTime() {
+ if (fPresentationTimeBase.tv_sec == 0 && fPresentationTimeBase.tv_usec == 0) {
+ // Set to the current time:
+ gettimeofday(&fPresentationTimeBase, NULL);
+ fNextPresentationTime = fPresentationTimeBase;
+ }
+ fPresentationTime = fNextPresentationTime;
+}
+
+Boolean H264or5VideoStreamFramer::isVPS(u_int8_t nal_unit_type) {
+ // VPS NAL units occur in H.265 only:
+ return fHNumber == 265 && nal_unit_type == 32;
+}
+
+Boolean H264or5VideoStreamFramer::isSPS(u_int8_t nal_unit_type) {
+ return fHNumber == 264 ? nal_unit_type == 7 : nal_unit_type == 33;
+}
+
+Boolean H264or5VideoStreamFramer::isPPS(u_int8_t nal_unit_type) {
+ return fHNumber == 264 ? nal_unit_type == 8 : nal_unit_type == 34;
+}
+
+Boolean H264or5VideoStreamFramer::isVCL(u_int8_t nal_unit_type) {
+ return fHNumber == 264
+ ? (nal_unit_type <= 5 && nal_unit_type > 0)
+ : (nal_unit_type <= 31);
+}
+
void H264or5VideoStreamFramer::doGetNextFrame() {
  // Deliver the next NAL unit to the downstream object.  If we're configured to insert
  // "access unit delimiter" (AUD) NAL units, and the previous delivery ended a picture,
  // then synthesize and deliver an AUD first; otherwise deliver normally, via our parser.
  if (fInsertAccessUnitDelimiters && pictureEndMarker()) {
    // Deliver an "access_unit_delimiter" NAL unit instead:
    unsigned const startCodeSize = fIncludeStartCodeInOutput ? 4: 0;
    unsigned const audNALSize = fHNumber == 264 ? 2 : 3; // the AUD NAL unit is 2 bytes (H.264) or 3 bytes (H.265)

    fFrameSize = startCodeSize + audNALSize;
    if (fFrameSize > fMaxSize) { // there's no space
      fNumTruncatedBytes = fFrameSize - fMaxSize;
      fFrameSize = fMaxSize;
      handleClosure();
      return;
    }

    // Write the (optional) start code, then the AUD NAL unit itself, into the
    // client's buffer ("fTo" advances past each byte written):
    if (fIncludeStartCodeInOutput) {
      *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x01;
    }
    if (fHNumber == 264) {
      *fTo++ = 9; // "Access unit delimiter" nal_unit_type
      *fTo++ = 0xF0; // "primary_pic_type" (7); "rbsp_trailing_bits()"
    } else { // H.265
      *fTo++ = 35<<1; // "Access unit delimiter" nal_unit_type
      *fTo++ = 0; // "nuh_layer_id" (0); "nuh_temporal_id_plus1" (0) (Is this correct??)
      *fTo++ = 0x50; // "pic_type" (2); "rbsp_trailing_bits()" (Is this correct??)
    }

    pictureEndMarker() = False; // for next time
    afterGetting(this);
  } else {
    // Do the normal delivery of a NAL unit from the parser:
    MPEGVideoStreamFramer::doGetNextFrame();
  }
}
+
+
+////////// H264or5VideoStreamParser implementation //////////
+
H264or5VideoStreamParser
::H264or5VideoStreamParser(int hNumber, H264or5VideoStreamFramer* usingSource,
                           FramedSource* inputSource, Boolean includeStartCodeInOutput)
  : MPEGVideoStreamParser(usingSource, inputSource),
    fHNumber(hNumber), fOutputStartCodeSize(includeStartCodeInOutput ? 4 : 0), fHaveSeenFirstStartCode(False), fHaveSeenFirstByteOfNALUnit(False), fParsedFrameRate(0.0),
    // Defaults for the timing-related fields below; they get overridden if/when we
    // parse "hrd_parameters()" and/or "vui_parameters()" structures in the stream:
    cpb_removal_delay_length_minus1(23), dpb_output_delay_length_minus1(23),
    CpbDpbDelaysPresentFlag(0), pic_struct_present_flag(0),
    DeltaTfiDivisor(2.0) {
}
+
H264or5VideoStreamParser::~H264or5VideoStreamParser() {
  // No parser-specific state to release; base-class destructors do the cleanup.
}
+
+#define PREFIX_SEI_NUT 39 // for H.265
+#define SUFFIX_SEI_NUT 40 // for H.265
+Boolean H264or5VideoStreamParser::isSEI(u_int8_t nal_unit_type) {
+ return fHNumber == 264
+ ? nal_unit_type == 6
+ : (nal_unit_type == PREFIX_SEI_NUT || nal_unit_type == SUFFIX_SEI_NUT);
+}
+
+Boolean H264or5VideoStreamParser::isEOF(u_int8_t nal_unit_type) {
+ // "end of sequence" or "end of (bit)stream"
+ return fHNumber == 264
+ ? (nal_unit_type == 10 || nal_unit_type == 11)
+ : (nal_unit_type == 36 || nal_unit_type == 37);
+}
+
+Boolean H264or5VideoStreamParser::usuallyBeginsAccessUnit(u_int8_t nal_unit_type) {
+ return fHNumber == 264
+ ? (nal_unit_type >= 6 && nal_unit_type <= 9) || (nal_unit_type >= 14 && nal_unit_type <= 18)
+ : (nal_unit_type >= 32 && nal_unit_type <= 35) || (nal_unit_type == 39)
+ || (nal_unit_type >= 41 && nal_unit_type <= 44)
+ || (nal_unit_type >= 48 && nal_unit_type <= 55);
+}
+
+void H264or5VideoStreamParser
+::removeEmulationBytes(u_int8_t* nalUnitCopy, unsigned maxSize, unsigned& nalUnitCopySize) {
+ u_int8_t const* nalUnitOrig = fStartOfFrame + fOutputStartCodeSize;
+ unsigned const numBytesInNALunit = fTo - nalUnitOrig;
+ nalUnitCopySize
+ = removeH264or5EmulationBytes(nalUnitCopy, maxSize, nalUnitOrig, numBytesInNALunit);
+}
+
#ifdef DEBUG
// Human-readable names for each NAL unit type (indexed by "nal_unit_type"),
// used only for debug-mode diagnostic output:
char const* nal_unit_type_description_h264[32] = {
  "Unspecified", //0
  "Coded slice of a non-IDR picture", //1
  "Coded slice data partition A", //2
  "Coded slice data partition B", //3
  "Coded slice data partition C", //4
  "Coded slice of an IDR picture", //5
  "Supplemental enhancement information (SEI)", //6
  "Sequence parameter set", //7
  "Picture parameter set", //8
  "Access unit delimiter", //9
  "End of sequence", //10
  "End of stream", //11
  "Filler data", //12
  "Sequence parameter set extension", //13
  "Prefix NAL unit", //14
  "Subset sequence parameter set", //15
  "Reserved", //16
  "Reserved", //17
  "Reserved", //18
  "Coded slice of an auxiliary coded picture without partitioning", //19
  "Coded slice extension", //20
  "Reserved", //21
  "Reserved", //22
  "Reserved", //23
  "Unspecified", //24
  "Unspecified", //25
  "Unspecified", //26
  "Unspecified", //27
  "Unspecified", //28
  "Unspecified", //29
  "Unspecified", //30
  "Unspecified" //31
};
// Same, but for H.265 (which uses 6-bit NAL unit types, hence 64 entries):
char const* nal_unit_type_description_h265[64] = {
  "Coded slice segment of a non-TSA, non-STSA trailing picture", //0
  "Coded slice segment of a non-TSA, non-STSA trailing picture", //1
  "Coded slice segment of a TSA picture", //2
  "Coded slice segment of a TSA picture", //3
  "Coded slice segment of a STSA picture", //4
  "Coded slice segment of a STSA picture", //5
  "Coded slice segment of a RADL picture", //6
  "Coded slice segment of a RADL picture", //7
  "Coded slice segment of a RASL picture", //8
  "Coded slice segment of a RASL picture", //9
  "Reserved", //10
  "Reserved", //11
  "Reserved", //12
  "Reserved", //13
  "Reserved", //14
  "Reserved", //15
  "Coded slice segment of a BLA picture", //16
  "Coded slice segment of a BLA picture", //17
  "Coded slice segment of a BLA picture", //18
  "Coded slice segment of an IDR picture", //19
  "Coded slice segment of an IDR picture", //20
  "Coded slice segment of a CRA picture", //21
  "Reserved", //22
  "Reserved", //23
  "Reserved", //24
  "Reserved", //25
  "Reserved", //26
  "Reserved", //27
  "Reserved", //28
  "Reserved", //29
  "Reserved", //30
  "Reserved", //31
  "Video parameter set", //32
  "Sequence parameter set", //33
  "Picture parameter set", //34
  "Access unit delimiter", //35
  "End of sequence", //36
  "End of bitstream", //37
  "Filler data", //38
  "Supplemental enhancement information (SEI)", //39
  "Supplemental enhancement information (SEI)", //40
  "Reserved", //41
  "Reserved", //42
  "Reserved", //43
  "Reserved", //44
  "Reserved", //45
  "Reserved", //46
  "Reserved", //47
  "Unspecified", //48
  "Unspecified", //49
  "Unspecified", //50
  "Unspecified", //51
  "Unspecified", //52
  "Unspecified", //53
  "Unspecified", //54
  "Unspecified", //55
  "Unspecified", //56
  "Unspecified", //57
  "Unspecified", //58
  "Unspecified", //59
  "Unspecified", //60
  "Unspecified", //61
  "Unspecified", //62
  "Unspecified", //63
};
#endif
+
#ifdef DEBUG
// Debugging aids: print each parsed syntax element to 'stderr', indented (with tabs)
// to reflect the current depth in the parse tree.
static unsigned numDebugTabs = 1;
#define DEBUG_PRINT_TABS for (unsigned _i = 0; _i < numDebugTabs; ++_i) fprintf(stderr, "\t")
#define DEBUG_PRINT(x) do { DEBUG_PRINT_TABS; fprintf(stderr, "%s: %d\n", #x, x); } while (0)
#define DEBUG_STR(x) do { DEBUG_PRINT_TABS; fprintf(stderr, "%s\n", x); } while (0)
// RAII helper: increases the debug-output indentation for the lifetime of the
// enclosing scope:
class DebugTab {
public:
  DebugTab() {++numDebugTabs;}
  ~DebugTab() {--numDebugTabs;}
};
#define DEBUG_TAB DebugTab dummy
#else
// Non-debug versions are no-ops.  Note: "(void)(x)" - rather than the self-assignment
// "x = x;" - suppresses 'unused variable' compiler warnings without triggering
// '-Wself-assign' warnings, and also works if "x" happens to be 'const'.
#define DEBUG_PRINT(x) do {(void)(x);} while (0)
#define DEBUG_STR(x) do {} while (0)
#define DEBUG_TAB do {} while (0)
#endif
+
void H264or5VideoStreamParser::profile_tier_level(BitVector& bv, unsigned max_sub_layers_minus1) {
  // Skip over a H.265 "profile_tier_level()" structure; we don't use any of its fields.
  bv.skipBits(96); // the fixed-size 'general' profile/tier/level fields (88 + 8 bits)

  unsigned i;
  // Note: both callers read "max_sub_layers_minus1" from a 3-bit field, so it is at
  // most 7, and these size-7 arrays cannot overflow:
  Boolean sub_layer_profile_present_flag[7], sub_layer_level_present_flag[7];
  for (i = 0; i < max_sub_layers_minus1; ++i) {
    sub_layer_profile_present_flag[i] = bv.get1BitBoolean();
    sub_layer_level_present_flag[i] = bv.get1BitBoolean();
  }
  if (max_sub_layers_minus1 > 0) {
    bv.skipBits(2*(8-max_sub_layers_minus1)); // reserved_zero_2bits
  }
  for (i = 0; i < max_sub_layers_minus1; ++i) {
    if (sub_layer_profile_present_flag[i]) {
      bv.skipBits(88); // the fixed-size sub-layer profile fields
    }
    if (sub_layer_level_present_flag[i]) {
      bv.skipBits(8); // sub_layer_level_idc[i]
    }
  }
}
+
void H264or5VideoStreamParser
::analyze_vui_parameters(BitVector& bv,
                         unsigned& num_units_in_tick, unsigned& time_scale) {
  // Parse a "vui_parameters()" structure (from within a SPS), extracting
  // "num_units_in_tick" and "time_scale" if timing info is present, and (H.264 only)
  // the HRD-related fields and "pic_struct_present_flag" (recorded in member variables).
  Boolean aspect_ratio_info_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(aspect_ratio_info_present_flag);
  if (aspect_ratio_info_present_flag) {
    DEBUG_TAB;
    unsigned aspect_ratio_idc = bv.getBits(8);
    DEBUG_PRINT(aspect_ratio_idc);
    if (aspect_ratio_idc == 255/*Extended_SAR*/) {
      bv.skipBits(32); // sar_width; sar_height
    }
  }
  Boolean overscan_info_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(overscan_info_present_flag);
  if (overscan_info_present_flag) {
    bv.skipBits(1); // overscan_appropriate_flag
  }
  Boolean video_signal_type_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(video_signal_type_present_flag);
  if (video_signal_type_present_flag) {
    DEBUG_TAB;
    bv.skipBits(4); // video_format; video_full_range_flag
    Boolean colour_description_present_flag = bv.get1BitBoolean();
    DEBUG_PRINT(colour_description_present_flag);
    if (colour_description_present_flag) {
      bv.skipBits(24); // colour_primaries; transfer_characteristics; matrix_coefficients
    }
  }
  Boolean chroma_loc_info_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(chroma_loc_info_present_flag);
  if (chroma_loc_info_present_flag) {
    (void)bv.get_expGolomb(); // chroma_sample_loc_type_top_field
    (void)bv.get_expGolomb(); // chroma_sample_loc_type_bottom_field
  }
  // The H.265 VUI has some extra fields at this point that the H.264 VUI lacks:
  if (fHNumber == 265) {
    bv.skipBits(2); // neutral_chroma_indication_flag, field_seq_flag
    Boolean frame_field_info_present_flag = bv.get1BitBoolean();
    DEBUG_PRINT(frame_field_info_present_flag);
    pic_struct_present_flag = frame_field_info_present_flag; // hack to make H.265 like H.264
    Boolean default_display_window_flag = bv.get1BitBoolean();
    DEBUG_PRINT(default_display_window_flag);
    if (default_display_window_flag) {
      (void)bv.get_expGolomb(); // def_disp_win_left_offset
      (void)bv.get_expGolomb(); // def_disp_win_right_offset
      (void)bv.get_expGolomb(); // def_disp_win_top_offset
      (void)bv.get_expGolomb(); // def_disp_win_bottom_offset
    }
  }
  Boolean timing_info_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(timing_info_present_flag);
  if (timing_info_present_flag) {
    DEBUG_TAB;
    num_units_in_tick = bv.getBits(32);
    DEBUG_PRINT(num_units_in_tick);
    time_scale = bv.getBits(32);
    DEBUG_PRINT(time_scale);
    if (fHNumber == 264) {
      Boolean fixed_frame_rate_flag = bv.get1BitBoolean();
      DEBUG_PRINT(fixed_frame_rate_flag);
    } else { // 265
      Boolean vui_poc_proportional_to_timing_flag = bv.get1BitBoolean();
      DEBUG_PRINT(vui_poc_proportional_to_timing_flag);
      if (vui_poc_proportional_to_timing_flag) {
        unsigned vui_num_ticks_poc_diff_one_minus1 = bv.get_expGolomb();
        DEBUG_PRINT(vui_num_ticks_poc_diff_one_minus1);
      }
      return; // For H.265, don't bother parsing any more of this #####
    }
  }
  // The following is H.264 only: #####
  Boolean nal_hrd_parameters_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(nal_hrd_parameters_present_flag);
  if (nal_hrd_parameters_present_flag) analyze_hrd_parameters(bv);
  Boolean vcl_hrd_parameters_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(vcl_hrd_parameters_present_flag);
  if (vcl_hrd_parameters_present_flag) analyze_hrd_parameters(bv);
  CpbDpbDelaysPresentFlag = nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag;
  if (CpbDpbDelaysPresentFlag) {
    bv.skipBits(1); // low_delay_hrd_flag
  }
  pic_struct_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(pic_struct_present_flag);
}
+
void H264or5VideoStreamParser::analyze_hrd_parameters(BitVector& bv) {
  // Parse a H.264 "hrd_parameters()" structure (from within a VUI), recording the
  // "cpb_removal_delay_length_minus1" and "dpb_output_delay_length_minus1" fields
  // in member variables (the other fields are parsed, but not used):
  DEBUG_TAB;
  unsigned cpb_cnt_minus1 = bv.get_expGolomb();
  DEBUG_PRINT(cpb_cnt_minus1);
  unsigned bit_rate_scale = bv.getBits(4);
  DEBUG_PRINT(bit_rate_scale);
  unsigned cpb_size_scale = bv.getBits(4);
  DEBUG_PRINT(cpb_size_scale);
  for (unsigned SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; ++SchedSelIdx) {
    DEBUG_TAB;
    DEBUG_PRINT(SchedSelIdx);
    unsigned bit_rate_value_minus1 = bv.get_expGolomb();
    DEBUG_PRINT(bit_rate_value_minus1);
    unsigned cpb_size_value_minus1 = bv.get_expGolomb();
    DEBUG_PRINT(cpb_size_value_minus1);
    Boolean cbr_flag = bv.get1BitBoolean();
    DEBUG_PRINT(cbr_flag);
  }
  unsigned initial_cpb_removal_delay_length_minus1 = bv.getBits(5);
  DEBUG_PRINT(initial_cpb_removal_delay_length_minus1);
  cpb_removal_delay_length_minus1 = bv.getBits(5);
  DEBUG_PRINT(cpb_removal_delay_length_minus1);
  dpb_output_delay_length_minus1 = bv.getBits(5);
  DEBUG_PRINT(dpb_output_delay_length_minus1);
  unsigned time_offset_length = bv.getBits(5);
  DEBUG_PRINT(time_offset_length);
}
+
void H264or5VideoStreamParser
::analyze_video_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale) {
  // Parse the H.265 VPS NAL unit that we've just seen, to extract "num_units_in_tick"
  // and "time_scale" (both are returned as 0 if the VPS carries no timing info).
  num_units_in_tick = time_scale = 0; // default values

  // Begin by making a copy of the NAL unit data, removing any 'emulation prevention' bytes:
  u_int8_t vps[VPS_MAX_SIZE];
  unsigned vpsSize;
  removeEmulationBytes(vps, sizeof vps, vpsSize);

  BitVector bv(vps, 0, 8*vpsSize);

  // Assert: fHNumber == 265 (because this function is called only when parsing H.265)
  unsigned i;

  bv.skipBits(28); // nal_unit_header, vps_video_parameter_set_id, vps_reserved_three_2bits, vps_max_layers_minus1
  unsigned vps_max_sub_layers_minus1 = bv.getBits(3);
  DEBUG_PRINT(vps_max_sub_layers_minus1);
  bv.skipBits(17); // vps_temporal_id_nesting_flag, vps_reserved_0xffff_16bits
  profile_tier_level(bv, vps_max_sub_layers_minus1);
  Boolean vps_sub_layer_ordering_info_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(vps_sub_layer_ordering_info_present_flag);
  // If the ordering info is present, it's given per sub-layer; otherwise it appears
  // once (for the highest sub-layer only):
  for (i = vps_sub_layer_ordering_info_present_flag ? 0 : vps_max_sub_layers_minus1;
       i <= vps_max_sub_layers_minus1; ++i) {
    (void)bv.get_expGolomb(); // vps_max_dec_pic_buffering_minus1[i]
    (void)bv.get_expGolomb(); // vps_max_num_reorder_pics[i]
    (void)bv.get_expGolomb(); // vps_max_latency_increase_plus1[i]
  }
  unsigned vps_max_layer_id = bv.getBits(6);
  DEBUG_PRINT(vps_max_layer_id);
  unsigned vps_num_layer_sets_minus1 = bv.get_expGolomb();
  DEBUG_PRINT(vps_num_layer_sets_minus1);
  for (i = 1; i <= vps_num_layer_sets_minus1; ++i) {
    bv.skipBits(vps_max_layer_id+1); // layer_id_included_flag[i][0..vps_max_layer_id]
  }
  Boolean vps_timing_info_present_flag = bv.get1BitBoolean();
  DEBUG_PRINT(vps_timing_info_present_flag);
  if (vps_timing_info_present_flag) {
    DEBUG_TAB;
    num_units_in_tick = bv.getBits(32);
    DEBUG_PRINT(num_units_in_tick);
    time_scale = bv.getBits(32);
    DEBUG_PRINT(time_scale);
    Boolean vps_poc_proportional_to_timing_flag = bv.get1BitBoolean();
    DEBUG_PRINT(vps_poc_proportional_to_timing_flag);
    if (vps_poc_proportional_to_timing_flag) {
      unsigned vps_num_ticks_poc_diff_one_minus1 = bv.get_expGolomb();
      DEBUG_PRINT(vps_num_ticks_poc_diff_one_minus1);
    }
  }
  Boolean vps_extension_flag = bv.get1BitBoolean();
  DEBUG_PRINT(vps_extension_flag);
}
+
+void H264or5VideoStreamParser
+::analyze_seq_parameter_set_data(unsigned& num_units_in_tick, unsigned& time_scale) {
+ num_units_in_tick = time_scale = 0; // default values
+
+ // Begin by making a copy of the NAL unit data, removing any 'emulation prevention' bytes:
+ u_int8_t sps[SPS_MAX_SIZE];
+ unsigned spsSize;
+ removeEmulationBytes(sps, sizeof sps, spsSize);
+
+ BitVector bv(sps, 0, 8*spsSize);
+
+ if (fHNumber == 264) {
+ bv.skipBits(8); // forbidden_zero_bit; nal_ref_idc; nal_unit_type
+ unsigned profile_idc = bv.getBits(8);
+ DEBUG_PRINT(profile_idc);
+ unsigned constraint_setN_flag = bv.getBits(8); // also "reserved_zero_2bits" at end
+ DEBUG_PRINT(constraint_setN_flag);
+ unsigned level_idc = bv.getBits(8);
+ DEBUG_PRINT(level_idc);
+ unsigned seq_parameter_set_id = bv.get_expGolomb();
+ DEBUG_PRINT(seq_parameter_set_id);
+ if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 || profile_idc == 244 || profile_idc == 44 || profile_idc == 83 || profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ) {
+ DEBUG_TAB;
+ unsigned chroma_format_idc = bv.get_expGolomb();
+ DEBUG_PRINT(chroma_format_idc);
+ if (chroma_format_idc == 3) {
+ DEBUG_TAB;
+ Boolean separate_colour_plane_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(separate_colour_plane_flag);
+ }
+ (void)bv.get_expGolomb(); // bit_depth_luma_minus8
+ (void)bv.get_expGolomb(); // bit_depth_chroma_minus8
+ bv.skipBits(1); // qpprime_y_zero_transform_bypass_flag
+ Boolean seq_scaling_matrix_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(seq_scaling_matrix_present_flag);
+ if (seq_scaling_matrix_present_flag) {
+ for (int i = 0; i < ((chroma_format_idc != 3) ? 8 : 12); ++i) {
+ DEBUG_TAB;
+ DEBUG_PRINT(i);
+ Boolean seq_scaling_list_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(seq_scaling_list_present_flag);
+ if (seq_scaling_list_present_flag) {
+ DEBUG_TAB;
+ unsigned sizeOfScalingList = i < 6 ? 16 : 64;
+ unsigned lastScale = 8;
+ unsigned nextScale = 8;
+ for (unsigned j = 0; j < sizeOfScalingList; ++j) {
+ DEBUG_TAB;
+ DEBUG_PRINT(j);
+ DEBUG_PRINT(nextScale);
+ if (nextScale != 0) {
+ DEBUG_TAB;
+ int delta_scale = bv.get_expGolombSigned();
+ DEBUG_PRINT(delta_scale);
+ nextScale = (lastScale + delta_scale + 256) % 256;
+ }
+ lastScale = (nextScale == 0) ? lastScale : nextScale;
+ DEBUG_PRINT(lastScale);
+ }
+ }
+ }
+ }
+ }
+ unsigned log2_max_frame_num_minus4 = bv.get_expGolomb();
+ DEBUG_PRINT(log2_max_frame_num_minus4);
+ unsigned pic_order_cnt_type = bv.get_expGolomb();
+ DEBUG_PRINT(pic_order_cnt_type);
+ if (pic_order_cnt_type == 0) {
+ DEBUG_TAB;
+ unsigned log2_max_pic_order_cnt_lsb_minus4 = bv.get_expGolomb();
+ DEBUG_PRINT(log2_max_pic_order_cnt_lsb_minus4);
+ } else if (pic_order_cnt_type == 1) {
+ DEBUG_TAB;
+ bv.skipBits(1); // delta_pic_order_always_zero_flag
+ (void)bv.get_expGolombSigned(); // offset_for_non_ref_pic
+ (void)bv.get_expGolombSigned(); // offset_for_top_to_bottom_field
+ unsigned num_ref_frames_in_pic_order_cnt_cycle = bv.get_expGolomb();
+ DEBUG_PRINT(num_ref_frames_in_pic_order_cnt_cycle);
+ for (unsigned i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; ++i) {
+ (void)bv.get_expGolombSigned(); // offset_for_ref_frame[i]
+ }
+ }
+ unsigned max_num_ref_frames = bv.get_expGolomb();
+ DEBUG_PRINT(max_num_ref_frames);
+ Boolean gaps_in_frame_num_value_allowed_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(gaps_in_frame_num_value_allowed_flag);
+ unsigned pic_width_in_mbs_minus1 = bv.get_expGolomb();
+ DEBUG_PRINT(pic_width_in_mbs_minus1);
+ unsigned pic_height_in_map_units_minus1 = bv.get_expGolomb();
+ DEBUG_PRINT(pic_height_in_map_units_minus1);
+ Boolean frame_mbs_only_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(frame_mbs_only_flag);
+ if (!frame_mbs_only_flag) {
+ bv.skipBits(1); // mb_adaptive_frame_field_flag
+ }
+ bv.skipBits(1); // direct_8x8_inference_flag
+ Boolean frame_cropping_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(frame_cropping_flag);
+ if (frame_cropping_flag) {
+ (void)bv.get_expGolomb(); // frame_crop_left_offset
+ (void)bv.get_expGolomb(); // frame_crop_right_offset
+ (void)bv.get_expGolomb(); // frame_crop_top_offset
+ (void)bv.get_expGolomb(); // frame_crop_bottom_offset
+ }
+ Boolean vui_parameters_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(vui_parameters_present_flag);
+ if (vui_parameters_present_flag) {
+ DEBUG_TAB;
+ analyze_vui_parameters(bv, num_units_in_tick, time_scale);
+ }
+ } else { // 265
+ unsigned i;
+
+ bv.skipBits(16); // nal_unit_header
+ bv.skipBits(4); // sps_video_parameter_set_id
+ unsigned sps_max_sub_layers_minus1 = bv.getBits(3);
+ DEBUG_PRINT(sps_max_sub_layers_minus1);
+ bv.skipBits(1); // sps_temporal_id_nesting_flag
+ profile_tier_level(bv, sps_max_sub_layers_minus1);
+ (void)bv.get_expGolomb(); // sps_seq_parameter_set_id
+ unsigned chroma_format_idc = bv.get_expGolomb();
+ DEBUG_PRINT(chroma_format_idc);
+ if (chroma_format_idc == 3) bv.skipBits(1); // separate_colour_plane_flag
+ unsigned pic_width_in_luma_samples = bv.get_expGolomb();
+ DEBUG_PRINT(pic_width_in_luma_samples);
+ unsigned pic_height_in_luma_samples = bv.get_expGolomb();
+ DEBUG_PRINT(pic_height_in_luma_samples);
+ Boolean conformance_window_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(conformance_window_flag);
+ if (conformance_window_flag) {
+ DEBUG_TAB;
+ unsigned conf_win_left_offset = bv.get_expGolomb();
+ DEBUG_PRINT(conf_win_left_offset);
+ unsigned conf_win_right_offset = bv.get_expGolomb();
+ DEBUG_PRINT(conf_win_right_offset);
+ unsigned conf_win_top_offset = bv.get_expGolomb();
+ DEBUG_PRINT(conf_win_top_offset);
+ unsigned conf_win_bottom_offset = bv.get_expGolomb();
+ DEBUG_PRINT(conf_win_bottom_offset);
+ }
+ (void)bv.get_expGolomb(); // bit_depth_luma_minus8
+ (void)bv.get_expGolomb(); // bit_depth_chroma_minus8
+ unsigned log2_max_pic_order_cnt_lsb_minus4 = bv.get_expGolomb();
+ Boolean sps_sub_layer_ordering_info_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(sps_sub_layer_ordering_info_present_flag);
+ for (i = (sps_sub_layer_ordering_info_present_flag ? 0 : sps_max_sub_layers_minus1);
+ i <= sps_max_sub_layers_minus1; ++i) {
+ (void)bv.get_expGolomb(); // sps_max_dec_pic_buffering_minus1[i]
+ (void)bv.get_expGolomb(); // sps_max_num_reorder_pics[i]
+ (void)bv.get_expGolomb(); // sps_max_latency_increase[i]
+ }
+ (void)bv.get_expGolomb(); // log2_min_luma_coding_block_size_minus3
+ (void)bv.get_expGolomb(); // log2_diff_max_min_luma_coding_block_size
+ (void)bv.get_expGolomb(); // log2_min_transform_block_size_minus2
+ (void)bv.get_expGolomb(); // log2_diff_max_min_transform_block_size
+ (void)bv.get_expGolomb(); // max_transform_hierarchy_depth_inter
+ (void)bv.get_expGolomb(); // max_transform_hierarchy_depth_intra
+ Boolean scaling_list_enabled_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(scaling_list_enabled_flag);
+ if (scaling_list_enabled_flag) {
+ DEBUG_TAB;
+ Boolean sps_scaling_list_data_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(sps_scaling_list_data_present_flag);
+ if (sps_scaling_list_data_present_flag) {
+ // scaling_list_data()
+ DEBUG_TAB;
+ for (unsigned sizeId = 0; sizeId < 4; ++sizeId) {
+ DEBUG_PRINT(sizeId);
+ for (unsigned matrixId = 0; matrixId < (sizeId == 3 ? 2 : 6); ++matrixId) {
+ DEBUG_TAB;
+ DEBUG_PRINT(matrixId);
+ Boolean scaling_list_pred_mode_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(scaling_list_pred_mode_flag);
+ if (!scaling_list_pred_mode_flag) {
+ (void)bv.get_expGolomb(); // scaling_list_pred_matrix_id_delta[sizeId][matrixId]
+ } else {
+ unsigned const c = 1 << (4+(sizeId<<1));
+ unsigned coefNum = c < 64 ? c : 64;
+ if (sizeId > 1) {
+ (void)bv.get_expGolomb(); // scaling_list_dc_coef_minus8[sizeId][matrixId]
+ }
+ for (i = 0; i < coefNum; ++i) {
+ (void)bv.get_expGolomb(); // scaling_list_delta_coef
+ }
+ }
+ }
+ }
+ }
+ }
+ bv.skipBits(2); // amp_enabled_flag, sample_adaptive_offset_enabled_flag
+ Boolean pcm_enabled_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(pcm_enabled_flag);
+ if (pcm_enabled_flag) {
+ bv.skipBits(8); // pcm_sample_bit_depth_luma_minus1, pcm_sample_bit_depth_chroma_minus1
+ (void)bv.get_expGolomb(); // log2_min_pcm_luma_coding_block_size_minus3
+ (void)bv.get_expGolomb(); // log2_diff_max_min_pcm_luma_coding_block_size
+ bv.skipBits(1); // pcm_loop_filter_disabled_flag
+ }
+ unsigned num_short_term_ref_pic_sets = bv.get_expGolomb();
+ DEBUG_PRINT(num_short_term_ref_pic_sets);
+ unsigned num_negative_pics = 0, prev_num_negative_pics = 0;
+ unsigned num_positive_pics = 0, prev_num_positive_pics = 0;
+ for (i = 0; i < num_short_term_ref_pic_sets; ++i) {
+ // short_term_ref_pic_set(i):
+ DEBUG_TAB;
+ DEBUG_PRINT(i);
+ Boolean inter_ref_pic_set_prediction_flag = False;
+ if (i != 0) {
+ inter_ref_pic_set_prediction_flag = bv.get1BitBoolean();
+ }
+ DEBUG_PRINT(inter_ref_pic_set_prediction_flag);
+ if (inter_ref_pic_set_prediction_flag) {
+ DEBUG_TAB;
+ if (i == num_short_term_ref_pic_sets) {
+ // This can't happen here, but it's in the spec, so we include it for completeness
+ (void)bv.get_expGolomb(); // delta_idx_minus1
+ }
+ bv.skipBits(1); // delta_rps_sign
+ (void)bv.get_expGolomb(); // abs_delta_rps_minus1
+ unsigned NumDeltaPocs = prev_num_negative_pics + prev_num_positive_pics; // correct???
+ for (unsigned j = 0; j < NumDeltaPocs; ++j) {
+ DEBUG_PRINT(j);
+ Boolean used_by_curr_pic_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(used_by_curr_pic_flag);
+ if (!used_by_curr_pic_flag) bv.skipBits(1); // use_delta_flag[j]
+ }
+ } else {
+ prev_num_negative_pics = num_negative_pics;
+ num_negative_pics = bv.get_expGolomb();
+ DEBUG_PRINT(num_negative_pics);
+ prev_num_positive_pics = num_positive_pics;
+ num_positive_pics = bv.get_expGolomb();
+ DEBUG_PRINT(num_positive_pics);
+ unsigned k;
+ for (k = 0; k < num_negative_pics; ++k) {
+ (void)bv.get_expGolomb(); // delta_poc_s0_minus1[k]
+ bv.skipBits(1); // used_by_curr_pic_s0_flag[k]
+ }
+ for (k = 0; k < num_positive_pics; ++k) {
+ (void)bv.get_expGolomb(); // delta_poc_s1_minus1[k]
+ bv.skipBits(1); // used_by_curr_pic_s1_flag[k]
+ }
+ }
+ }
+ Boolean long_term_ref_pics_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(long_term_ref_pics_present_flag);
+ if (long_term_ref_pics_present_flag) {
+ DEBUG_TAB;
+ unsigned num_long_term_ref_pics_sps = bv.get_expGolomb();
+ DEBUG_PRINT(num_long_term_ref_pics_sps);
+ for (i = 0; i < num_long_term_ref_pics_sps; ++i) {
+ bv.skipBits(log2_max_pic_order_cnt_lsb_minus4); // lt_ref_pic_poc_lsb_sps[i]
+ bv.skipBits(1); // used_by_curr_pic_lt_sps_flag[1]
+ }
+ }
+ bv.skipBits(2); // sps_temporal_mvp_enabled_flag, strong_intra_smoothing_enabled_flag
+ Boolean vui_parameters_present_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(vui_parameters_present_flag);
+ if (vui_parameters_present_flag) {
+ DEBUG_TAB;
+ analyze_vui_parameters(bv, num_units_in_tick, time_scale);
+ }
+ Boolean sps_extension_flag = bv.get1BitBoolean();
+ DEBUG_PRINT(sps_extension_flag);
+ }
+}
+
#define SEI_MAX_SIZE 5000 // larger than the largest possible SEI NAL unit

#ifdef DEBUG
// Human-readable names for H.264 SEI payload types (indexed by "payloadType"),
// used only for debug-mode diagnostic output; types above the last known value
// map to the final "reserved_sei_message" entry:
#define MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264 46
char const* sei_payloadType_description_h264[MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264+1] = {
  "buffering_period", //0
  "pic_timing", //1
  "pan_scan_rect", //2
  "filler_payload", //3
  "user_data_registered_itu_t_t35", //4
  "user_data_unregistered", //5
  "recovery_point", //6
  "dec_ref_pic_marking_repetition", //7
  "spare_pic", //8
  "scene_info", //9
  "sub_seq_info", //10
  "sub_seq_layer_characteristics", //11
  "sub_seq_characteristics", //12
  "full_frame_freeze", //13
  "full_frame_freeze_release", //14
  "full_frame_snapshot", //15
  "progressive_refinement_segment_start", //16
  "progressive_refinement_segment_end", //17
  "motion_constrained_slice_group_set", //18
  "film_grain_characteristics", //19
  "deblocking_filter_display_preference", //20
  "stereo_video_info", //21
  "post_filter_hint", //22
  "tone_mapping_info", //23
  "scalability_info", //24
  "sub_pic_scalable_layer", //25
  "non_required_layer_rep", //26
  "priority_layer_info", //27
  "layers_not_present", //28
  "layer_dependency_change", //29
  "scalable_nesting", //30
  "base_layer_temporal_hrd", //31
  "quality_layer_integrity_check", //32
  "redundant_pic_property", //33
  "tl0_dep_rep_index", //34
  "tl_switching_point", //35
  "parallel_decoding_info", //36
  "mvc_scalable_nesting", //37
  "view_scalability_info", //38
  "multiview_scene_info", //39
  "multiview_acquisition_info", //40
  "non_required_view_component", //41
  "view_dependency_change", //42
  "operation_points_not_present", //43
  "base_view_temporal_hrd", //44
  "frame_packing_arrangement", //45
  "reserved_sei_message" // 46 or higher
};
#endif
+
+void H264or5VideoStreamParser::analyze_sei_data(u_int8_t nal_unit_type) {
+ // Begin by making a copy of the NAL unit data, removing any 'emulation prevention' bytes:
+ u_int8_t sei[SEI_MAX_SIZE];
+ unsigned seiSize;
+ removeEmulationBytes(sei, sizeof sei, seiSize);
+
+ unsigned j = 1; // skip the initial byte (forbidden_zero_bit; nal_ref_idc; nal_unit_type); we've already seen it
+ while (j < seiSize) {
+ unsigned payloadType = 0;
+ do {
+ payloadType += sei[j];
+ } while (sei[j++] == 255 && j < seiSize);
+ if (j >= seiSize) break;
+
+ unsigned payloadSize = 0;
+ do {
+ payloadSize += sei[j];
+ } while (sei[j++] == 255 && j < seiSize);
+ if (j >= seiSize) break;
+
+#ifdef DEBUG
+ char const* description;
+ if (fHNumber == 264) {
+ unsigned descriptionNum = payloadType <= MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264
+ ? payloadType : MAX_SEI_PAYLOAD_TYPE_DESCRIPTION_H264;
+ description = sei_payloadType_description_h264[descriptionNum];
+ } else { // 265
+ description =
+ payloadType == 3 ? "filler_payload" :
+ payloadType == 4 ? "user_data_registered_itu_t_t35" :
+ payloadType == 5 ? "user_data_unregistered" :
+ payloadType == 17 ? "progressive_refinement_segment_end" :
+ payloadType == 22 ? "post_filter_hint" :
+ (payloadType == 132 && nal_unit_type == SUFFIX_SEI_NUT) ? "decoded_picture_hash" :
+ nal_unit_type == SUFFIX_SEI_NUT ? "reserved_sei_message" :
+ payloadType == 0 ? "buffering_period" :
+ payloadType == 1 ? "pic_timing" :
+ payloadType == 2 ? "pan_scan_rect" :
+ payloadType == 6 ? "recovery_point" :
+ payloadType == 9 ? "scene_info" :
+ payloadType == 15 ? "picture_snapshot" :
+ payloadType == 16 ? "progressive_refinement_segment_start" :
+ payloadType == 19 ? "film_grain_characteristics" :
+ payloadType == 23 ? "tone_mapping_info" :
+ payloadType == 45 ? "frame_packing_arrangement" :
+ payloadType == 47 ? "display_orientation" :
+ payloadType == 128 ? "structure_of_pictures_info" :
+ payloadType == 129 ? "active_parameter_sets" :
+ payloadType == 130 ? "decoding_unit_info" :
+ payloadType == 131 ? "temporal_sub_layer_zero_index" :
+ payloadType == 133 ? "scalable_nesting" :
+ payloadType == 134 ? "region_refresh_info" : "reserved_sei_message";
+ }
+ fprintf(stderr, "\tpayloadType %d (\"%s\"); payloadSize %d\n", payloadType, description, payloadSize);
+#endif
+
+ analyze_sei_payload(payloadType, payloadSize, &sei[j]);
+ j += payloadSize;
+ }
+}
+
+void H264or5VideoStreamParser
+::analyze_sei_payload(unsigned payloadType, unsigned payloadSize, u_int8_t* payload) {
+ if (payloadType == 1/* pic_timing, for both H.264 and H.265 */) {
+ BitVector bv(payload, 0, 8*payloadSize);
+
+ DEBUG_TAB;
+ if (CpbDpbDelaysPresentFlag) {
+ unsigned cpb_removal_delay = bv.getBits(cpb_removal_delay_length_minus1 + 1);
+ DEBUG_PRINT(cpb_removal_delay);
+ unsigned dpb_output_delay = bv.getBits(dpb_output_delay_length_minus1 + 1);
+ DEBUG_PRINT(dpb_output_delay);
+ }
+ double prevDeltaTfiDivisor = DeltaTfiDivisor;
+ if (pic_struct_present_flag) {
+ unsigned pic_struct = bv.getBits(4);
+ DEBUG_PRINT(pic_struct);
+ // Use this to set "DeltaTfiDivisor" (which is used to compute the frame rate):
+ if (fHNumber == 264) {
+ DeltaTfiDivisor =
+ pic_struct == 0 ? 2.0 :
+ pic_struct <= 2 ? 1.0 :
+ pic_struct <= 4 ? 2.0 :
+ pic_struct <= 6 ? 3.0 :
+ pic_struct == 7 ? 4.0 :
+ pic_struct == 8 ? 6.0 :
+ 2.0;
+ } else { // H.265
+ DeltaTfiDivisor =
+ pic_struct == 0 ? 2.0 :
+ pic_struct <= 2 ? 1.0 :
+ pic_struct <= 4 ? 2.0 :
+ pic_struct <= 6 ? 3.0 :
+ pic_struct == 7 ? 2.0 :
+ pic_struct == 8 ? 3.0 :
+ pic_struct <= 12 ? 1.0 :
+ 2.0;
+ }
+ } else {
+ if (fHNumber == 264) {
+ // Need to get field_pic_flag from slice_header to set this properly! #####
+ } else { // H.265
+ DeltaTfiDivisor = 1.0;
+ }
+ }
+ // If "DeltaTfiDivisor" has changed, and we've already computed the frame rate, then
+ // adjust it, based on the new value of "DeltaTfiDivisor":
+ if (DeltaTfiDivisor != prevDeltaTfiDivisor && fParsedFrameRate != 0.0) {
+ usingSource()->fFrameRate = fParsedFrameRate
+ = fParsedFrameRate*(prevDeltaTfiDivisor/DeltaTfiDivisor);
+#ifdef DEBUG
+ fprintf(stderr, "Changed frame rate to %f fps\n", usingSource()->fFrameRate);
+#endif
+ }
+ // Ignore the rest of the payload (timestamps) for now... #####
+ }
+}
+
+void H264or5VideoStreamParser::flushInput() {
+ fHaveSeenFirstStartCode = False;
+ fHaveSeenFirstByteOfNALUnit = False;
+
+ StreamParser::flushInput();
+}
+
+unsigned H264or5VideoStreamParser::parse() {
+ try {
+ // The stream must start with a 0x00000001:
+ if (!fHaveSeenFirstStartCode) {
+ // Skip over any input bytes that precede the first 0x00000001:
+ u_int32_t first4Bytes;
+ while ((first4Bytes = test4Bytes()) != 0x00000001) {
+ get1Byte(); setParseState(); // ensures that we progress over bad data
+ }
+ skipBytes(4); // skip this initial code
+
+ setParseState();
+ fHaveSeenFirstStartCode = True; // from now on
+ }
+
+ if (fOutputStartCodeSize > 0 && curFrameSize() == 0 && !haveSeenEOF()) {
+ // Include a start code in the output:
+ save4Bytes(0x00000001);
+ }
+
+ // Then save everything up until the next 0x00000001 (4 bytes) or 0x000001 (3 bytes), or we hit EOF.
+ // Also make note of the first byte, because it contains the "nal_unit_type":
+ if (haveSeenEOF()) {
+ // We hit EOF the last time that we tried to parse this data, so we know that any remaining unparsed data
+ // forms a complete NAL unit, and that there's no 'start code' at the end:
+ unsigned remainingDataSize = totNumValidBytes() - curOffset();
+#ifdef DEBUG
+ unsigned const trailingNALUnitSize = remainingDataSize;
+#endif
+ while (remainingDataSize > 0) {
+ u_int8_t nextByte = get1Byte();
+ if (!fHaveSeenFirstByteOfNALUnit) {
+ fFirstByteOfNALUnit = nextByte;
+ fHaveSeenFirstByteOfNALUnit = True;
+ }
+ saveByte(nextByte);
+ --remainingDataSize;
+ }
+
+#ifdef DEBUG
+ if (fHNumber == 264) {
+ u_int8_t nal_ref_idc = (fFirstByteOfNALUnit&0x60)>>5;
+ u_int8_t nal_unit_type = fFirstByteOfNALUnit&0x1F;
+ fprintf(stderr, "Parsed trailing %d-byte NAL-unit (nal_ref_idc: %d, nal_unit_type: %d (\"%s\"))\n",
+ trailingNALUnitSize, nal_ref_idc, nal_unit_type, nal_unit_type_description_h264[nal_unit_type]);
+ } else { // 265
+ u_int8_t nal_unit_type = (fFirstByteOfNALUnit&0x7E)>>1;
+ fprintf(stderr, "Parsed trailing %d-byte NAL-unit (nal_unit_type: %d (\"%s\"))\n",
+ trailingNALUnitSize, nal_unit_type, nal_unit_type_description_h265[nal_unit_type]);
+ }
+#endif
+
+ (void)get1Byte(); // forces another read, which will cause EOF to get handled for real this time
+ return 0;
+ } else {
+ u_int32_t next4Bytes = test4Bytes();
+ if (!fHaveSeenFirstByteOfNALUnit) {
+ fFirstByteOfNALUnit = next4Bytes>>24;
+ fHaveSeenFirstByteOfNALUnit = True;
+ }
+ while (next4Bytes != 0x00000001 && (next4Bytes&0xFFFFFF00) != 0x00000100) {
+ // We save at least some of "next4Bytes".
+ if ((unsigned)(next4Bytes&0xFF) > 1) {
+ // Common case: 0x00000001 or 0x000001 definitely doesn't begin anywhere in "next4Bytes", so we save all of it:
+ save4Bytes(next4Bytes);
+ skipBytes(4);
+ } else {
+ // Save the first byte, and continue testing the rest:
+ saveByte(next4Bytes>>24);
+ skipBytes(1);
+ }
+ setParseState(); // ensures forward progress
+ next4Bytes = test4Bytes();
+ }
+ // Assert: next4Bytes starts with 0x00000001 or 0x000001, and we've saved all previous bytes (forming a complete NAL unit).
+ // Skip over these remaining bytes, up until the start of the next NAL unit:
+ if (next4Bytes == 0x00000001) {
+ skipBytes(4);
+ } else {
+ skipBytes(3);
+ }
+ }
+
+ fHaveSeenFirstByteOfNALUnit = False; // for the next NAL unit that we'll parse
+ u_int8_t nal_unit_type;
+ if (fHNumber == 264) {
+ nal_unit_type = fFirstByteOfNALUnit&0x1F;
+#ifdef DEBUG
+ u_int8_t nal_ref_idc = (fFirstByteOfNALUnit&0x60)>>5;
+ fprintf(stderr, "Parsed %d-byte NAL-unit (nal_ref_idc: %d, nal_unit_type: %d (\"%s\"))\n",
+ curFrameSize()-fOutputStartCodeSize, nal_ref_idc, nal_unit_type, nal_unit_type_description_h264[nal_unit_type]);
+#endif
+ } else { // 265
+ nal_unit_type = (fFirstByteOfNALUnit&0x7E)>>1;
+#ifdef DEBUG
+ fprintf(stderr, "Parsed %d-byte NAL-unit (nal_unit_type: %d (\"%s\"))\n",
+ curFrameSize()-fOutputStartCodeSize, nal_unit_type, nal_unit_type_description_h265[nal_unit_type]);
+#endif
+ }
+
+ // Now that we have found (& copied) a NAL unit, process it if it's of special interest to us:
+ if (isVPS(nal_unit_type)) { // Video parameter set
+ // First, save a copy of this NAL unit, in case the downstream object wants to see it:
+ usingSource()->saveCopyOfVPS(fStartOfFrame + fOutputStartCodeSize, curFrameSize() - fOutputStartCodeSize);
+
+ if (fParsedFrameRate == 0.0) {
+ // We haven't yet parsed a frame rate from the stream.
+ // So parse this NAL unit to check whether frame rate information is present:
+ unsigned num_units_in_tick, time_scale;
+ analyze_video_parameter_set_data(num_units_in_tick, time_scale);
+ if (time_scale > 0 && num_units_in_tick > 0) {
+ usingSource()->fFrameRate = fParsedFrameRate
+ = time_scale/(DeltaTfiDivisor*num_units_in_tick);
+#ifdef DEBUG
+ fprintf(stderr, "Set frame rate to %f fps\n", usingSource()->fFrameRate);
+#endif
+ } else {
+#ifdef DEBUG
+ fprintf(stderr, "\tThis \"Video Parameter Set\" NAL unit contained no frame rate information, so we use a default frame rate of %f fps\n", usingSource()->fFrameRate);
+#endif
+ }
+ }
+ } else if (isSPS(nal_unit_type)) { // Sequence parameter set
+ // First, save a copy of this NAL unit, in case the downstream object wants to see it:
+ usingSource()->saveCopyOfSPS(fStartOfFrame + fOutputStartCodeSize, curFrameSize() - fOutputStartCodeSize);
+
+ if (fParsedFrameRate == 0.0) {
+ // We haven't yet parsed a frame rate from the stream.
+ // So parse this NAL unit to check whether frame rate information is present:
+ unsigned num_units_in_tick, time_scale;
+ analyze_seq_parameter_set_data(num_units_in_tick, time_scale);
+ if (time_scale > 0 && num_units_in_tick > 0) {
+ usingSource()->fFrameRate = fParsedFrameRate
+ = time_scale/(DeltaTfiDivisor*num_units_in_tick);
+#ifdef DEBUG
+ fprintf(stderr, "Set frame rate to %f fps\n", usingSource()->fFrameRate);
+#endif
+ } else {
+#ifdef DEBUG
+ fprintf(stderr, "\tThis \"Sequence Parameter Set\" NAL unit contained no frame rate information, so we use a default frame rate of %f fps\n", usingSource()->fFrameRate);
+#endif
+ }
+ }
+ } else if (isPPS(nal_unit_type)) { // Picture parameter set
+ // Save a copy of this NAL unit, in case the downstream object wants to see it:
+ usingSource()->saveCopyOfPPS(fStartOfFrame + fOutputStartCodeSize, curFrameSize() - fOutputStartCodeSize);
+ } else if (isSEI(nal_unit_type)) { // Supplemental enhancement information (SEI)
+ analyze_sei_data(nal_unit_type);
+ // Later, perhaps adjust "fPresentationTime" if we saw a "pic_timing" SEI payload??? #####
+ }
+
+ usingSource()->setPresentationTime();
+#ifdef DEBUG
+ unsigned long secs = (unsigned long)usingSource()->fPresentationTime.tv_sec;
+ unsigned uSecs = (unsigned)usingSource()->fPresentationTime.tv_usec;
+ fprintf(stderr, "\tPresentation time: %lu.%06u\n", secs, uSecs);
+#endif
+
+ // Now, check whether this NAL unit ends an 'access unit'.
+ // (RTP streamers need to know this in order to figure out whether or not to set the "M" bit.)
+ Boolean thisNALUnitEndsAccessUnit;
+ if (haveSeenEOF() || isEOF(nal_unit_type)) {
+ // There is no next NAL unit, so we assume that this one ends the current 'access unit':
+ thisNALUnitEndsAccessUnit = True;
+ } else if (usuallyBeginsAccessUnit(nal_unit_type)) {
+ // These NAL units usually *begin* an access unit, so assume that they don't end one here:
+ thisNALUnitEndsAccessUnit = False;
+ } else {
+ // We need to check the *next* NAL unit to figure out whether
+ // the current NAL unit ends an 'access unit':
+ u_int8_t firstBytesOfNextNALUnit[3];
+ testBytes(firstBytesOfNextNALUnit, 3);
+
+ u_int8_t const& next_nal_unit_type = fHNumber == 264
+ ? (firstBytesOfNextNALUnit[0]&0x1F) : ((firstBytesOfNextNALUnit[0]&0x7E)>>1);
+ if (isVCL(next_nal_unit_type)) {
+ // The high-order bit of the byte after the "nal_unit_header" tells us whether it's
+ // the start of a new 'access unit' (and thus the current NAL unit ends an 'access unit'):
+ u_int8_t const byteAfter_nal_unit_header
+ = fHNumber == 264 ? firstBytesOfNextNALUnit[1] : firstBytesOfNextNALUnit[2];
+ thisNALUnitEndsAccessUnit = (byteAfter_nal_unit_header&0x80) != 0;
+ } else if (usuallyBeginsAccessUnit(next_nal_unit_type)) {
+ // The next NAL unit's type is one that usually appears at the start of an 'access unit',
+ // so we assume that the current NAL unit ends an 'access unit':
+ thisNALUnitEndsAccessUnit = True;
+ } else {
+ // The next NAL unit definitely doesn't start a new 'access unit',
+ // which means that the current NAL unit doesn't end one:
+ thisNALUnitEndsAccessUnit = False;
+ }
+ }
+
+ if (thisNALUnitEndsAccessUnit) {
+#ifdef DEBUG
+ fprintf(stderr, "*****This NAL unit ends the current access unit*****\n");
+#endif
+ usingSource()->fPictureEndMarker = True;
+ ++usingSource()->fPictureCount;
+
+ // Note that the presentation time for the next NAL unit will be different:
+ struct timeval& nextPT = usingSource()->fNextPresentationTime; // alias
+ nextPT = usingSource()->fPresentationTime;
+ double nextFraction = nextPT.tv_usec/1000000.0 + 1/usingSource()->fFrameRate;
+ unsigned nextSecsIncrement = (long)nextFraction;
+ nextPT.tv_sec += (long)nextSecsIncrement;
+ nextPT.tv_usec = (long)((nextFraction - nextSecsIncrement)*1000000);
+ }
+ setParseState();
+
+ return curFrameSize();
+ } catch (int /*e*/) {
+#ifdef DEBUG
+ fprintf(stderr, "H264or5VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+ return 0; // the parsing got interrupted
+ }
+}
+
+unsigned removeH264or5EmulationBytes(u_int8_t* to, unsigned toMaxSize,
+ u_int8_t const* from, unsigned fromSize) {
+ unsigned toSize = 0;
+ unsigned i = 0;
+ while (i < fromSize && toSize+1 < toMaxSize) {
+ if (i+2 < fromSize && from[i] == 0 && from[i+1] == 0 && from[i+2] == 3) {
+ to[toSize] = to[toSize+1] = 0;
+ toSize += 2;
+ i += 3;
+ } else {
+ to[toSize] = from[i];
+ toSize += 1;
+ i += 1;
+ }
+ }
+
+ return toSize;
+}
diff --git a/liveMedia/H265VideoFileServerMediaSubsession.cpp b/liveMedia/H265VideoFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..cc97da0
--- /dev/null
+++ b/liveMedia/H265VideoFileServerMediaSubsession.cpp
@@ -0,0 +1,121 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a H265 video file.
+// Implementation
+
+#include "H265VideoFileServerMediaSubsession.hh"
+#include "H265VideoRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+#include "H265VideoStreamFramer.hh"
+
+H265VideoFileServerMediaSubsession*
+H265VideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* fileName,
+ Boolean reuseFirstSource) {
+ return new H265VideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+H265VideoFileServerMediaSubsession::H265VideoFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource)
+ : FileServerMediaSubsession(env, fileName, reuseFirstSource),
+ fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
+}
+
+H265VideoFileServerMediaSubsession::~H265VideoFileServerMediaSubsession() {
+ delete[] fAuxSDPLine;
+}
+
+static void afterPlayingDummy(void* clientData) {
+ H265VideoFileServerMediaSubsession* subsess = (H265VideoFileServerMediaSubsession*)clientData;
+ subsess->afterPlayingDummy1();
+}
+
+void H265VideoFileServerMediaSubsession::afterPlayingDummy1() {
+ // Unschedule any pending 'checking' task:
+ envir().taskScheduler().unscheduleDelayedTask(nextTask());
+ // Signal the event loop that we're done:
+ setDoneFlag();
+}
+
+static void checkForAuxSDPLine(void* clientData) {
+ H265VideoFileServerMediaSubsession* subsess = (H265VideoFileServerMediaSubsession*)clientData;
+ subsess->checkForAuxSDPLine1();
+}
+
+void H265VideoFileServerMediaSubsession::checkForAuxSDPLine1() {
+ nextTask() = NULL;
+
+ char const* dasl;
+ if (fAuxSDPLine != NULL) {
+ // Signal the event loop that we're done:
+ setDoneFlag();
+ } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
+ fAuxSDPLine = strDup(dasl);
+ fDummyRTPSink = NULL;
+
+ // Signal the event loop that we're done:
+ setDoneFlag();
+ } else if (!fDoneFlag) {
+ // try again after a brief delay:
+ int uSecsToDelay = 100000; // 100 ms
+ nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
+ (TaskFunc*)checkForAuxSDPLine, this);
+ }
+}
+
+char const* H265VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
+ if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)
+
+ if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
+ // Note: For H265 video files, the 'config' information (used for several payload-format
+ // specific parameters in the SDP description) isn't known until we start reading the file.
+ // This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
+ // and we need to start reading data from our file until this changes.
+ fDummyRTPSink = rtpSink;
+
+ // Start reading the file:
+ fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);
+
+ // Check whether the sink's 'auxSDPLine()' is ready:
+ checkForAuxSDPLine(this);
+ }
+
+ envir().taskScheduler().doEventLoop(&fDoneFlag);
+
+ return fAuxSDPLine;
+}
+
+FramedSource* H265VideoFileServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+ estBitrate = 500; // kbps, estimate
+
+ // Create the video source:
+ ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(envir(), fFileName);
+ if (fileSource == NULL) return NULL;
+ fFileSize = fileSource->fileSize();
+
+ // Create a framer for the Video Elementary Stream:
+ return H265VideoStreamFramer::createNew(envir(), fileSource);
+}
+
+RTPSink* H265VideoFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* /*inputSource*/) {
+ return H265VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+}
diff --git a/liveMedia/H265VideoFileSink.cpp b/liveMedia/H265VideoFileSink.cpp
new file mode 100644
index 0000000..20e28f6
--- /dev/null
+++ b/liveMedia/H265VideoFileSink.cpp
@@ -0,0 +1,63 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.265 Video File sinks
+// Implementation
+
+#include "H265VideoFileSink.hh"
+#include "OutputFile.hh"
+
+////////// H265VideoFileSink //////////
+
+H265VideoFileSink
+::H265VideoFileSink(UsageEnvironment& env, FILE* fid,
+ char const* sPropVPSStr,
+ char const* sPropSPSStr,
+ char const* sPropPPSStr,
+ unsigned bufferSize, char const* perFrameFileNamePrefix)
+ : H264or5VideoFileSink(env, fid, bufferSize, perFrameFileNamePrefix,
+ sPropVPSStr, sPropSPSStr, sPropPPSStr) {
+}
+
+H265VideoFileSink::~H265VideoFileSink() {
+}
+
+H265VideoFileSink*
+H265VideoFileSink::createNew(UsageEnvironment& env, char const* fileName,
+ char const* sPropVPSStr,
+ char const* sPropSPSStr,
+ char const* sPropPPSStr,
+ unsigned bufferSize, Boolean oneFilePerFrame) {
+ do {
+ FILE* fid;
+ char const* perFrameFileNamePrefix;
+ if (oneFilePerFrame) {
+ // Create the fid for each frame
+ fid = NULL;
+ perFrameFileNamePrefix = fileName;
+ } else {
+ // Normal case: create the fid once
+ fid = OpenOutputFile(env, fileName);
+ if (fid == NULL) break;
+ perFrameFileNamePrefix = NULL;
+ }
+
+ return new H265VideoFileSink(env, fid, sPropVPSStr, sPropSPSStr, sPropPPSStr, bufferSize, perFrameFileNamePrefix);
+ } while (0);
+
+ return NULL;
+}
diff --git a/liveMedia/H265VideoRTPSink.cpp b/liveMedia/H265VideoRTPSink.cpp
new file mode 100644
index 0000000..7fa64f5
--- /dev/null
+++ b/liveMedia/H265VideoRTPSink.cpp
@@ -0,0 +1,182 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.265 video
+// Implementation
+
+#include "H265VideoRTPSink.hh"
+#include "H265VideoStreamFramer.hh"
+#include "Base64.hh"
+#include "BitVector.hh"
+#include "H264VideoRTPSource.hh" // for "parseSPropParameterSets()"
+
+////////// H265VideoRTPSink implementation //////////
+
+H265VideoRTPSink
+::H265VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* vps, unsigned vpsSize,
+ u_int8_t const* sps, unsigned spsSize,
+ u_int8_t const* pps, unsigned ppsSize)
+ : H264or5VideoRTPSink(265, env, RTPgs, rtpPayloadFormat,
+ vps, vpsSize, sps, spsSize, pps, ppsSize) {
+}
+
+H265VideoRTPSink::~H265VideoRTPSink() {
+}
+
+H265VideoRTPSink* H265VideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
+ return new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat);
+}
+
+H265VideoRTPSink* H265VideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* vps, unsigned vpsSize,
+ u_int8_t const* sps, unsigned spsSize,
+ u_int8_t const* pps, unsigned ppsSize) {
+ return new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat,
+ vps, vpsSize, sps, spsSize, pps, ppsSize);
+}
+
+H265VideoRTPSink* H265VideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ char const* sPropVPSStr, char const* sPropSPSStr, char const* sPropPPSStr) {
+ u_int8_t* vps = NULL; unsigned vpsSize = 0;
+ u_int8_t* sps = NULL; unsigned spsSize = 0;
+ u_int8_t* pps = NULL; unsigned ppsSize = 0;
+
+ // Parse each 'sProp' string, extracting and then classifying the NAL unit(s) from each one.
+ // We're 'liberal in what we accept'; it's OK if the strings don't contain the NAL unit type
+ // implied by their names (or if one or more of the strings encode multiple NAL units).
+ SPropRecord* sPropRecords[3];
+ unsigned numSPropRecords[3];
+ sPropRecords[0] = parseSPropParameterSets(sPropVPSStr, numSPropRecords[0]);
+ sPropRecords[1] = parseSPropParameterSets(sPropSPSStr, numSPropRecords[1]);
+ sPropRecords[2] = parseSPropParameterSets(sPropPPSStr, numSPropRecords[2]);
+
+ for (unsigned j = 0; j < 3; ++j) {
+ SPropRecord* records = sPropRecords[j];
+ unsigned numRecords = numSPropRecords[j];
+
+ for (unsigned i = 0; i < numRecords; ++i) {
+ if (records[i].sPropLength == 0) continue; // bad data
+ u_int8_t nal_unit_type = ((records[i].sPropBytes[0])&0x7E)>>1;
+ if (nal_unit_type == 32/*VPS*/) {
+ vps = records[i].sPropBytes;
+ vpsSize = records[i].sPropLength;
+ } else if (nal_unit_type == 33/*SPS*/) {
+ sps = records[i].sPropBytes;
+ spsSize = records[i].sPropLength;
+ } else if (nal_unit_type == 34/*PPS*/) {
+ pps = records[i].sPropBytes;
+ ppsSize = records[i].sPropLength;
+ }
+ }
+ }
+
+ H265VideoRTPSink* result = new H265VideoRTPSink(env, RTPgs, rtpPayloadFormat,
+ vps, vpsSize, sps, spsSize, pps, ppsSize);
+ delete[] sPropRecords[0]; delete[] sPropRecords[1]; delete[] sPropRecords[2];
+
+ return result;
+}
+
+Boolean H265VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+ // Our source must be an appropriate framer:
+ return source.isH265VideoStreamFramer();
+}
+
+char const* H265VideoRTPSink::auxSDPLine() {
+ // Generate a new "a=fmtp:" line each time, using our VPS, SPS and PPS (if we have them),
+ // otherwise parameters from our framer source (in case they've changed since the last time that
+ // we were called):
+ H264or5VideoStreamFramer* framerSource = NULL;
+ u_int8_t* vps = fVPS; unsigned vpsSize = fVPSSize;
+ u_int8_t* sps = fSPS; unsigned spsSize = fSPSSize;
+ u_int8_t* pps = fPPS; unsigned ppsSize = fPPSSize;
+ if (vps == NULL || sps == NULL || pps == NULL) {
+ // We need to get VPS, SPS and PPS from our framer source:
+ if (fOurFragmenter == NULL) return NULL; // we don't yet have a fragmenter (and therefore not a source)
+ framerSource = (H264or5VideoStreamFramer*)(fOurFragmenter->inputSource());
+ if (framerSource == NULL) return NULL; // we don't yet have a source
+
+ framerSource->getVPSandSPSandPPS(vps, vpsSize, sps, spsSize, pps, ppsSize);
+ if (vps == NULL || sps == NULL || pps == NULL) {
+ return NULL; // our source isn't ready
+ }
+ }
+
+ // Set up the "a=fmtp:" SDP line for this stream.
+ u_int8_t* vpsWEB = new u_int8_t[vpsSize]; // "WEB" means "Without Emulation Bytes"
+ unsigned vpsWEBSize = removeH264or5EmulationBytes(vpsWEB, vpsSize, vps, vpsSize);
+ if (vpsWEBSize < 6/*'profile_tier_level' offset*/ + 12/*num 'profile_tier_level' bytes*/) {
+ // Bad VPS size => assume our source isn't ready
+ delete[] vpsWEB;
+ return NULL;
+ }
+ u_int8_t const* profileTierLevelHeaderBytes = &vpsWEB[6];
+ unsigned profileSpace = profileTierLevelHeaderBytes[0]>>6; // general_profile_space
+ unsigned profileId = profileTierLevelHeaderBytes[0]&0x1F; // general_profile_idc
+ unsigned tierFlag = (profileTierLevelHeaderBytes[0]>>5)&0x1; // general_tier_flag
+ unsigned levelId = profileTierLevelHeaderBytes[11]; // general_level_idc
+ u_int8_t const* interop_constraints = &profileTierLevelHeaderBytes[5];
+ char interopConstraintsStr[100];
+ sprintf(interopConstraintsStr, "%02X%02X%02X%02X%02X%02X",
+ interop_constraints[0], interop_constraints[1], interop_constraints[2],
+ interop_constraints[3], interop_constraints[4], interop_constraints[5]);
+ delete[] vpsWEB;
+
+ char* sprop_vps = base64Encode((char*)vps, vpsSize);
+ char* sprop_sps = base64Encode((char*)sps, spsSize);
+ char* sprop_pps = base64Encode((char*)pps, ppsSize);
+
+ char const* fmtpFmt =
+ "a=fmtp:%d profile-space=%u"
+ ";profile-id=%u"
+ ";tier-flag=%u"
+ ";level-id=%u"
+ ";interop-constraints=%s"
+ ";sprop-vps=%s"
+ ";sprop-sps=%s"
+ ";sprop-pps=%s\r\n";
+ unsigned fmtpFmtSize = strlen(fmtpFmt)
+ + 3 /* max num chars: rtpPayloadType */ + 20 /* max num chars: profile_space */
+ + 20 /* max num chars: profile_id */
+ + 20 /* max num chars: tier_flag */
+ + 20 /* max num chars: level_id */
+ + strlen(interopConstraintsStr)
+ + strlen(sprop_vps)
+ + strlen(sprop_sps)
+ + strlen(sprop_pps);
+ char* fmtp = new char[fmtpFmtSize];
+ sprintf(fmtp, fmtpFmt,
+ rtpPayloadType(), profileSpace,
+ profileId,
+ tierFlag,
+ levelId,
+ interopConstraintsStr,
+ sprop_vps,
+ sprop_sps,
+ sprop_pps);
+
+ delete[] sprop_vps;
+ delete[] sprop_sps;
+ delete[] sprop_pps;
+
+ delete[] fFmtpSDPLine; fFmtpSDPLine = fmtp;
+ return fFmtpSDPLine;
+}
diff --git a/liveMedia/H265VideoRTPSource.cpp b/liveMedia/H265VideoRTPSource.cpp
new file mode 100644
index 0000000..a68c322
--- /dev/null
+++ b/liveMedia/H265VideoRTPSource.cpp
@@ -0,0 +1,218 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.265 Video RTP Sources
+// Implementation
+
+#include "H265VideoRTPSource.hh"
+
+////////// H265BufferedPacket and H265BufferedPacketFactory //////////
+
+// A "BufferedPacket" subclass that knows how to enumerate the NAL units
+// carried inside a H.265 Aggregation Packet (AP).
+class H265BufferedPacket: public BufferedPacket {
+public:
+  H265BufferedPacket(H265VideoRTPSource& ourSource);
+  virtual ~H265BufferedPacket();
+
+private: // redefined virtual functions
+  // Returns the size of the next enclosed NAL unit (see implementation below):
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+					 unsigned dataSize);
+private:
+  H265VideoRTPSource& fOurSource; // consulted for the packet's NAL unit type and DON state
+};
+
+// Factory that creates "H265BufferedPacket"s for the "MultiFramedRTPSource" base class:
+class H265BufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+///////// H265VideoRTPSource implementation ////////
+
+H265VideoRTPSource*
+H265VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+			      unsigned char rtpPayloadFormat,
+			      Boolean expectDONFields,
+			      unsigned rtpTimestampFrequency) {
+  // Factory function: heap-allocate a new source and hand it back.
+  H265VideoRTPSource* newSource
+    = new H265VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+			     expectDONFields, rtpTimestampFrequency);
+  return newSource;
+}
+
+H265VideoRTPSource
+::H265VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+		     unsigned char rtpPayloadFormat,
+		     Boolean expectDONFields,
+		     unsigned rtpTimestampFrequency)
+  // Pass a "H265BufferedPacketFactory" to the base class, so that incoming
+  // packets are buffered as "H265BufferedPacket"s (which can split
+  // Aggregation Packets into individual NAL units):
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
+			 new H265BufferedPacketFactory),
+  fExpectDONFields(expectDONFields),
+  // (u_int64_t)(~0) is a sentinel meaning 'no NAL unit seen yet';
+  // see "computeAbsDonFromDON()" below:
+  fPreviousNALUnitDON(0), fCurrentNALUnitAbsDon((u_int64_t)(~0)) {
+}
+
+H265VideoRTPSource::~H265VideoRTPSource() {
+}
+
+// Parses the special header at the start of each incoming RTP packet,
+// setting "fCurrentPacketBeginsFrame", "fCurrentPacketCompletesFrame" and
+// "fCurPacketNALUnitType", and returning (in "resultSpecialHeaderSize") the
+// number of initial payload bytes for the caller to skip.
+// Returns False if the packet is too short to parse.
+// NOTE: For a Fragmentation Unit whose start bit is set, this function
+// rewrites bytes *in place*, reconstructing the original 2-byte NAL header
+// just before the NAL unit payload.
+Boolean H265VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+  u_int16_t DONL = 0; // 'decoding order number' (low word), if present
+  unsigned numBytesToSkip;
+
+  // Check the Payload Header's 'nal_unit_type' for special aggregation or fragmentation packets:
+  if (packetSize < 2) return False;
+  fCurPacketNALUnitType = (headerStart[0]&0x7E)>>1; // bits 1..6 of the first payload byte
+  switch (fCurPacketNALUnitType) {
+  case 48: { // Aggregation Packet (AP)
+    // We skip over the 2-byte Payload Header, and the DONL header (if any).
+    // (The individual NAL units inside the AP are split out later, in
+    // "H265BufferedPacket::nextEnclosedFrameSize()".)
+    if (fExpectDONFields) {
+      if (packetSize < 4) return False;
+      DONL = (headerStart[2]<<8)|headerStart[3];
+      numBytesToSkip = 4;
+    } else {
+      numBytesToSkip = 2;
+    }
+    break;
+  }
+  case 49: { // Fragmentation Unit (FU)
+    // This NALU begins with the 2-byte Payload Header, the 1-byte FU header, and (optionally)
+    // the 2-byte DONL header.
+    // If the start bit is set, we reconstruct the original NAL header at the end of these
+    // 3 (or 5) bytes, and skip over the first 1 (or 3) bytes.
+    if (packetSize < 3) return False;
+    u_int8_t startBit = headerStart[2]&0x80; // from the FU header
+    u_int8_t endBit = headerStart[2]&0x40; // from the FU header
+    if (startBit) {
+      fCurrentPacketBeginsFrame = True;
+
+      u_int8_t nal_unit_type = headerStart[2]&0x3F; // the last 6 bits of the FU header
+      u_int8_t newNALHeader[2];
+      // Preserve the first byte's non-type bits (mask 0x81), substituting the
+      // real NAL unit type carried in the FU header:
+      newNALHeader[0] = (headerStart[0]&0x81)|(nal_unit_type<<1);
+      newNALHeader[1] = headerStart[1];
+
+      if (fExpectDONFields) {
+	if (packetSize < 5) return False;
+	DONL = (headerStart[3]<<8)|headerStart[4];
+	// Overwrite the (now-consumed) DONL bytes in place with the reconstructed NAL header:
+	headerStart[3] = newNALHeader[0];
+	headerStart[4] = newNALHeader[1];
+	numBytesToSkip = 3;
+      } else {
+	// Overwrite the second Payload Header byte and the FU header in place:
+	headerStart[1] = newNALHeader[0];
+	headerStart[2] = newNALHeader[1];
+	numBytesToSkip = 1;
+      }
+    } else {
+      // The start bit is not set, so we skip over all headers:
+      fCurrentPacketBeginsFrame = False;
+      if (fExpectDONFields) {
+	if (packetSize < 5) return False;
+	DONL = (headerStart[3]<<8)|headerStart[4];
+	numBytesToSkip = 5;
+      } else {
+	numBytesToSkip = 3;
+      }
+    }
+    fCurrentPacketCompletesFrame = (endBit != 0);
+    break;
+  }
+  default: {
+    // This packet contains one complete NAL unit:
+    fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame = True;
+    numBytesToSkip = 0;
+    break;
+  }
+  }
+
+  computeAbsDonFromDON(DONL);
+  resultSpecialHeaderSize = numBytesToSkip;
+  return True;
+}
+
+char const* H265VideoRTPSource::MIMEtype() const {
+  // The MIME type for an H.265 video RTP stream:
+  char const* result = "video/H265";
+  return result;
+}
+
+// Updates the current NAL unit's 'absolute decoding order number' ("AbsDon")
+// from a 16-bit DON value, correctly handling 16-bit wraparound.
+void H265VideoRTPSource::computeAbsDonFromDON(u_int16_t DON) {
+  if (!fExpectDONFields) {
+    // Without DON fields in the input stream, we just increment our "AbsDon" count each time:
+    ++fCurrentNALUnitAbsDon;
+  } else {
+    if (fCurrentNALUnitAbsDon == (u_int64_t)(~0)) {
+      // This is the very first NAL unit, so "AbsDon" is just "DON":
+      // ((u_int64_t)(~0) is the sentinel value set by our constructor.)
+      fCurrentNALUnitAbsDon = (u_int64_t)DON;
+    } else {
+      // Use the previous NAL unit's DON and the current DON to compute "AbsDon":
+      // AbsDon[n] = AbsDon[n-1] + (DON[n] - DON[n-1]) mod 2^16
+      // The cast to (signed) "short" makes the 16-bit difference wrap
+      // correctly in either direction:
+      short signedDiff16 = (short)(DON - fPreviousNALUnitDON);
+      int64_t signedDiff64 = (int64_t)signedDiff16;
+      fCurrentNALUnitAbsDon += signedDiff64;
+    }
+
+    fPreviousNALUnitDON = DON; // for next time
+  }
+}
+
+
+////////// H265BufferedPacket and H265BufferedPacketFactory implementation //////////
+
+H265BufferedPacket::H265BufferedPacket(H265VideoRTPSource& ourSource)
+  // Remember the source, so that "nextEnclosedFrameSize()" can consult its state:
+  : fOurSource(ourSource) {
+}
+
+H265BufferedPacket::~H265BufferedPacket() {
+}
+
+// Returns the size of the next enclosed NAL unit within this packet, advancing
+// "framePtr" past any per-NAL-unit headers.  For an Aggregation Packet (AP),
+// each enclosed NAL unit is preceded by an optional 1-byte DOND field and a
+// 2-byte size field; for every other packet type, the whole payload is one unit.
+unsigned H265BufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  unsigned resultNALUSize = 0; // if an error occurs
+
+  switch (fOurSource.fCurPacketNALUnitType) {
+  case 48: { // Aggregation Packet (AP)
+    if (useCount() > 0) {
+      // We're other than the first NAL unit inside this Aggregation Packet.
+      // Update our 'decoding order number':
+      u_int16_t DONL = 0;
+      if (fOurSource.fExpectDONFields) {
+	// There's a 1-byte DOND field next:
+	if (dataSize < 1) break;
+	u_int8_t DOND = framePtr[0];
+	DONL = fOurSource.fPreviousNALUnitDON + (u_int16_t)(DOND + 1);
+	++framePtr;
+	--dataSize;
+      }
+      fOurSource.computeAbsDonFromDON(DONL);
+    }
+
+    // The next 2 bytes are the NAL unit size:
+    if (dataSize < 2) break;
+    resultNALUSize = (framePtr[0]<<8)|framePtr[1];
+    framePtr += 2;
+    // NOTE(review): "dataSize" is not decremented by 2 here, so the clamp below
+    // compares against a size that still counts the 2-byte size field — confirm
+    // that the caller re-derives the remaining size from the advanced "framePtr".
+    break;
+  }
+  default: {
+    // Common case: We use the entire packet data:
+    return dataSize;
+  }
+  }
+
+  return (resultNALUSize <= dataSize) ? resultNALUSize : dataSize;
+}
+
+BufferedPacket* H265BufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+  // We know that "ourSource" is really a "H265VideoRTPSource":
+  H265VideoRTPSource* source = (H265VideoRTPSource*)ourSource;
+  return new H265BufferedPacket(*source);
+}
diff --git a/liveMedia/H265VideoStreamDiscreteFramer.cpp b/liveMedia/H265VideoStreamDiscreteFramer.cpp
new file mode 100644
index 0000000..4bd1758
--- /dev/null
+++ b/liveMedia/H265VideoStreamDiscreteFramer.cpp
@@ -0,0 +1,46 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "H265VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "H265VideoStreamFramer".
+// Implementation
+
+#include "H265VideoStreamDiscreteFramer.hh"
+
+H265VideoStreamDiscreteFramer*
+H265VideoStreamDiscreteFramer
+::createNew(UsageEnvironment& env, FramedSource* inputSource,
+	    Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters) {
+  // Factory function: heap-allocate and return a new framer object.
+  H265VideoStreamDiscreteFramer* framer
+    = new H265VideoStreamDiscreteFramer(env, inputSource,
+					includeStartCodeInOutput,
+					insertAccessUnitDelimiters);
+  return framer;
+}
+
+H265VideoStreamDiscreteFramer
+::H265VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource,
+				Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters)
+  // 265 tells the shared H.264/H.265 base class which codec we're framing:
+  : H264or5VideoStreamDiscreteFramer(265, env, inputSource,
+				     includeStartCodeInOutput, insertAccessUnitDelimiters) {
+}
+
+H265VideoStreamDiscreteFramer::~H265VideoStreamDiscreteFramer() {
+}
+
+// Identifies this object as a H.265 stream framer:
+Boolean H265VideoStreamDiscreteFramer::isH265VideoStreamFramer() const {
+  return True;
+}
diff --git a/liveMedia/H265VideoStreamFramer.cpp b/liveMedia/H265VideoStreamFramer.cpp
new file mode 100644
index 0000000..4fec6c1
--- /dev/null
+++ b/liveMedia/H265VideoStreamFramer.cpp
@@ -0,0 +1,42 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up a H.265 Video Elementary Stream into NAL units.
+// Implementation
+
+#include "H265VideoStreamFramer.hh"
+
+H265VideoStreamFramer* H265VideoStreamFramer
+::createNew(UsageEnvironment& env, FramedSource* inputSource,
+	    Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters) {
+  // Factory: publicly-created framers always get a parser (createParser == True).
+  Boolean const createParser = True;
+  return new H265VideoStreamFramer(env, inputSource, createParser,
+				   includeStartCodeInOutput,
+				   insertAccessUnitDelimiters);
+}
+
+H265VideoStreamFramer
+::H265VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource, Boolean createParser,
+			Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters)
+  // 265 tells the shared H.264/H.265 base class which codec we're framing:
+  : H264or5VideoStreamFramer(265, env, inputSource, createParser,
+			     includeStartCodeInOutput, insertAccessUnitDelimiters) {
+}
+
+H265VideoStreamFramer::~H265VideoStreamFramer() {
+}
+
+// Identifies this object as a H.265 stream framer:
+Boolean H265VideoStreamFramer::isH265VideoStreamFramer() const {
+  return True;
+}
diff --git a/liveMedia/HLSSegmenter.cpp b/liveMedia/HLSSegmenter.cpp
new file mode 100644
index 0000000..b5107ae
--- /dev/null
+++ b/liveMedia/HLSSegmenter.cpp
@@ -0,0 +1,141 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media sink that takes - as input - a MPEG Transport Stream, and outputs a series
+// of MPEG Transport Stream files, each representing a segment of the input stream,
+// suitable for HLS (Apple's "HTTP Live Streaming").
+// Implementation
+
+#include "HLSSegmenter.hh"
+#include "OutputFile.hh"
+#include "MPEG2TransportStreamMultiplexor.hh"
+
+#define TRANSPORT_PACKET_SIZE 188
+#define OUTPUT_FILE_BUFFER_SIZE (TRANSPORT_PACKET_SIZE*100)
+
+HLSSegmenter* HLSSegmenter
+::createNew(UsageEnvironment& env,
+	    unsigned segmentationDuration, char const* fileNamePrefix,
+	    onEndOfSegmentFunc* onEndOfSegmentFunc, void* onEndOfSegmentClientData) {
+  // Factory function: heap-allocate a new segmenter and hand it back.
+  HLSSegmenter* segmenter
+    = new HLSSegmenter(env, segmentationDuration, fileNamePrefix,
+		       onEndOfSegmentFunc, onEndOfSegmentClientData);
+  return segmenter;
+}
+
+HLSSegmenter::HLSSegmenter(UsageEnvironment& env,
+			   unsigned segmentationDuration, char const* fileNamePrefix,
+			   onEndOfSegmentFunc* onEndOfSegmentFunc, void* onEndOfSegmentClientData)
+  : MediaSink(env),
+    fSegmentationDuration(segmentationDuration), fFileNamePrefix(fileNamePrefix),
+    fOnEndOfSegmentFunc(onEndOfSegmentFunc), fOnEndOfSegmentClientData(onEndOfSegmentClientData),
+    fHaveConfiguredUpstreamSource(False), fCurrentSegmentCounter(1), fOutFid(NULL) {
+  // NOTE(review): "fileNamePrefix" appears to be retained without copying —
+  // confirm that the caller keeps the string alive for this object's lifetime.
+  // Allocate enough space for the segment file name
+  // (prefix + "%03u.ts" formatting done in openNextOutputSegment()):
+  fOutputSegmentFileName = new char[strlen(fileNamePrefix) + 20/*more than enough*/];
+
+  // Allocate the output file buffer size:
+  fOutputFileBuffer = new unsigned char[OUTPUT_FILE_BUFFER_SIZE];
+}
+HLSSegmenter::~HLSSegmenter() {
+  // Close any output segment file that's still open; previously this was
+  // skipped, leaking the handle for the last segment if the segmenter was
+  // destroyed before source closure.  ("CloseOutputFile()" tolerates NULL —
+  // "openNextOutputSegment()" already relies on that on its first call.)
+  CloseOutputFile(fOutFid);
+
+  delete[] fOutputFileBuffer;
+  delete[] fOutputSegmentFileName;
+}
+
+void HLSSegmenter::ourEndOfSegmentHandler(void* clientData, double segmentDuration) {
+  // Static trampoline: recover the object pointer, then invoke the member function.
+  HLSSegmenter* segmenter = (HLSSegmenter*)clientData;
+  segmenter->ourEndOfSegmentHandler(segmentDuration);
+}
+
+// Called (via the upstream multiplexor's timed-segmentation callback) at the
+// end of each segment: report it to the client, then start the next one.
+void HLSSegmenter::ourEndOfSegmentHandler(double segmentDuration) {
+  // Note the end of the current segment:
+  if (fOnEndOfSegmentFunc != NULL) {
+    (*fOnEndOfSegmentFunc)(fOnEndOfSegmentClientData, fOutputSegmentFileName, segmentDuration);
+  }
+
+  // Begin the next segment:
+  ++fCurrentSegmentCounter;
+  openNextOutputSegment();
+}
+
+// Closes the current segment file (if any) and opens the next one, named
+// "<prefix><counter>.ts".  Returns False if the new file could not be opened.
+Boolean HLSSegmenter::openNextOutputSegment() {
+  // (Assumed safe for a NULL "fOutFid": this is called with fOutFid == NULL
+  // on first use, as set by our constructor.)
+  CloseOutputFile(fOutFid);
+
+  // Segment numbers are zero-padded to at least 3 digits; the destination
+  // buffer was sized in our constructor to accommodate this:
+  sprintf(fOutputSegmentFileName, "%s%03u.ts", fFileNamePrefix, fCurrentSegmentCounter);
+  fOutFid = OpenOutputFile(envir(), fOutputSegmentFileName);
+
+  return fOutFid != NULL;
+}
+
+void HLSSegmenter::afterGettingFrame(void* clientData, unsigned frameSize,
+				     unsigned numTruncatedBytes,
+				     struct timeval /*presentationTime*/,
+				     unsigned /*durationInMicroseconds*/) {
+  // Static trampoline: recover the object pointer and forward to the member function.
+  HLSSegmenter* segmenter = (HLSSegmenter*)clientData;
+  segmenter->afterGettingFrame(frameSize, numTruncatedBytes);
+}
+
+// Handles a chunk of Transport Stream data delivered by our source: append it
+// to the current segment file, then request the next chunk.
+void HLSSegmenter::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes) {
+  if (numTruncatedBytes > 0) { // Shouldn't happen
+    // Fixed: the arguments are "unsigned", so use "%u" ("%d" on an unsigned
+    // argument is undefined behavior per the C standard):
+    fprintf(stderr, "HLSSegmenter::afterGettingFrame(frameSize %u, numTruncatedBytes %u)\n", frameSize, numTruncatedBytes);
+  }
+
+  // Write the data to our output segment file:
+  fwrite(fOutputFileBuffer, 1, frameSize, fOutFid);
+
+  // Then try getting the next frame:
+  continuePlaying();
+}
+
+void HLSSegmenter::ourOnSourceClosure(void* clientData) {
+  // Static trampoline: forward to the member function.
+  HLSSegmenter* segmenter = (HLSSegmenter*)clientData;
+  segmenter->ourOnSourceClosure();
+}
+
+// Called when our upstream source closes: report the final (partial) segment
+// to the client, then perform the normal sink closure handling.
+void HLSSegmenter::ourOnSourceClosure() {
+  // Note the end of the final segment (currently being written):
+  if (fOnEndOfSegmentFunc != NULL) {
+    // We know that the source is a "MPEG2TransportStreamMultiplexor":
+    MPEG2TransportStreamMultiplexor* multiplexorSource = (MPEG2TransportStreamMultiplexor*)fSource;
+    double segmentDuration = multiplexorSource->currentSegmentDuration();
+
+    (*fOnEndOfSegmentFunc)(fOnEndOfSegmentClientData, fOutputSegmentFileName, segmentDuration);
+  }
+
+  // Handle the closure for real:
+  onSourceClosure();
+}
+
+Boolean HLSSegmenter::sourceIsCompatibleWithUs(MediaSource& source) {
+  // We can be fed only by a MPEG-2 Transport Stream multiplexor:
+  if (source.isMPEG2TransportStreamMultiplexor()) return True;
+  return False;
+}
+
+// Pulls the next chunk of Transport Stream data from our source into the
+// current segment file.  On first use, registers our 'end of segment' handler
+// with the upstream multiplexor and opens the first segment file.
+// Returns False if there's no source or a segment file can't be opened.
+Boolean HLSSegmenter::continuePlaying() {
+  if (fSource == NULL) return False;
+  if (!fHaveConfiguredUpstreamSource) {
+    // We know that the source is a "MPEG2TransportStreamMultiplexor":
+    MPEG2TransportStreamMultiplexor* multiplexorSource = (MPEG2TransportStreamMultiplexor*)fSource;
+
+    // Tell our upstream multiplexor to call our 'end of segment handler' at the end of
+    // each timed segment:
+    multiplexorSource->setTimedSegmentation(fSegmentationDuration, ourEndOfSegmentHandler, this);
+
+    fHaveConfiguredUpstreamSource = True; // from now on
+  }
+  if (fOutFid == NULL && !openNextOutputSegment()) return False;
+
+  fSource->getNextFrame(fOutputFileBuffer, OUTPUT_FILE_BUFFER_SIZE,
+			afterGettingFrame, this,
+			ourOnSourceClosure, this);
+
+  return True;
+}
diff --git a/liveMedia/HMAC_SHA1.cpp b/liveMedia/HMAC_SHA1.cpp
new file mode 100644
index 0000000..a77dd47
--- /dev/null
+++ b/liveMedia/HMAC_SHA1.cpp
@@ -0,0 +1,64 @@
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// This code may not be copied or used in any form without permission from Live Networks, Inc.
+//
+// A function for computing the HMAC_SHA1 digest
+// Implementation
+
+#include "HMAC_SHA1.hh"
+
+#ifndef NO_OPENSSL
+#if defined(__APPLE__)
+#define COMMON_DIGEST_FOR_OPENSSL
+#include <CommonCrypto/CommonDigest.h>
+#endif
+#include <openssl/evp.h>
+
+////////// HMAC_SHA1 implementation //////////
+
+// Computes SHA-1 over "data1" followed (optionally) by "data2", writing the
+// 20-byte digest into "resultDigest" (which must be SHA1_DIGEST_LEN bytes).
+static void sha1(u_int8_t* resultDigest/*must be SHA1_DIGEST_LEN bytes in size*/,
+		 u_int8_t const* data1, unsigned data1Length,
+		 u_int8_t const* data2 = NULL, unsigned data2Length = 0) {
+  EVP_MD_CTX* ctx = EVP_MD_CTX_create();
+  EVP_DigestInit(ctx, EVP_sha1());
+  EVP_DigestUpdate(ctx, data1, data1Length);
+  if (data2 != NULL) {
+    EVP_DigestUpdate(ctx, data2, data2Length);
+  }
+  EVP_DigestFinal(ctx, resultDigest, NULL);
+  // Fixed: free the digest context.  "EVP_DigestFinal()" cleans up the
+  // context's internals but does not free the context itself, so every call
+  // previously leaked it.
+  EVP_MD_CTX_destroy(ctx);
+}
+
+// Computes HMAC-SHA1 (RFC 2104) of "text" keyed by "key", writing the 20-byte
+// result into "resultDigest".  Silently returns if any argument is NULL/empty.
+void HMAC_SHA1(u_int8_t const* key, unsigned keyLength, u_int8_t const* text, unsigned textLength,
+	       u_int8_t* resultDigest/*must be SHA1_DIGEST_LEN bytes in size*/) {
+  if (key == NULL || keyLength == 0 || text == NULL || textLength == 0 || resultDigest == NULL) {
+    return; // sanity check
+  }
+
+  // If the key is longer than the block size, hash it to make it smaller:
+  u_int8_t tmpDigest[SHA1_DIGEST_LEN];
+  if (keyLength > HMAC_BLOCK_SIZE) {
+    sha1(tmpDigest, key, keyLength);
+    key = tmpDigest;
+    keyLength = SHA1_DIGEST_LEN;
+  }
+  // Assert: keyLength <= HMAC_BLOCK_SIZE
+
+  // Initialize the inner and outer pads with the key:
+  // (0x36 and 0x5c are the 'ipad'/'opad' constants defined by RFC 2104.)
+  u_int8_t ipad[HMAC_BLOCK_SIZE];
+  u_int8_t opad[HMAC_BLOCK_SIZE];
+  unsigned i;
+  for (i = 0; i < keyLength; ++i) {
+    ipad[i] = key[i]^0x36;
+    opad[i] = key[i]^0x5c;
+  }
+  for (; i < HMAC_BLOCK_SIZE; ++i) {
+    ipad[i] = 0x36;
+    opad[i] = 0x5c;
+  }
+
+  // Perform the inner hash: SHA1(ipad || text)
+  sha1(tmpDigest, ipad, HMAC_BLOCK_SIZE, text, textLength);
+
+  // Perform the outer hash: SHA1(opad || innerDigest)
+  sha1(resultDigest, opad, HMAC_BLOCK_SIZE, tmpDigest, SHA1_DIGEST_LEN);
+}
+#endif
diff --git a/liveMedia/InputFile.cpp b/liveMedia/InputFile.cpp
new file mode 100644
index 0000000..afed82b
--- /dev/null
+++ b/liveMedia/InputFile.cpp
@@ -0,0 +1,112 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Common routines for opening/closing named input files
+// Implementation
+
+#include "InputFile.hh"
+#include <string.h>
+
+// Opens "fileName" for binary reading.  The special name "stdin" returns the
+// process's standard input instead of opening a file.  Returns NULL on failure
+// (after recording an error message in "env").
+FILE* OpenInputFile(UsageEnvironment& env, char const* fileName) {
+  FILE* fid;
+
+  // Check for a special case file name: "stdin"
+  if (strcmp(fileName, "stdin") == 0) {
+    fid = stdin;
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+    _setmode(_fileno(stdin), _O_BINARY); // convert to binary mode
+#endif
+  } else {
+    fid = fopen(fileName, "rb");
+    if (fid == NULL) {
+      env.setResultMsg("unable to open file \"",fileName, "\"");
+    }
+  }
+
+  return fid;
+}
+
+void CloseInputFile(FILE* fid) {
+  // Close "fid" — but never NULL, and never 'stdin' (we may want to reuse it later):
+  if (fid == NULL) return;
+  if (fid == stdin) return;
+  fclose(fid);
+}
+
+// Returns the size (in bytes) of the open file "fid", or 0 if it can't be
+// determined (e.g., for 'stdin').  When "fileName" is non-NULL (and we're not
+// on Windows CE), stat() is used; otherwise the size is measured by seeking
+// to the end of the file and back.
+u_int64_t GetFileSize(char const* fileName, FILE* fid) {
+  u_int64_t fileSize = 0; // by default
+
+  if (fid != stdin) {
+#if !defined(_WIN32_WCE)
+    if (fileName == NULL) {
+#endif
+      // (Windows CE always takes this seek-based path.)
+      if (fid != NULL && SeekFile64(fid, 0, SEEK_END) >= 0) {
+	fileSize = (u_int64_t)TellFile64(fid);
+	if (fileSize == (u_int64_t)-1) fileSize = 0; // TellFile64() failed
+	SeekFile64(fid, 0, SEEK_SET); // restore the original (start) position
+      }
+#if !defined(_WIN32_WCE)
+    } else {
+      struct stat sb;
+      if (stat(fileName, &sb) == 0) {
+	fileSize = sb.st_size;
+      }
+    }
+#endif
+  }
+
+  return fileSize;
+}
+
+// A portable, 64-bit-capable fseek().  Returns >= 0 on success, -1 on failure.
+// (On non-WinCE Windows, the "_lseeki64()" result is collapsed to 0 or -1.)
+int64_t SeekFile64(FILE *fid, int64_t offset, int whence) {
+  if (fid == NULL) return -1;
+
+  // Clear any sticky error/EOF state and flush buffered data before moving
+  // the underlying file position:
+  clearerr(fid);
+  fflush(fid);
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+  return _lseeki64(_fileno(fid), offset, whence) == (int64_t)-1 ? -1 : 0;
+#else
+#if defined(_WIN32_WCE)
+  return fseek(fid, (long)(offset), whence);
+#else
+  return fseeko(fid, (off_t)(offset), whence);
+#endif
+#endif
+}
+
+// A portable, 64-bit-capable ftell().  Returns the current file position,
+// or -1 on failure (including a NULL "fid").
+int64_t TellFile64(FILE *fid) {
+  if (fid == NULL) return -1;
+
+  // Clear any sticky error/EOF state and flush buffered data, so the reported
+  // position reflects what's actually been written:
+  clearerr(fid);
+  fflush(fid);
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+  return _telli64(_fileno(fid));
+#else
+#if defined(_WIN32_WCE)
+  return ftell(fid);
+#else
+  return ftello(fid);
+#endif
+#endif
+}
+
+Boolean FileIsSeekable(FILE *fid) {
+  // Probe seekability by moving one byte forward; if that fails, it's not seekable:
+  if (SeekFile64(fid, 1, SEEK_CUR) < 0) return False;
+
+  // Undo the probe, restoring the original file position:
+  SeekFile64(fid, -1, SEEK_CUR);
+  return True;
+}
diff --git a/liveMedia/JPEG2000VideoRTPSink.cpp b/liveMedia/JPEG2000VideoRTPSink.cpp
new file mode 100644
index 0000000..d84b678
--- /dev/null
+++ b/liveMedia/JPEG2000VideoRTPSink.cpp
@@ -0,0 +1,78 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+#include "JPEG2000VideoRTPSink.hh"
+
+JPEG2000VideoRTPSink::JPEG2000VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
+  // 98: a dynamic RTP payload type; 90000: RTP timestamp frequency (Hz);
+  // "jpeg2000": the payload format name used in the SDP description:
+  : VideoRTPSink(env, RTPgs, 98, 90000, "jpeg2000") {}
+
+JPEG2000VideoRTPSink::~JPEG2000VideoRTPSink() {}
+
+// Factory function:
+JPEG2000VideoRTPSink*
+JPEG2000VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
+  return new JPEG2000VideoRTPSink(env, RTPgs);
+}
+
+#define JPEG2000_PAYLOAD_HEADER_SIZE 8
+
+// Builds the 8-byte JPEG 2000 payload header (RFC 5371) for the outgoing
+// packet, sets the RTP 'M' bit on the frame's final fragment, and sets the
+// RTP timestamp from the frame's presentation time.
+void JPEG2000VideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+			 unsigned char* frameStart,
+			 unsigned numBytesInFrame,
+			 struct timeval framePresentationTime,
+			 unsigned numRemainingBytes) {
+  // Fill in the Payload Header:
+  u_int8_t payloadHeader[JPEG2000_PAYLOAD_HEADER_SIZE];
+
+  // For "tp", assume for now that the payload is progressively scanned (i.e., tp = 0)
+  // For "MHF", assume that a whole main header is present (i.e., MHF = 3), *unless* we're
+  // the second or later packet of a fragment, in which case we assume that it's not (i.e. MHF = 0)
+  // For "mh_id", set this to 0 (as specified in RFC 5371).
+  // For "T" (Tile field invalidation flag), set this to 0 (we don't set the "tile number" field).
+  payloadHeader[0] = fragmentationOffset > 0 ? 0x00 : 0x30;
+
+  // Set the "priority" field to 255, as specified in RFC 5371:
+  payloadHeader[1] = 255;
+
+  // Set the "tile number" field to 0:
+  payloadHeader[2] = payloadHeader[3] = 0;
+
+  // Set the "reserved" field to 0, as specified in RFC 5371:
+  payloadHeader[4] = 0;
+
+  // Set the (24-bit, big-endian) "fragmentation offset" field to the value of
+  // our "fragmentationOffset" parameter:
+  payloadHeader[5] = (u_int8_t)(fragmentationOffset>>16);
+  payloadHeader[6] = (u_int8_t)(fragmentationOffset>>8);
+  payloadHeader[7] = (u_int8_t)(fragmentationOffset);
+
+  // Write the payload header to the outgoing packet:
+  setSpecialHeaderBytes(payloadHeader, sizeof payloadHeader);
+
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit
+    setMarkerBit();
+  }
+
+  // Also set the RTP timestamp:
+  setTimestamp(framePresentationTime);
+}
+
+// Every outgoing packet carries the fixed-size payload header built in
+// "doSpecialFrameHandling()" above:
+unsigned JPEG2000VideoRTPSink::specialHeaderSize() const {
+  return JPEG2000_PAYLOAD_HEADER_SIZE;
+}
diff --git a/liveMedia/JPEG2000VideoRTPSource.cpp b/liveMedia/JPEG2000VideoRTPSource.cpp
new file mode 100644
index 0000000..cdfd0f1
--- /dev/null
+++ b/liveMedia/JPEG2000VideoRTPSource.cpp
@@ -0,0 +1,64 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+
+#include "JPEG2000VideoRTPSource.hh"
+
+JPEG2000VideoRTPSource*
+JPEG2000VideoRTPSource::createNew(UsageEnvironment& env,
+				  Groupsock* RTPgs,
+				  unsigned char rtpPayloadFormat,
+				  unsigned rtpTimestampFrequency,
+				  char const* sampling) {
+  // Factory function: heap-allocate a new source and hand it back.
+  JPEG2000VideoRTPSource* src
+    = new JPEG2000VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+				 rtpTimestampFrequency, sampling);
+  return src;
+}
+
+JPEG2000VideoRTPSource
+::JPEG2000VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+			 unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency,
+			 char const* sampling)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) {
+  // Make our own copy of the "sampling" string (freed in our destructor):
+  fSampling = strDup(sampling);
+}
+
+JPEG2000VideoRTPSource::~JPEG2000VideoRTPSource() {
+  delete[] fSampling; // allocated via strDup() in our constructor
+}
+
+#define JPEG2000_PAYLOAD_HEADER_SIZE 8
+
+Boolean JPEG2000VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  unsigned char* payload = packet->data();
+
+  // The packet must be large enough to hold a complete payload header:
+  if (packet->dataSize() < JPEG2000_PAYLOAD_HEADER_SIZE) return False;
+
+  // Bytes 5..7 of the payload header carry the 24-bit 'fragment offset';
+  // a zero offset marks the start of a new frame:
+  u_int32_t fragmentOffset
+    = (payload[5]<<16)|(payload[6]<<8)|(payload[7]);
+  fCurrentPacketBeginsFrame = fragmentOffset == 0;
+
+  // The RTP 'M' bit marks the last packet of the frame:
+  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+  resultSpecialHeaderSize = JPEG2000_PAYLOAD_HEADER_SIZE;
+  return True;
+}
+
+char const* JPEG2000VideoRTPSource::MIMEtype() const {
+  // The MIME type for a JPEG 2000 video RTP stream:
+  char const* result = "video/JPEG2000";
+  return result;
+}
diff --git a/liveMedia/JPEGVideoRTPSink.cpp b/liveMedia/JPEGVideoRTPSink.cpp
new file mode 100644
index 0000000..ee5cc15
--- /dev/null
+++ b/liveMedia/JPEGVideoRTPSink.cpp
@@ -0,0 +1,145 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for JPEG video (RFC 2435)
+// Implementation
+
+#include "JPEGVideoRTPSink.hh"
+#include "JPEGVideoSource.hh"
+
+JPEGVideoRTPSink
+::JPEGVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
+  // 26: the static RTP payload type assigned to JPEG video;
+  // 90000: RTP timestamp frequency (Hz); "JPEG": the payload format name:
+  : VideoRTPSink(env, RTPgs, 26, 90000, "JPEG") {
+}
+
+JPEGVideoRTPSink::~JPEGVideoRTPSink() {
+}
+
+// Factory function:
+JPEGVideoRTPSink*
+JPEGVideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
+  return new JPEGVideoRTPSink(env, RTPgs);
+}
+
+Boolean JPEGVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+  // Only JPEG video sources can feed this sink:
+  if (source.isJPEGVideoSource()) return True;
+  return False;
+}
+
+// Tells the base class never to pack a new frame into a partially-filled
+// packet:
+Boolean JPEGVideoRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+				 unsigned /*numBytesInFrame*/) const {
+  // A packet can contain only one frame
+  return False;
+}
+
+// Builds the RFC 2435 special headers for the outgoing packet: the 8-byte
+// main JPEG header; a 4-byte Restart Marker header when "type" is in 64..127;
+// and — on the first fragment only — a Quantization Table header when the
+// Q factor is >= 128.  Also sets the RTP 'M' bit on the final fragment, and
+// the RTP timestamp.  Must stay consistent with "specialHeaderSize()" below.
+void JPEGVideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+			 unsigned char* /*frameStart*/,
+			 unsigned /*numBytesInFrame*/,
+			 struct timeval framePresentationTime,
+			 unsigned numRemainingBytes) {
+  // Our source is known to be a JPEGVideoSource
+  JPEGVideoSource* source = (JPEGVideoSource*)fSource;
+  if (source == NULL) return; // sanity check
+
+  u_int8_t mainJPEGHeader[8]; // the special header
+  u_int8_t const type = source->type();
+
+  mainJPEGHeader[0] = 0; // Type-specific
+  // 24-bit, big-endian fragmentation offset:
+  mainJPEGHeader[1] = fragmentationOffset >> 16;
+  mainJPEGHeader[2] = fragmentationOffset >> 8;
+  mainJPEGHeader[3] = fragmentationOffset;
+  mainJPEGHeader[4] = type;
+  mainJPEGHeader[5] = source->qFactor();
+  mainJPEGHeader[6] = source->width();
+  mainJPEGHeader[7] = source->height();
+  setSpecialHeaderBytes(mainJPEGHeader, sizeof mainJPEGHeader);
+
+  unsigned restartMarkerHeaderSize = 0; // by default
+  if (type >= 64 && type <= 127) {
+    // There is also a Restart Marker Header:
+    restartMarkerHeaderSize = 4;
+    u_int16_t const restartInterval = source->restartInterval(); // should be non-zero
+
+    u_int8_t restartMarkerHeader[4];
+    restartMarkerHeader[0] = restartInterval>>8;
+    restartMarkerHeader[1] = restartInterval&0xFF;
+    restartMarkerHeader[2] = restartMarkerHeader[3] = 0xFF; // F=L=1; Restart Count = 0x3FFF
+
+    setSpecialHeaderBytes(restartMarkerHeader, restartMarkerHeaderSize,
+			  sizeof mainJPEGHeader/* start position */);
+  }
+
+  if (fragmentationOffset == 0 && source->qFactor() >= 128) {
+    // There is also a Quantization Header:
+    u_int8_t precision;
+    u_int16_t length;
+    u_int8_t const* quantizationTables
+      = source->quantizationTables(precision, length);
+
+    unsigned const quantizationHeaderSize = 4 + length;
+    u_int8_t* quantizationHeader = new u_int8_t[quantizationHeaderSize];
+
+    quantizationHeader[0] = 0; // MBZ
+    quantizationHeader[1] = precision;
+    quantizationHeader[2] = length >> 8;
+    quantizationHeader[3] = length&0xFF;
+    if (quantizationTables != NULL) { // sanity check
+      for (u_int16_t i = 0; i < length; ++i) {
+	quantizationHeader[4+i] = quantizationTables[i];
+      }
+    }
+
+    setSpecialHeaderBytes(quantizationHeader, quantizationHeaderSize,
+			  sizeof mainJPEGHeader + restartMarkerHeaderSize/* start position */);
+    delete[] quantizationHeader;
+  }
+
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit:
+    setMarkerBit();
+  }
+
+  // Also set the RTP timestamp:
+  setTimestamp(framePresentationTime);
+}
+
+
+// Returns the number of special-header bytes that "doSpecialFrameHandling()"
+// will prepend to the current outgoing packet; the two functions must agree.
+unsigned JPEGVideoRTPSink::specialHeaderSize() const {
+  // Our source is known to be a JPEGVideoSource
+  JPEGVideoSource* source = (JPEGVideoSource*)fSource;
+  if (source == NULL) return 0; // sanity check
+
+  unsigned headerSize = 8; // by default (the main JPEG header)
+
+  u_int8_t const type = source->type();
+  if (type >= 64 && type <= 127) {
+    // There is also a Restart Marker Header:
+    headerSize += 4;
+  }
+
+  if (curFragmentationOffset() == 0 && source->qFactor() >= 128) {
+    // There is also a Quantization Header (only on the frame's first fragment):
+    u_int8_t dummy;
+    u_int16_t quantizationTablesSize;
+    (void)(source->quantizationTables(dummy, quantizationTablesSize));
+
+    headerSize += 4 + quantizationTablesSize;
+  }
+
+  return headerSize;
+}
diff --git a/liveMedia/JPEGVideoRTPSource.cpp b/liveMedia/JPEGVideoRTPSource.cpp
new file mode 100644
index 0000000..f71987e
--- /dev/null
+++ b/liveMedia/JPEGVideoRTPSource.cpp
@@ -0,0 +1,465 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// JPEG Video (RFC 2435) RTP Sources
+// Implementation
+
+#include "JPEGVideoRTPSource.hh"
+
+////////// JPEGBufferedPacket and JPEGBufferedPacketFactory //////////
+
+class JPEGBufferedPacket: public BufferedPacket {
+public:
+ // True iff this packet's RTP 'M' (marker) bit was set, i.e. it carries
+ // the final fragment of a JPEG frame (set in "processSpecialHeader()"):
+ Boolean completesFrame;
+
+private:
+ // Redefined virtual functions:
+ virtual void reset();
+ virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+ unsigned dataSize);
+};
+
+class JPEGBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+ // Returns a "JPEGBufferedPacket" (rather than a plain "BufferedPacket"),
+ // so that space can be reserved for a synthesized JPEG header:
+ virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+////////// JPEGVideoRTPSource implementation //////////
+
+#define BYTE unsigned char
+#define WORD unsigned
+#define DWORD unsigned long
+
+JPEGVideoRTPSource*
+JPEGVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ unsigned defaultWidth, unsigned defaultHeight) {
+ // Simple factory function: construct the source on the heap and hand it back.
+ JPEGVideoRTPSource* newSource
+ = new JPEGVideoRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency, defaultWidth, defaultHeight);
+ return newSource;
+}
+
+JPEGVideoRTPSource::JPEGVideoRTPSource(UsageEnvironment& env,
+ Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ unsigned defaultWidth, unsigned defaultHeight)
+ // Use our own packet factory, so that each packet reserves room at the
+ // front for the JPEG header we synthesize in "processSpecialHeader()":
+ : MultiFramedRTPSource(env, RTPgs,
+ rtpPayloadFormat, rtpTimestampFrequency,
+ new JPEGBufferedPacketFactory),
+ // Fallback frame dimensions, used when a packet's Width/Height fields are 0:
+ fDefaultWidth(defaultWidth), fDefaultHeight(defaultHeight) {
+}
+
+// Nothing to clean up beyond what "MultiFramedRTPSource" does:
+JPEGVideoRTPSource::~JPEGVideoRTPSource() {
+}
+
+// JPEG marker codes (the byte following an 0xFF in the codestream):
+enum {
+ MARKER_SOF0 = 0xc0, // start-of-frame, baseline scan
+ MARKER_SOI = 0xd8, // start of image
+ MARKER_EOI = 0xd9, // end of image
+ MARKER_SOS = 0xda, // start of scan
+ MARKER_DRI = 0xdd, // restart interval
+ MARKER_DQT = 0xdb, // define quantization tables
+ MARKER_DHT = 0xc4, // huffman tables
+ MARKER_APP_FIRST = 0xe0,
+ MARKER_APP_LAST = 0xef,
+ MARKER_COMMENT = 0xfe,
+};
+
+// The fixed Huffman code tables for baseline JPEG ("lum" = luminance,
+// "chm" = chrominance; "dc"/"ac" = DC/AC coefficients). "codelens[i]"
+// is the number of codes of length i+1 bits; "symbols" lists the coded
+// symbol values in order.
+static unsigned char const lum_dc_codelens[] = {
+ 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static unsigned char const lum_dc_symbols[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static unsigned char const lum_ac_codelens[] = {
+ 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d,
+};
+
+static unsigned char const lum_ac_symbols[] = {
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+static unsigned char const chm_dc_codelens[] = {
+ 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+};
+
+static unsigned char const chm_dc_symbols[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static unsigned char const chm_ac_codelens[] = {
+ 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77,
+};
+
+static unsigned char const chm_ac_symbols[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+// Append a JPEG DHT (Define Huffman Table) segment at "p", advancing "p"
+// past the bytes written. "tableClass" is 0 for DC, 1 for AC; "tableNo"
+// selects table 0 (luminance) or 1 (chrominance).
+static void createHuffmanHeader(unsigned char*& p,
+ unsigned char const* codelens,
+ int ncodes,
+ unsigned char const* symbols,
+ int nsymbols,
+ int tableNo, int tableClass) {
+ // Segment marker:
+ *p++ = 0xff;
+ *p++ = MARKER_DHT;
+ // 2-byte segment length (always < 256 here, so the MSB is 0):
+ *p++ = 0; /* length msb */
+ *p++ = 3 + ncodes + nsymbols; /* length lsb */
+ // Table class/id byte, then the code-length counts, then the symbols:
+ *p++ = (tableClass << 4) | tableNo;
+ for (int i = 0; i < ncodes; ++i) *p++ = codelens[i];
+ for (int i = 0; i < nsymbols; ++i) *p++ = symbols[i];
+}
+
+// Number of bytes of JFIF header that "createJPEGHeader()" will write, for
+// a quantizer-table payload of "qtlen" bytes and restart interval "dri".
+static unsigned computeJPEGHeaderSize(unsigned qtlen, unsigned dri) {
+ // Round an odd "qtlen" down to even (shouldn't happen in practice):
+ qtlen -= qtlen%2;
+
+ // More than 64 bytes of table data means two DQT segments (luma + chroma):
+ unsigned const numQtables = qtlen > 64 ? 2 : 1;
+
+ // 485 fixed bytes, plus 5 bytes of DQT overhead per table, plus the table
+ // data itself, plus 6 bytes for a DRI segment when a restart interval is set:
+ return 485 + numQtables*5 + qtlen + (dri > 0 ? 6 : 0);
+}
+
+// Write a complete baseline JFIF header - SOI, APP0(JFIF), optional DRI,
+// DQT segment(s), SOF0, the four fixed Huffman tables, and SOS - into "buf".
+// "buf" must have room for computeJPEGHeaderSize(qtlen, dri) bytes.
+static void createJPEGHeader(unsigned char* buf, unsigned type,
+ unsigned w, unsigned h,
+ unsigned char const* qtables, unsigned qtlen,
+ unsigned dri) {
+ unsigned char *ptr = buf;
+ unsigned numQtables = qtlen > 64 ? 2 : 1;
+
+ // MARKER_SOI:
+ *ptr++ = 0xFF; *ptr++ = MARKER_SOI;
+
+ // MARKER_APP_FIRST:
+ *ptr++ = 0xFF; *ptr++ = MARKER_APP_FIRST;
+ *ptr++ = 0x00; *ptr++ = 0x10; // size of chunk
+ *ptr++ = 'J'; *ptr++ = 'F'; *ptr++ = 'I'; *ptr++ = 'F'; *ptr++ = 0x00;
+ *ptr++ = 0x01; *ptr++ = 0x01; // JFIF format version (1.1)
+ *ptr++ = 0x00; // no units
+ *ptr++ = 0x00; *ptr++ = 0x01; // Horizontal pixel aspect ratio
+ *ptr++ = 0x00; *ptr++ = 0x01; // Vertical pixel aspect ratio
+ *ptr++ = 0x00; *ptr++ = 0x00; // no thumbnail
+
+ // MARKER_DRI:
+ if (dri > 0) {
+ *ptr++ = 0xFF; *ptr++ = MARKER_DRI;
+ *ptr++ = 0x00; *ptr++ = 0x04; // size of chunk
+ *ptr++ = (BYTE)(dri >> 8); *ptr++ = (BYTE)(dri); // restart interval
+ }
+
+ // MARKER_DQT (luma):
+ // (with two tables, the first half of "qtables" is the luma table)
+ unsigned tableSize = numQtables == 1 ? qtlen : qtlen/2;
+ *ptr++ = 0xFF; *ptr++ = MARKER_DQT;
+ *ptr++ = 0x00; *ptr++ = tableSize + 3; // size of chunk
+ *ptr++ = 0x00; // precision(0), table id(0)
+ memcpy(ptr, qtables, tableSize);
+ qtables += tableSize;
+ ptr += tableSize;
+
+ if (numQtables > 1) {
+ unsigned tableSize = qtlen - qtlen/2;
+ // MARKER_DQT (chroma):
+ *ptr++ = 0xFF; *ptr++ = MARKER_DQT;
+ *ptr++ = 0x00; *ptr++ = tableSize + 3; // size of chunk
+ *ptr++ = 0x01; // precision(0), table id(1)
+ memcpy(ptr, qtables, tableSize);
+ qtables += tableSize;
+ ptr += tableSize;
+ }
+
+ // MARKER_SOF0:
+ *ptr++ = 0xFF; *ptr++ = MARKER_SOF0;
+ *ptr++ = 0x00; *ptr++ = 0x11; // size of chunk
+ *ptr++ = 0x08; // sample precision
+ *ptr++ = (BYTE)(h >> 8);
+ *ptr++ = (BYTE)(h); // number of lines (must be a multiple of 8)
+ *ptr++ = (BYTE)(w >> 8);
+ *ptr++ = (BYTE)(w); // number of columns (must be a multiple of 8)
+ *ptr++ = 0x03; // number of components
+ *ptr++ = 0x01; // id of component
+ *ptr++ = type ? 0x22 : 0x21; // sampling ratio (h,v): 4:2:0 vs 4:2:2
+ *ptr++ = 0x00; // quant table id
+ *ptr++ = 0x02; // id of component
+ *ptr++ = 0x11; // sampling ratio (h,v)
+ *ptr++ = numQtables == 1 ? 0x00 : 0x01; // quant table id
+ *ptr++ = 0x03; // id of component
+ *ptr++ = 0x11; // sampling ratio (h,v)
+ *ptr++ = numQtables == 1 ? 0x00 : 0x01; // quant table id
+
+ // The four fixed Huffman tables (each call advances "ptr"):
+ createHuffmanHeader(ptr, lum_dc_codelens, sizeof lum_dc_codelens,
+ lum_dc_symbols, sizeof lum_dc_symbols, 0, 0);
+ createHuffmanHeader(ptr, lum_ac_codelens, sizeof lum_ac_codelens,
+ lum_ac_symbols, sizeof lum_ac_symbols, 0, 1);
+ createHuffmanHeader(ptr, chm_dc_codelens, sizeof chm_dc_codelens,
+ chm_dc_symbols, sizeof chm_dc_symbols, 1, 0);
+ createHuffmanHeader(ptr, chm_ac_codelens, sizeof chm_ac_codelens,
+ chm_ac_symbols, sizeof chm_ac_symbols, 1, 1);
+
+ // MARKER_SOS:
+ *ptr++ = 0xFF; *ptr++ = MARKER_SOS;
+ *ptr++ = 0x00; *ptr++ = 0x0C; // size of chunk
+ *ptr++ = 0x03; // number of components
+ *ptr++ = 0x01; // id of component
+ *ptr++ = 0x00; // huffman table id (DC, AC)
+ *ptr++ = 0x02; // id of component
+ *ptr++ = 0x11; // huffman table id (DC, AC)
+ *ptr++ = 0x03; // id of component
+ *ptr++ = 0x11; // huffman table id (DC, AC)
+ *ptr++ = 0x00; // start of spectral
+ *ptr++ = 0x3F; // end of spectral
+ *ptr++ = 0x00; // successive approximation bit position (high, low)
+}
+
+// The default 'luma' and 'chroma' quantizer tables, in zigzag order:
+static unsigned char const defaultQuantizers[128] = {
+ // luma table:
+ 16, 11, 12, 14, 12, 10, 16, 14,
+ 13, 14, 18, 17, 16, 19, 24, 40,
+ 26, 24, 22, 22, 24, 49, 35, 37,
+ 29, 40, 58, 51, 61, 60, 57, 51,
+ 56, 55, 64, 72, 92, 78, 64, 68,
+ 87, 69, 55, 56, 80, 109, 81, 87,
+ 95, 98, 103, 104, 103, 62, 77, 113,
+ 121, 112, 100, 120, 92, 101, 103, 99,
+ // chroma table:
+ 17, 18, 18, 24, 21, 24, 47, 26,
+ 26, 47, 99, 66, 56, 66, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99
+};
+
+// Produce 128 bytes of quantizer tables (luma then chroma, zigzag order)
+// by scaling "defaultQuantizers[]" according to the Q factor from the
+// RTP JPEG header. "Q" is clamped to the meaningful range 1..99.
+static void makeDefaultQtables(unsigned char* resultTables, unsigned Q) {
+ // Clamp the quality factor:
+ int factor = Q;
+ if (Q < 1) factor = 1;
+ else if (Q > 99) factor = 99;
+
+ // Map Q to a percentage scale (Q=50 -> 100%; smaller Q -> coarser tables):
+ int const q = (Q < 50) ? (5000 / factor) : (200 - factor*2);
+
+ for (int i = 0; i < 128; ++i) {
+ int scaled = (defaultQuantizers[i]*q + 50)/100;
+ // Keep each entry within the legal 8-bit range 1..255:
+ if (scaled < 1) scaled = 1;
+ else if (scaled > 255) scaled = 255;
+ resultTables[i] = scaled;
+ }
+}
+
+// Parse the RFC 2435 special header(s) at the front of an incoming RTP
+// packet; for the first fragment of each frame, also synthesize a full
+// JFIF header in the buffer space that "JPEGBufferedPacket::reset()"
+// reserved in front of the network data. Returns False if the packet is
+// too short to be well-formed.
+Boolean JPEGVideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+ unsigned char* headerStart = packet->data();
+ unsigned packetSize = packet->dataSize();
+
+ unsigned char* qtables = NULL;
+ unsigned qtlen = 0;
+ unsigned dri = 0;
+
+ // There's at least 8-byte video-specific header
+ /*
+0 1 2 3
+0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| Type-specific | Fragment Offset |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| Type | Q | Width | Height |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ if (packetSize < 8) return False;
+
+ resultSpecialHeaderSize = 8;
+
+ unsigned Offset = (unsigned)((DWORD)headerStart[1] << 16 | (DWORD)headerStart[2] << 8 | (DWORD)headerStart[3]);
+ unsigned Type = (unsigned)headerStart[4];
+ unsigned type = Type & 1;
+ unsigned Q = (unsigned)headerStart[5];
+ // Width/Height are carried in units of 8 pixels:
+ unsigned width = (unsigned)headerStart[6] * 8;
+ unsigned height = (unsigned)headerStart[7] * 8;
+ if ((width == 0 || height == 0) && fDefaultWidth != 0 && fDefaultHeight != 0) {
+ // Use the default width and height parameters instead:
+ width = fDefaultWidth;
+ height = fDefaultHeight;
+ }
+ if (width == 0) width = 256*8; // special case
+ if (height == 0) height = 256*8; // special case
+
+ if (Type > 63) {
+ // Restart Marker header present
+ /*
+0 1 2 3
+0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| Restart Interval |F|L| Restart Count |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ if (packetSize < resultSpecialHeaderSize + 4) return False;
+
+ unsigned RestartInterval = (unsigned)((WORD)headerStart[resultSpecialHeaderSize] << 8 | (WORD)headerStart[resultSpecialHeaderSize + 1]);
+ dri = RestartInterval;
+ resultSpecialHeaderSize += 4;
+ }
+
+ if (Offset == 0) {
+ if (Q > 127) {
+ // Quantization Table header present
+/*
+0 1 2 3
+0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| MBZ | Precision | Length |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+| Quantization Table Data |
+| ... |
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+ if (packetSize < resultSpecialHeaderSize + 4) return False;
+
+ unsigned MBZ = (unsigned)headerStart[resultSpecialHeaderSize];
+ if (MBZ == 0) {
+ // unsigned Precision = (unsigned)headerStart[resultSpecialHeaderSize + 1];
+ unsigned Length = (unsigned)((WORD)headerStart[resultSpecialHeaderSize + 2] << 8 | (WORD)headerStart[resultSpecialHeaderSize + 3]);
+
+ //ASSERT(Length == 128);
+
+ resultSpecialHeaderSize += 4;
+
+ if (packetSize < resultSpecialHeaderSize + Length) return False;
+
+ qtlen = Length;
+ qtables = &headerStart[resultSpecialHeaderSize];
+
+ resultSpecialHeaderSize += Length;
+ }
+ }
+ }
+
+ // If this is the first (or only) fragment of a JPEG frame, then we need
+ // to synthesize a JPEG header, and prepend it to the incoming data.
+ // Hack: We can do this because we allowed space for it in
+ // our special "JPEGBufferedPacket" subclass. We also adjust
+ // "resultSpecialHeaderSize" to compensate for this, by subtracting
+ // the size of the synthesized header. Note that this will cause
+ // "resultSpecialHeaderSize" to become negative, but the code that called
+ // us (in "MultiFramedRTPSource") will handle this properly.
+ if (Offset == 0) {
+ unsigned char newQtables[128];
+ if (qtlen == 0) {
+ // A quantization table was not present in the RTP JPEG header,
+ // so use the default tables, scaled according to the "Q" factor:
+ makeDefaultQtables(newQtables, Q);
+ qtables = newQtables;
+ qtlen = sizeof newQtables;
+ }
+
+ unsigned hdrlen = computeJPEGHeaderSize(qtlen, dri);
+ resultSpecialHeaderSize -= hdrlen; // goes negative
+ headerStart += (int)resultSpecialHeaderSize; // goes backward
+ createJPEGHeader(headerStart, type, width, height, qtables, qtlen, dri);
+ }
+
+ fCurrentPacketBeginsFrame = (Offset == 0);
+
+ // The RTP "M" (marker) bit indicates the last fragment of a frame:
+ ((JPEGBufferedPacket*)packet)->completesFrame
+ = fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+ return True;
+}
+
+// The MIME type for RTP-carried JPEG video (RFC 2435):
+char const* JPEGVideoRTPSource::MIMEtype() const {
+ return "video/JPEG";
+}
+
+////////// JPEGBufferedPacket and JPEGBufferedPacketFactory implementation
+
+void JPEGBufferedPacket::reset() {
+ BufferedPacket::reset();
+
+ // Move our "fHead" and "fTail" forward, to allow space for a synthesized
+ // JPEG header to precede the RTP data that comes in over the network.
+ // ("MAX_JPEG_HEADER_SIZE" is declared elsewhere; presumably it bounds
+ // computeJPEGHeaderSize()'s result - confirm in the header file.)
+ unsigned offset = MAX_JPEG_HEADER_SIZE;
+ if (offset > fPacketSize) offset = fPacketSize; // shouldn't happen
+ fHead = fTail = offset;
+}
+
+unsigned JPEGBufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+ // Normally, the enclosed frame size is just "dataSize". If, however,
+ // the frame does not end with the "EOI" marker, then add this now:
+ // NOTE(review): this writes 2 bytes beyond "dataSize"; it relies on the
+ // packet buffer having spare room past the received data - confirm that
+ // "BufferedPacket"'s buffer allocation guarantees this headroom.
+ if (completesFrame && dataSize >= 2 &&
+ !(framePtr[dataSize-2] == 0xFF && framePtr[dataSize-1] == MARKER_EOI)) {
+ framePtr[dataSize++] = 0xFF;
+ framePtr[dataSize++] = MARKER_EOI;
+ }
+ return dataSize;
+}
+
+BufferedPacket* JPEGBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
+ // Hand back our specialized packet type, which reserves JPEG-header space:
+ return new JPEGBufferedPacket;
+}
diff --git a/liveMedia/JPEGVideoSource.cpp b/liveMedia/JPEGVideoSource.cpp
new file mode 100644
index 0000000..f3344ab
--- /dev/null
+++ b/liveMedia/JPEGVideoSource.cpp
@@ -0,0 +1,45 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// JPEG video sources
+// Implementation
+
+#include "JPEGVideoSource.hh"
+
+// Abstract-ish base class for JPEG video sources; no state of its own:
+JPEGVideoSource::JPEGVideoSource(UsageEnvironment& env)
+ : FramedSource(env) {
+}
+
+// Nothing to clean up beyond what "FramedSource" does:
+JPEGVideoSource::~JPEGVideoSource() {
+}
+
+u_int8_t const* JPEGVideoSource::quantizationTables(u_int8_t& precision,
+ u_int16_t& length) {
+ // Default implementation: no quantization tables are available.
+ // Subclasses that know their tables should override this.
+ precision = 0;
+ length = 0;
+ return NULL;
+}
+
+u_int16_t JPEGVideoSource::restartInterval() {
+ // Default implementation: 0 => no restart markers in use.
+ return 0;
+}
+
+// RTTI-style helper: lets callers safely identify (and downcast to) us:
+Boolean JPEGVideoSource::isJPEGVideoSource() const {
+ return True;
+}
diff --git a/liveMedia/Locale.cpp b/liveMedia/Locale.cpp
new file mode 100644
index 0000000..a6bd6aa
--- /dev/null
+++ b/liveMedia/Locale.cpp
@@ -0,0 +1,60 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Support for temporarily setting the locale (e.g., to "C" or "POSIX") for (e.g.) parsing or printing
+// floating-point numbers in protocol headers, or calling toupper()/tolower() on human-input strings.
+// Implementation
+
+#include "Locale.hh"
+#include <strDup.hh>
+
+Locale::Locale(char const* newLocale, LocaleCategory category) {
+ // Temporarily switch the locale (per-thread where "newlocale()" is
+ // available, else process-wide via "setlocale()") to "newLocale" for the
+ // given category; our destructor restores the previous locale.
+#ifndef LOCALE_NOT_USED
+#ifndef NEWLOCALE_NOT_USED
+ // Fix: initialize the mask, so an out-of-range "category" value can never
+ // pass an indeterminate mask to "newlocale()" (previously uninitialized):
+ int categoryMask = LC_ALL_MASK;
+ switch (category) {
+ case All: { categoryMask = LC_ALL_MASK; break; }
+ case Numeric: { categoryMask = LC_NUMERIC_MASK; break; }
+ }
+ fLocale = newlocale(categoryMask, newLocale, NULL);
+ fPrevLocale = uselocale(fLocale);
+#else
+ // Fallback: remember the old setting (strDup()'d; freed in ~Locale()):
+ int categoryNum = LC_ALL; // likewise initialized defensively
+ switch (category) {
+ case All: { categoryNum = LC_ALL; break; }
+ case Numeric: { categoryNum = LC_NUMERIC; break; }
+ }
+ fCategoryNum = categoryNum;
+ fPrevLocale = strDup(setlocale(fCategoryNum, NULL));
+ setlocale(fCategoryNum, newLocale);
+#endif
+#endif
+}
+
+Locale::~Locale() {
+ // Restore whatever locale was in effect when our constructor ran.
+#ifndef LOCALE_NOT_USED
+#ifndef NEWLOCALE_NOT_USED
+ if (fLocale != (locale_t)0) {
+ uselocale(fPrevLocale);
+ freelocale(fLocale);
+ }
+#else
+ // "fPrevLocale" was strDup()'d by the constructor, so delete[] it here:
+ if (fPrevLocale != NULL) {
+ setlocale(fCategoryNum, fPrevLocale);
+ delete[] fPrevLocale;
+ }
+#endif
+#endif
+}
diff --git a/liveMedia/MIKEY.cpp b/liveMedia/MIKEY.cpp
new file mode 100644
index 0000000..d2d6343
--- /dev/null
+++ b/liveMedia/MIKEY.cpp
@@ -0,0 +1,598 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A data structure that implements a MIKEY message (RFC 3830)
+// Implementation
+
+#include "MIKEY.hh"
+#include <GroupsockHelper.hh> // for our_random32()
+
+////////// MIKEYPayload definition /////////
+
+class MIKEYPayload {
+public:
+ MIKEYPayload(MIKEYState& ourMIKEYState, u_int8_t payloadType);
+ // create with default values
+ MIKEYPayload(MIKEYState& ourMIKEYState, u_int8_t payloadType,
+ u_int8_t const* data, unsigned dataSize);
+ // create as a copy of existing values
+
+ virtual ~MIKEYPayload();
+
+ // Accessors for the payload's raw bytes (may be NULL) and their count:
+ u_int8_t const* data() const { return fData; }
+ unsigned dataSize() const { return fDataSize; }
+
+ // Payloads are chained into a singly-linked list by "MIKEYState":
+ MIKEYPayload* next() const { return fNext; }
+ void setNextPayload(MIKEYPayload* nextPayload);
+
+private:
+ MIKEYState& fOurMIKEYState;
+ u_int8_t fPayloadType; // a "MIKEYPayloadType" value
+ u_int8_t* fData; // presumably owned by us (freed in our destructor) - confirm
+ unsigned fDataSize;
+ MIKEYPayload* fNext;
+};
+
+
+////////// MIKEYState implementation //////////
+
+// MIKEY payload type codes ("next payload" field values), per RFC 3830:
+enum MIKEYPayloadType {
+ KEMAC = 1,
+ PKE = 2,
+ DH = 3,
+ SIGN = 4,
+ T = 5,
+ ID = 6,
+ CERT = 7,
+ CHASH = 8,
+ V = 9,
+ SP = 10,
+ RAND = 11,
+ ERR = 12,
+ KEY_DATA = 13,
+ HDR = 255 // internal tag for the Common Header payload
+};
+
+MIKEYState::MIKEYState()
+ : // Set default encryption/authentication parameters:
+ fEncryptSRTP(True),
+ fEncryptSRTCP(True),
+ fMKI(our_random32()),
+ fUseAuthentication(True),
+
+ fHeaderPayload(NULL), fTailPayload(NULL), fTotalPayloadByteCount(0) {
+ // Fill in our 'key data' (30 bytes) with (pseudo-)random bits, four
+ // bytes (big-endian) at a time. The final call contributes only its
+ // top two bytes (key-data bytes 28-29):
+ for (unsigned i = 0; i < 30; i += 4) {
+ u_int32_t const random32 = our_random32();
+ fKeyData[i] = (u_int8_t)(random32>>24);
+ if (i+1 < 30) fKeyData[i+1] = (u_int8_t)(random32>>16);
+ if (i+2 < 30) fKeyData[i+2] = (u_int8_t)(random32>>8);
+ if (i+3 < 30) fKeyData[i+3] = (u_int8_t)random32;
+ }
+
+ // A pre-shared-key MIKEY message consists of these payloads, in order:
+ addNewPayload(new MIKEYPayload(*this, HDR));
+ addNewPayload(new MIKEYPayload(*this, T));
+ addNewPayload(new MIKEYPayload(*this, RAND));
+ addNewPayload(new MIKEYPayload(*this, SP));
+ addNewPayload(new MIKEYPayload(*this, KEMAC));
+}
+
+MIKEYState::~MIKEYState() {
+ // Payloads form a singly-linked list headed here:
+ delete fHeaderPayload; // which will delete all the other payloads as well
+}
+
+MIKEYState* MIKEYState::createNew(u_int8_t* messageToParse, unsigned messageSize) {
+ // Parse a binary MIKEY message into a new "MIKEYState"; returns NULL if
+ // the message is malformed. Note: takes ownership of "messageToParse"
+ // and always delete[]s it before returning.
+ Boolean parsedOK;
+ MIKEYState* newMIKEYState = new MIKEYState(messageToParse, messageSize, parsedOK);
+
+ if (!parsedOK) {
+ delete newMIKEYState;
+ newMIKEYState = NULL;
+ }
+
+ delete[] messageToParse;
+ return newMIKEYState;
+}
+
+// Serialize all of our payloads into a single newly-allocated buffer,
+// which the caller takes ownership of (and must eventually delete[]).
+// Returns NULL if we have no payload data; otherwise sets "messageSize"
+// to the number of bytes written.
+u_int8_t* MIKEYState::generateMessage(unsigned& messageSize) const {
+ if (fTotalPayloadByteCount == 0) return NULL;
+
+ // ASSERT: fTotalPayloadByteCount == the sum of all of the payloads' "fDataSize"s
+ messageSize = fTotalPayloadByteCount;
+ u_int8_t* const resultMessage = new u_int8_t[messageSize];
+
+ u_int8_t* dest = resultMessage;
+ MIKEYPayload* payload = fHeaderPayload;
+ while (payload != NULL) {
+ u_int8_t const* const src = payload->data();
+ if (src != NULL) { // payloads with no data contribute nothing
+ memcpy(dest, src, payload->dataSize());
+ dest += payload->dataSize();
+ }
+ payload = payload->next();
+ }
+
+ return resultMessage;
+}
+
+// Parsing constructor: build our payload list from a binary MIKEY message.
+// "parsedOK" is set True only if the entire message parsed successfully.
+MIKEYState::MIKEYState(u_int8_t const* messageToParse, unsigned messageSize, Boolean& parsedOK)
+ : // Set encryption/authentication parameters to default values (that may be overwritten
+ // later as we parse the message):
+ fEncryptSRTP(False),
+ fEncryptSRTCP(False),
+ fUseAuthentication(False),
+
+ fHeaderPayload(NULL), fTailPayload(NULL), fTotalPayloadByteCount(0) {
+ parsedOK = False; // unless we learn otherwise
+
+ // Begin by parsing a HDR payload:
+ u_int8_t const* ptr = messageToParse;
+ u_int8_t const* const endPtr = messageToParse + messageSize;
+ u_int8_t nextPayloadType;
+
+ if (!parseHDRPayload(ptr, endPtr, nextPayloadType)) return;
+
+ // Then parse each subsequent payload that we see:
+ // (a "next payload" value of 0 marks the final payload)
+ while (nextPayloadType != 0) {
+ if (!parseNonHDRPayload(ptr, endPtr, nextPayloadType)) return;
+ }
+
+ // We succeeded in parsing all the data:
+ parsedOK = True;
+}
+
+// Append "newPayload" to our singly-linked payload list, and account for
+// its size in "fTotalPayloadByteCount":
+void MIKEYState::addNewPayload(MIKEYPayload* newPayload) {
+ if (fTailPayload != NULL) {
+ fTailPayload->setNextPayload(newPayload);
+ } else {
+ fHeaderPayload = newPayload; // the list was empty; this becomes the head
+ }
+ fTailPayload = newPayload;
+ fTotalPayloadByteCount += newPayload->dataSize();
+}
+
+#define testSize(n) if (ptr + (n) > endPtr) break
+
+// Parse a MIKEY Common Header payload (RFC 3830, section 6.1) at "ptr",
+// record it, and advance "ptr" past it; sets "nextPayloadType" from the
+// header's "next payload" field. Returns False if the data is too short.
+// (The do/while(0) + "testSize()" idiom turns size failures into breaks.)
+Boolean MIKEYState
+::parseHDRPayload(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ do {
+ testSize(10); // fixed portion of the HDR payload
+ nextPayloadType = ptr[2];
+ u_int8_t numCryptoSessions = ptr[8];
+
+ // Each crypto-session map entry occupies 1+4+4 bytes:
+ unsigned payloadSize = 10 + numCryptoSessions*(1+4+4);
+ testSize(payloadSize);
+
+ addNewPayload(new MIKEYPayload(*this, HDR, ptr, payloadSize));
+ ptr += payloadSize;
+
+ return True;
+ } while (0);
+
+ // An error occurred:
+ return False;
+}
+
+// Read a 4-byte big-endian value at "ptr", advancing "ptr" past it:
+static u_int32_t get4Bytes(u_int8_t const*& ptr) {
+ u_int32_t result = ptr[0];
+ result = (result<<8) | ptr[1];
+ result = (result<<8) | ptr[2];
+ result = (result<<8) | ptr[3];
+ ptr += 4;
+ return result;
+}
+
+// Read a 2-byte big-endian value at "ptr", advancing "ptr" past it:
+static u_int16_t get2Bytes(u_int8_t const*& ptr) {
+ u_int16_t const result = (u_int16_t)((ptr[0]<<8) | ptr[1]);
+ ptr += 2;
+ return result;
+}
+
+// Parse one non-HDR payload at "ptr" (whose type is the incoming value of
+// "nextPayloadType"), record it, advance "ptr" past it, and set
+// "nextPayloadType" from the payload's first byte. We understand the
+// T, RAND, SP, and KEMAC payload types; any other type fails the parse.
+Boolean MIKEYState
+::parseNonHDRPayload(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ do {
+ Boolean parseSucceeded = False; // initially
+ u_int8_t const* payloadStart = ptr;
+ unsigned payloadSize = 0;
+
+ testSize(1);
+ u_int8_t ourPayloadType = nextPayloadType;
+ nextPayloadType = *ptr++;
+
+ // The parsing depends on "ourPayloadType":
+ switch (ourPayloadType) {
+ case T: { // RFC 3830, section 6.6
+ testSize(1);
+ u_int8_t TS_type = *ptr++;
+ unsigned TS_value_len = 0;
+ switch (TS_type) {
+ case 0: // NTP-UTC
+ case 1: { // NTP
+ TS_value_len = 8;
+ break;
+ }
+ case 2: { // COUNTER
+ TS_value_len = 4;
+ break;
+ }
+ }
+ if (TS_value_len == 0) break; // unknown TS_type
+
+ testSize(TS_value_len);
+ payloadSize = 2 + TS_value_len;
+ parseSucceeded = True;
+ break;
+ }
+ case RAND: { // RFC 3830, section 6.11
+ testSize(1);
+ u_int8_t RAND_len = *ptr++;
+
+ testSize(RAND_len);
+ payloadSize = 2 + RAND_len;
+ parseSucceeded = True;
+ break;
+ }
+ case SP: { // RFC 3830, section 6.10
+ testSize(4);
+ ++ptr; // skip over "Policy no"
+ u_int8_t protType = *ptr++;
+ if (protType != 0/*SRTP*/) break; // unsupported protocol type
+
+ u_int16_t policyParam_len = get2Bytes(ptr);
+
+ testSize(policyParam_len);
+ payloadSize = 5 + policyParam_len;
+ u_int8_t const* payloadEndPtr = payloadStart + payloadSize;
+ // Look through the "Policy param" section, making sure that:
+ // - each of the "length" fields make sense
+ // - for "type"s that we understand, the values make sense
+ Boolean parsedPolicyParamSection = False;
+ while (1) {
+ testSize(2);
+ u_int8_t ppType = *ptr++;
+ u_int8_t ppLength = *ptr++;
+
+ testSize(ppLength);
+ if (ptr+ppLength > payloadEndPtr) break; // bad "length"s - too big for the payload
+
+ // Check types that we understand:
+ Boolean policyIsOK = False; // until we learn otherwise
+ switch (ppType) {
+ case 0: { // Encryption algorithm: we handle only NULL and AES-CM
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value > 1) break; // unsupported algorithm
+ if (value > 0) fEncryptSRTP = fEncryptSRTCP = True;
+ // Note: these might get changed by a subsequent "off/on" entry
+ policyIsOK = True;
+ break;
+ }
+ case 1: { // Session Encr. key length
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value != 16) break; // bad/unsupported value
+ policyIsOK = True;
+ break;
+ }
+ case 2: { // Authentication algorithm: we handle only NULL and HMAC-SHA-1
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value > 1) break; // unsupported algorithm
+ if (value > 0) fUseAuthentication = True;
+ // Note: this might get changed by a subsequent "off/on" entry
+ policyIsOK = True;
+ break;
+ }
+ case 3: { // Session Auth. key length
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value != 20) break; // bad/unsupported value
+ policyIsOK = True;
+ break;
+ }
+ case 4: { // Session Salt key length
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value != 14) break; // bad/unsupported value
+ policyIsOK = True;
+ break;
+ }
+ case 7: { // SRTP encryption off/on
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value > 1) break; // bad/unsupported value
+ fEncryptSRTP = value;
+ policyIsOK = True;
+ break;
+ }
+ case 8: { // SRTCP encryption off/on
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value > 1) break; // bad/unsupported value
+ fEncryptSRTCP = value;
+ policyIsOK = True;
+ break;
+ }
+ case 10: { // SRTP authentication off/on
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value > 1) break; // bad/unsupported value
+ fUseAuthentication = value;
+ policyIsOK = True;
+ break;
+ }
+ case 11: { // Authentication tag length
+ if (ppLength != 1) break;
+ u_int8_t value = ptr[0];
+ if (value != 10) break; // bad/unsupported value
+ policyIsOK = True;
+ break;
+ }
+ default: { // a policy type that we don't handle; still OK
+ policyIsOK = True;
+ break;
+ }
+ }
+ if (!policyIsOK) break;
+
+ ptr += ppLength;
+ if (ptr == payloadEndPtr) {
+ // We've successfully checked all of the "Policy param"s
+ parsedPolicyParamSection = True;
+ break;
+ }
+ }
+ if (!parsedPolicyParamSection) break;
+ parseSucceeded = True;
+ break;
+ }
+ case KEMAC: { // RFC 3830, section 6.2
+ testSize(3);
+ u_int8_t encrAlg = *ptr++;
+ // We currently support only 'NULL' encryption on the key data:
+ if (encrAlg != 0/*NULL*/) break; // unknown or unsupported key encryption
+
+ u_int16_t encrDataLen = get2Bytes(ptr);
+
+ testSize(encrDataLen);
+ // Parse the 'key data sub-payload':
+ {
+ u_int8_t const* subPtr = ptr;
+ if (encrDataLen < 4) break; // not enough space
+ ++subPtr; // skip over the "Next payload" field
+
+ // Check the "Type" and "KV" fields; we support only TEK and SPI/MKI
+ // Note: This means that we'll reject the "a=key-mgmt" SDP MIKEY data from
+ // an Axis camera, because that doesn't specify SPI/MKI. But that's what
+ // we want, because the Axis camera's "a=key-mgmt" SDP MIKEY data is meant
+ // to be ignored by clients.
+ if (*subPtr++ != ((2<<4)|1)) break; // Type 2 (TEK) | KV 1 (SPI/MKI)
+ u_int16_t keyDataLen = get2Bytes(subPtr);
+ // The key data length must be 30 (encryption key length (16) + salt length (14)):
+ if (keyDataLen != 30) break;
+
+ // Make sure we have enough space for the key data and the "SPI Length" field:
+ if (4+keyDataLen+1 > encrDataLen) break;
+ // Record the key data:
+ memmove(fKeyData, subPtr, keyDataLen);
+ subPtr += keyDataLen;
+
+ // Check the "SPI Length"; we support only length 4:
+ u_int8_t SPILength = *subPtr++;
+ if (SPILength != 4) break;
+
+ // Make a note of the MKI (the next 4 bytes):
+ if (4+keyDataLen+1+SPILength > encrDataLen) break;
+ fMKI = get4Bytes(subPtr);
+ }
+ ptr += encrDataLen;
+
+ testSize(1);
+ u_int8_t macAlg = *ptr++;
+ unsigned macLen;
+ // We currently support only a 'NULL' MAC on the key data:
+ if (macAlg == 0/*NULL*/) macLen = 0;
+ else break; // unknown or unsupported MAC algorithm => parse fails
+
+ testSize(macLen);
+ payloadSize = 4 + encrDataLen + 1 + macLen;
+ parseSucceeded = True;
+ break;
+ }
+ default: {
+ // Unknown payload type. The parsing fails.
+ break;
+ }
+ }
+ if (!parseSucceeded) break;
+
+ addNewPayload(new MIKEYPayload(*this, ourPayloadType, payloadStart, payloadSize));
+ ptr = payloadStart + payloadSize;
+
+ return True;
+ } while (0);
+
+ // An error occurred:
+ return False;
+}
+
+
+////////// MIKEYPayload implementation //////////
+
+// Appends a 32-bit word to *p in network (big-endian) byte order; advances p by 4.
+static void addWord(u_int8_t*& p, u_int32_t word) {
+ *p++ = word>>24; *p++ = word>>16; *p++ = word>>8; *p++ = word;
+}
+
+// Appends a 16-bit value to *p in network (big-endian) byte order; advances p by 2.
+static void addHalfWord(u_int8_t*& p, u_int16_t halfWord) {
+ *p++ = halfWord>>8; *p++ = halfWord;
+}
+
+// Appends a MIKEY SRTP "Policy param" TLV (RFC 3830, section 6.10) whose value
+// is exactly one byte: {type, length=1, value}; advances p by 3.
+static void add1BytePolicyParam(u_int8_t*& p, u_int8_t type, u_int8_t value) {
+ *p++ = type;
+ *p++ = 1; // length
+ *p++ = value;
+}
+
+// Constructs an outgoing MIKEY payload of the given type, encoding it into a
+// newly-allocated "fData" buffer per RFC 3830. Each branch's "fDataSize" must
+// equal the exact number of bytes it writes. The 'next payload type' byte is
+// left 0 until setNextPayload() is later called.
+MIKEYPayload::MIKEYPayload(MIKEYState& ourMIKEYState, u_int8_t payloadType)
+ : fOurMIKEYState(ourMIKEYState), fPayloadType(payloadType), fNext(NULL) {
+ switch (payloadType) {
+ case HDR: { // RFC 3830, section 6.1
+ // 19 bytes = version+msgType+nextPayload+V/PRF (4) + CSB ID (4)
+ //          + #CS+mapType+policyNo (3) + SSRC (4) + ROC (4)
+ fDataSize = 19;
+ fData = new u_int8_t[fDataSize];
+ u_int8_t* p = fData;
+ *p++ = 1; // version
+ *p++ = 0; // Initiator's pre-shared key message
+ *p++ = 0; // no next payload (initially)
+ *p++ = 0; // V=0; PRF func: MIKEY-1
+ u_int32_t const CSB_ID = our_random32();
+ addWord(p, CSB_ID);
+ *p++ = 1; // #CS: 1
+ *p++ = 0; // CS ID map type: SRTP-ID
+ *p++ = 0; // Policy_no_1
+ addWord(p, our_random32()); // SSRC_1
+ addWord(p, 0x00000000); // ROC_1
+ break;
+ }
+ case T: { // RFC 3830, section 6.6
+ fDataSize = 10;
+ fData = new u_int8_t[fDataSize];
+ u_int8_t* p = fData;
+ *p++ = 0; // no next payload (initially)
+ *p++ = 0; // TS type: NTP-UTC
+
+ // Get the current time, and convert it to a NTP-UTC time:
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+ u_int32_t ntpSeconds = timeNow.tv_sec + 0x83AA7E80; // 1970 epoch -> 1900 epoch
+ addWord(p, ntpSeconds);
+ double fractionalPart = (timeNow.tv_usec/15625.0)*0x04000000; // 2^32/10^6
+ u_int32_t ntpFractionOfSecond = (u_int32_t)(fractionalPart+0.5); // round
+ addWord(p, ntpFractionOfSecond);
+ break;
+ }
+ case RAND: { // RFC 3830, section 6.11
+ fDataSize = 18;
+ fData = new u_int8_t[fDataSize];
+ u_int8_t* p = fData;
+ *p++ = 0; // no next payload (initially)
+ unsigned const numRandomWords = 4; // number of 32-bit words making up the RAND value
+ *p++ = 4*numRandomWords; // RAND len (in bytes)
+ for (unsigned i = 0; i < numRandomWords; ++i) {
+ u_int32_t randomNumber = our_random32();
+ addWord(p, randomNumber);
+ }
+ break;
+ }
+ case SP: { // RFC 3830, section 6.10
+ fDataSize = 32;
+ fData = new u_int8_t[fDataSize];
+ u_int8_t* p = fData;
+ *p++ = 0; // no next payload (initially)
+ *p++ = 0; // Policy number
+ *p++ = 0; // Protocol type: SRTP
+ // 27 bytes of policy parameters = 9 one-byte params x 3 bytes each:
+ u_int16_t policyParamLen = 27;
+ addHalfWord(p, policyParamLen);
+ // Now add the SRTP policy parameters:
+ add1BytePolicyParam(p, 0/*Encryption algorithm*/,
+ (fOurMIKEYState.encryptSRTP()||fOurMIKEYState.encryptSRTCP())
+ ? 1/*AES-CM*/ : 0/*NULL*/);
+ add1BytePolicyParam(p, 1/*Session Encryption key length*/, 16);
+ add1BytePolicyParam(p, 2/*Authentication algorithm*/,
+ fOurMIKEYState.useAuthentication()
+ ? 1/*HMAC-SHA-1*/ : 0/*NULL*/);
+ add1BytePolicyParam(p, 3/*Session Authentication key length*/, 20);
+ add1BytePolicyParam(p, 4/*Session Salt key length*/, 14);
+ add1BytePolicyParam(p, 7/*SRTP encryption off/on*/, fOurMIKEYState.encryptSRTP());
+ add1BytePolicyParam(p, 8/*SRTCP encryption off/on*/, fOurMIKEYState.encryptSRTCP());
+ add1BytePolicyParam(p, 10/*SRTP authentication off/on*/, fOurMIKEYState.useAuthentication());
+ add1BytePolicyParam(p, 11/*Authentication tag length*/, 10);
+ break;
+ }
+ case KEMAC: { // RFC 3830, section 6.2
+ // 44 bytes = nextPayload+encrAlg+encrDataLen (4)
+ //          + encrypted data (39 = 1+1+2+30+1+4) + MAC alg (1)
+ fDataSize = 44;
+ fData = new u_int8_t[fDataSize];
+ u_int8_t* p = fData;
+ *p++ = 0; // no next payload
+ *p++ = 0; // Encr alg (NULL)
+ u_int16_t encrDataLen = 39;
+ addHalfWord(p, encrDataLen);
+ { // Key data sub-payload (RFC 3830, section 6.13):
+ *p++ = 0; // no next payload
+ *p++ = (2<<4)|1; // Type 2 (TEK) | KV 1 (SPI/MKI)
+
+ // Key data len:
+ u_int16_t const keyDataLen = 30;
+ addHalfWord(p, keyDataLen);
+
+ // Key data:
+ memcpy(p, fOurMIKEYState.keyData(), keyDataLen);
+ p += keyDataLen;
+
+ { // KV data (for SPI/MKI) (RFC 3830, section 6.14):
+ *p++ = 4; // SPI/MKI Length
+ addWord(p, fOurMIKEYState.MKI());
+ }
+ }
+ *p++ = 0; // MAC alg (NULL)
+ break;
+ }
+ default: {
+ // Unused payload type. Just in case, allocate 1 byte, for the
+ // presumed 'next payload type' field.
+ fDataSize = 1;
+ fData = new u_int8_t[fDataSize];
+ fData[0] = 0;
+ break;
+ }
+ }
+}
+
+// Constructs a payload from already-encoded bytes (used when parsing an
+// incoming MIKEY message); takes a private copy of "data".
+MIKEYPayload::MIKEYPayload(MIKEYState& ourMIKEYState, u_int8_t payloadType,
+ u_int8_t const* data, unsigned dataSize)
+ : fOurMIKEYState(ourMIKEYState), fPayloadType(payloadType),
+ fDataSize(dataSize), fNext(NULL) {
+ fData = new u_int8_t[fDataSize];
+ memcpy(fData, data, fDataSize);
+}
+
+// Destructor. Frees our encoded data, and (recursively, via "fNext") every
+// subsequent payload in the chain.
+MIKEYPayload::~MIKEYPayload() {
+ delete[] fData; // fix: both constructors allocate fData with new[]; it was previously leaked
+ delete fNext;
+}
+
+// Links "nextPayload" after us, and records its payload type in our encoded
+// 'next payload type' field. That field lives at a type-specific offset:
+// byte 2 for HDR (after version and msg-type), byte 0 for all other payloads.
+void MIKEYPayload::setNextPayload(MIKEYPayload* nextPayload) {
+ fNext = nextPayload;
+
+ // We also need to set the 'next payload type' field in our data:
+ u_int8_t nextPayloadType = nextPayload->fPayloadType;
+
+ switch (fPayloadType) {
+ case HDR: {
+ fData[2] = nextPayloadType;
+ break;
+ }
+ default: {
+ if (fData != NULL) fData[0] = nextPayloadType;
+ break;
+ }
+ }
+}
diff --git a/liveMedia/MP3ADU.cpp b/liveMedia/MP3ADU.cpp
new file mode 100644
index 0000000..d7deb79
--- /dev/null
+++ b/liveMedia/MP3ADU.cpp
@@ -0,0 +1,634 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// 'ADU' MP3 streams (for improved loss-tolerance)
+// Implementation
+
+#include "MP3ADU.hh"
+#include "MP3ADUdescriptor.hh"
+#include "MP3Internals.hh"
+#include <string.h>
+
+#ifdef TEST_LOSS
+#include "GroupsockHelper.hh"
+#endif
+
+// Segment data structures, used in the implementation below:
+
+#define SegmentBufSize 2000 /* conservatively high */
+
+// One entry of a "SegmentQueue": the raw bytes of a single MP3 frame (or ADU),
+// plus the parameters parsed from it that are needed for MP3<->ADU conversion.
+class Segment {
+public:
+ unsigned char buf[SegmentBufSize];
+ // Start of the frame proper, skipping any leading ADU descriptor:
+ unsigned char* dataStart() { return &buf[descriptorSize]; }
+ unsigned frameSize; // if it's a non-ADU frame
+ unsigned dataHere(); // if it's a non-ADU frame
+
+ // Number of ADU-descriptor bytes at the front of "buf" (0 if none):
+ unsigned descriptorSize;
+ static unsigned const headerSize;
+ unsigned sideInfoSize, aduSize;
+ // How many bytes of earlier frames' data this ADU's payload reaches back into:
+ unsigned backpointer;
+
+ struct timeval presentationTime;
+ unsigned durationInMicroseconds;
+};
+
+unsigned const Segment::headerSize = 4;
+
+#define SegmentQueueSize 20
+
+// A fixed-size circular queue of Segments, shared by both conversion filters.
+// "fHeadIndex" is the oldest entry; "fNextFreeIndex" is one past the newest.
+// Because head==nextFree is ambiguous (empty vs. full), the two cases are
+// disambiguated by whether any payload data is currently queued.
+class SegmentQueue {
+public:
+ SegmentQueue(Boolean directionIsToADU, Boolean includeADUdescriptors)
+ : fDirectionIsToADU(directionIsToADU),
+ fIncludeADUdescriptors(includeADUdescriptors) {
+ reset();
+ }
+
+ Segment s[SegmentQueueSize];
+
+ unsigned headIndex() {return fHeadIndex;}
+ Segment& headSegment() {return s[fHeadIndex];}
+
+ unsigned nextFreeIndex() {return fNextFreeIndex;}
+ Segment& nextFreeSegment() {return s[fNextFreeIndex];}
+ Boolean isEmpty() {return isEmptyOrFull() && totalDataSize() == 0;}
+ Boolean isFull() {return isEmptyOrFull() && totalDataSize() > 0;}
+
+ // Circular-index helpers (wrap modulo the queue size):
+ static unsigned nextIndex(unsigned ix) {return (ix+1)%SegmentQueueSize;}
+ static unsigned prevIndex(unsigned ix) {return (ix+SegmentQueueSize-1)%SegmentQueueSize;}
+
+ unsigned totalDataSize() {return fTotalDataSize;}
+
+ // Asynchronously reads one frame from "inputSource" into the next free slot:
+ void enqueueNewSegment(FramedSource* inputSource, FramedSource* usingSource);
+
+ Boolean dequeue();
+
+ Boolean insertDummyBeforeTail(unsigned backpointer);
+
+ void reset() { fHeadIndex = fNextFreeIndex = fTotalDataSize = 0; }
+
+private:
+ static void sqAfterGettingSegment(void* clientData,
+ unsigned numBytesRead,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+ Boolean sqAfterGettingCommon(Segment& seg, unsigned numBytesRead);
+ Boolean isEmptyOrFull() {return headIndex() == nextFreeIndex();}
+
+ unsigned fHeadIndex, fNextFreeIndex, fTotalDataSize;
+
+ // The following is used for asynchronous reads:
+ FramedSource* fUsingSource;
+
+ // This tells us whether the direction in which we're being used
+ // is MP3->ADU, or vice-versa. (This flag is used for debugging output.)
+ Boolean fDirectionIsToADU;
+
+ // The following is true iff we're used to enqueue incoming
+ // ADU frames, and these have an ADU descriptor in front
+ Boolean fIncludeADUdescriptors;
+};
+
+////////// ADUFromMP3Source //////////
+
+// Constructor: wraps "inputSource" (an MP3 source) with a queue used to
+// assemble ADUs from successive MP3 frames. fScale==1 means deliver every ADU.
+ADUFromMP3Source::ADUFromMP3Source(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors)
+ : FramedFilter(env, inputSource),
+ fAreEnqueueingMP3Frame(False),
+ fSegments(new SegmentQueue(True /* because we're MP3->ADU */,
+ False /*no descriptors in incoming frames*/)),
+ fIncludeADUdescriptors(includeADUdescriptors),
+ fTotalDataSizeBeforePreviousRead(0), fScale(1), fFrameCounter(0) {
+}
+
+// Destructor: releases the segment queue allocated in the constructor.
+ADUFromMP3Source::~ADUFromMP3Source() {
+ delete fSegments;
+}
+
+
+// MIME type of our output: 'robust' (ADU-framed) MPEG audio.
+char const* ADUFromMP3Source::MIMEtype() const {
+ return "audio/MPA-ROBUST";
+}
+
+// Factory function. Returns NULL (with an env result message set) if
+// "inputSource" is not an MPEG audio source.
+ADUFromMP3Source* ADUFromMP3Source::createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors) {
+ // The source must be a MPEG audio source:
+ if (strcmp(inputSource->MIMEtype(), "audio/MPEG") != 0) {
+ env.setResultMsg(inputSource->name(), " is not an MPEG audio source");
+ return NULL;
+ }
+
+ return new ADUFromMP3Source(env, inputSource, includeADUdescriptors);
+}
+
+// Discards all queued MP3 frames (e.g. after a seek or input discontinuity).
+void ADUFromMP3Source::resetInput() {
+ fSegments->reset();
+}
+
+// Sets the playback scale: only every "scale"-th ADU is delivered (used for
+// fast-forward). Rejects non-positive scales.
+Boolean ADUFromMP3Source::setScaleFactor(int scale) {
+ if (scale < 1) return False;
+ fScale = scale;
+ return True;
+}
+
+// Alternates between two states: first enqueue a freshly-read MP3 frame
+// (asynchronously), then - on re-entry from the queue's completion handler -
+// try to deliver an ADU built from the queued frames.
+void ADUFromMP3Source::doGetNextFrame() {
+ if (!fAreEnqueueingMP3Frame) {
+ // Arrange to enqueue a new MP3 frame:
+ fTotalDataSizeBeforePreviousRead = fSegments->totalDataSize();
+ fAreEnqueueingMP3Frame = True;
+ fSegments->enqueueNewSegment(fInputSource, this);
+ } else {
+ // Deliver an ADU from a previously-read MP3 frame:
+ fAreEnqueueingMP3Frame = False;
+
+ if (!doGetNextFrame1()) {
+ // An internal error occurred; act as if our source went away:
+ handleClosure();
+ }
+ }
+}
+
+// Tries to output one ADU assembled from the queued MP3 frames. Returns False
+// only on an unrecoverable error (client buffer too small); returns True both
+// when an ADU was delivered and when another input read had to be scheduled.
+Boolean ADUFromMP3Source::doGetNextFrame1() {
+ // First, check whether we have enough previously-read data to output an
+ // ADU for the last-read MP3 frame:
+ unsigned tailIndex;
+ Segment* tailSeg;
+ Boolean needMoreData;
+
+ if (fSegments->isEmpty()) {
+ needMoreData = True;
+ tailSeg = NULL; tailIndex = 0; // unneeded, but stops compiler warnings
+ } else {
+ tailIndex = SegmentQueue::prevIndex(fSegments->nextFreeIndex());
+ tailSeg = &(fSegments->s[tailIndex]);
+
+ needMoreData
+ = fTotalDataSizeBeforePreviousRead < tailSeg->backpointer // bp points back too far
+ || tailSeg->backpointer + tailSeg->dataHere() < tailSeg->aduSize; // not enough data
+ }
+
+ if (needMoreData) {
+ // We don't have enough data to output an ADU from the last-read MP3
+ // frame, so need to read another one and try again:
+ doGetNextFrame();
+ return True;
+ }
+
+ // Output an ADU from the tail segment:
+ fFrameSize = tailSeg->headerSize+tailSeg->sideInfoSize+tailSeg->aduSize;
+ fPresentationTime = tailSeg->presentationTime;
+ fDurationInMicroseconds = tailSeg->durationInMicroseconds;
+ unsigned descriptorSize
+ = fIncludeADUdescriptors ? ADUdescriptor::computeSize(fFrameSize) : 0;
+#ifdef DEBUG
+ fprintf(stderr, "m->a:outputting ADU %d<-%d, nbr:%d, sis:%d, dh:%d, (descriptor size: %d)\n", tailSeg->aduSize, tailSeg->backpointer, fFrameSize, tailSeg->sideInfoSize, tailSeg->dataHere(), descriptorSize);
+#endif
+ if (descriptorSize + fFrameSize > fMaxSize) {
+ envir() << "ADUFromMP3Source::doGetNextFrame1(): not enough room ("
+ << descriptorSize + fFrameSize << ">"
+ << fMaxSize << ")\n";
+ fFrameSize = 0;
+ return False;
+ }
+
+ unsigned char* toPtr = fTo;
+ // output the ADU descriptor:
+ if (fIncludeADUdescriptors) {
+ fFrameSize += ADUdescriptor::generateDescriptor(toPtr, fFrameSize);
+ }
+
+ // output header and side info:
+ memmove(toPtr, tailSeg->dataStart(),
+ tailSeg->headerSize + tailSeg->sideInfoSize);
+ toPtr += tailSeg->headerSize + tailSeg->sideInfoSize;
+
+ // go back to the frame that contains the start of our data:
+ // (walk backwards through the queue, consuming "backpointer" bytes)
+ unsigned offset = 0;
+ unsigned i = tailIndex;
+ unsigned prevBytes = tailSeg->backpointer;
+ while (prevBytes > 0) {
+ i = SegmentQueue::prevIndex(i);
+ unsigned dataHere = fSegments->s[i].dataHere();
+ if (dataHere < prevBytes) {
+ prevBytes -= dataHere;
+ } else {
+ offset = dataHere - prevBytes;
+ break;
+ }
+ }
+
+ // dequeue any segments that we no longer need:
+ while (fSegments->headIndex() != i) {
+ fSegments->dequeue(); // we're done with it
+ }
+
+ // Copy the ADU's payload, spanning as many queued frames as needed:
+ unsigned bytesToUse = tailSeg->aduSize;
+ while (bytesToUse > 0) {
+ Segment& seg = fSegments->s[i];
+ unsigned char* fromPtr
+ = &seg.dataStart()[seg.headerSize + seg.sideInfoSize + offset];
+ unsigned dataHere = seg.dataHere() - offset;
+ unsigned bytesUsedHere = dataHere < bytesToUse ? dataHere : bytesToUse;
+ memmove(toPtr, fromPtr, bytesUsedHere);
+ bytesToUse -= bytesUsedHere;
+ toPtr += bytesUsedHere;
+ offset = 0;
+ i = SegmentQueue::nextIndex(i);
+ }
+
+
+ // Honor the scale factor: deliver only every fScale-th ADU.
+ if (fFrameCounter++%fScale == 0) {
+ // Call our own 'after getting' function. Because we're not a 'leaf'
+ // source, we can call this directly, without risking infinite recursion.
+ afterGetting(this);
+ } else {
+ // Don't use this frame; get another one:
+ doGetNextFrame();
+ }
+
+ return True;
+}
+
+
+////////// MP3FromADUSource //////////
+
+// Constructor: wraps "inputSource" (an ADU source) with a queue used to
+// reassemble regular MP3 frames from incoming ADUs.
+MP3FromADUSource::MP3FromADUSource(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors)
+ : FramedFilter(env, inputSource),
+ fAreEnqueueingADU(False),
+ fSegments(new SegmentQueue(False /* because we're ADU->MP3 */,
+ includeADUdescriptors)) {
+}
+
+// Destructor: releases the segment queue allocated in the constructor.
+MP3FromADUSource::~MP3FromADUSource() {
+ delete fSegments;
+}
+
+// MIME type of our output: regular MPEG audio frames.
+char const* MP3FromADUSource::MIMEtype() const {
+ return "audio/MPEG";
+}
+
+// Factory function. Returns NULL (with an env result message set) if
+// "inputSource" is not an MP3 ADU ("mpa-robust") source.
+MP3FromADUSource* MP3FromADUSource::createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors) {
+ // The source must be an MP3 ADU source:
+ if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) {
+ env.setResultMsg(inputSource->name(), " is not an MP3 ADU source");
+ return NULL;
+ }
+
+ return new MP3FromADUSource(env, inputSource, includeADUdescriptors);
+}
+
+
+// Either enqueues another incoming ADU (asynchronously) or, once enough ADU
+// data is queued to cover the head frame, reconstructs and delivers that
+// MP3 frame. Dummy ADUs are inserted first if an intermediate ADU was lost.
+void MP3FromADUSource::doGetNextFrame() {
+ if (fAreEnqueueingADU) insertDummyADUsIfNecessary();
+ fAreEnqueueingADU = False;
+
+ if (needToGetAnADU()) {
+ // Before returning a frame, we must enqueue at least one ADU:
+ // NOTE(review): the TEST_LOSS block below contains bare (non-comment)
+ // prose and calls a removed synchronous-read API; it will not compile
+ // if TEST_LOSS is ever defined.
+#ifdef TEST_LOSS
+ NOTE: This code no longer works, because it uses synchronous reads,
+ which are no longer supported.
+ static unsigned const framesPerPacket = 10;
+ static unsigned const frameCount = 0;
+ static Boolean packetIsLost;
+ while (1) {
+ if ((frameCount++)%framesPerPacket == 0) {
+ packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
+ }
+
+ if (packetIsLost) {
+ // Read and discard the next input frame (that would be part of
+ // a lost packet):
+ Segment dummySegment;
+ unsigned numBytesRead;
+ struct timeval presentationTime;
+ // (this works only if the source can be read synchronously)
+ fInputSource->syncGetNextFrame(dummySegment.buf,
+ sizeof dummySegment.buf, numBytesRead,
+ presentationTime);
+ } else {
+ break; // from while (1)
+ }
+ }
+#endif
+
+ fAreEnqueueingADU = True;
+ fSegments->enqueueNewSegment(fInputSource, this);
+ } else {
+ // Return a frame now:
+ generateFrameFromHeadADU();
+ // sets fFrameSize, fPresentationTime, and fDurationInMicroseconds
+
+ // Call our own 'after getting' function. Because we're not a 'leaf'
+ // source, we can call this directly, without risking infinite recursion.
+ afterGetting(this);
+ }
+}
+
+// Returns True iff the queued ADUs do not yet span the whole data region of
+// the head frame, i.e. at least one more ADU must be enqueued before
+// generateFrameFromHeadADU() can run.
+Boolean MP3FromADUSource::needToGetAnADU() {
+ // Check whether we need to first enqueue a new ADU before we
+ // can generate a frame for our head ADU.
+ Boolean needToEnqueue = True;
+
+ if (!fSegments->isEmpty()) {
+ unsigned index = fSegments->headIndex();
+ Segment* seg = &(fSegments->headSegment());
+ int const endOfHeadFrame = (int) seg->dataHere();
+ unsigned frameOffset = 0;
+
+ while (1) {
+ // End of this ADU's data, relative to the head frame's data start
+ // (may be negative while the backpointer reaches before the head):
+ int endOfData = frameOffset - seg->backpointer + seg->aduSize;
+ if (endOfData >= endOfHeadFrame) {
+ // We already have enough data to generate a frame
+ needToEnqueue = False;
+ break;
+ }
+
+ frameOffset += seg->dataHere();
+ index = SegmentQueue::nextIndex(index);
+ if (index == fSegments->nextFreeIndex()) break;
+ seg = &(fSegments->s[index]);
+ }
+ }
+
+ return needToEnqueue;
+}
+
+// If the newly-enqueued tail ADU's backpointer reaches past the end of the
+// previous ADU's data (which can happen only when an intermediate ADU was
+// lost), inserts zero-size 'dummy' ADUs before it until the data is contiguous.
+void MP3FromADUSource::insertDummyADUsIfNecessary() {
+ if (fSegments->isEmpty()) return; // shouldn't happen
+
+ // The tail segment (ADU) is assumed to have been recently
+ // enqueued. If its backpointer would overlap the data
+ // of the previous ADU, then we need to insert one or more
+ // empty, 'dummy' ADUs ahead of it. (This situation should occur
+ // only if an intermediate ADU was lost.)
+
+ unsigned tailIndex
+ = SegmentQueue::prevIndex(fSegments->nextFreeIndex());
+ Segment* tailSeg = &(fSegments->s[tailIndex]);
+
+ while (1) {
+ unsigned prevADUend; // relative to the start of the new ADU
+ if (fSegments->headIndex() != tailIndex) {
+ // there is a previous segment
+ unsigned prevIndex = SegmentQueue::prevIndex(tailIndex);
+ Segment& prevSegment = fSegments->s[prevIndex];
+ prevADUend = prevSegment.dataHere() + prevSegment.backpointer;
+ if (prevSegment.aduSize > prevADUend) {
+ // shouldn't happen if the previous ADU was well-formed
+ prevADUend = 0;
+ } else {
+ prevADUend -= prevSegment.aduSize;
+ }
+ } else {
+ prevADUend = 0;
+ }
+
+ if (tailSeg->backpointer > prevADUend) {
+ // We need to insert a dummy ADU in front of the tail
+#ifdef DEBUG
+ fprintf(stderr, "a->m:need to insert a dummy ADU (%d, %d, %d) [%d, %d]\n", tailSeg->backpointer, prevADUend, tailSeg->dataHere(), fSegments->headIndex(), fSegments->nextFreeIndex());
+#endif
+ tailIndex = fSegments->nextFreeIndex();
+ if (!fSegments->insertDummyBeforeTail(prevADUend)) return;
+ tailSeg = &(fSegments->s[tailIndex]);
+ } else {
+ break; // no more dummy ADUs need to be inserted
+ }
+ }
+}
+
+// Reconstructs the head MP3 frame into "fTo" by copying the relevant slices
+// of this and subsequent queued ADUs (zero-padding any gaps), then dequeues
+// the head. Returns False only if the queue is empty.
+Boolean MP3FromADUSource::generateFrameFromHeadADU() {
+ // Output a frame for the head ADU:
+ if (fSegments->isEmpty()) return False;
+ unsigned index = fSegments->headIndex();
+ Segment* seg = &(fSegments->headSegment());
+#ifdef DEBUG
+ fprintf(stderr, "a->m:outputting frame for %d<-%d (fs %d, dh %d), (descriptorSize: %d)\n", seg->aduSize, seg->backpointer, seg->frameSize, seg->dataHere(), seg->descriptorSize);
+#endif
+ unsigned char* toPtr = fTo;
+
+ // output header and side info:
+ fFrameSize = seg->frameSize;
+ fPresentationTime = seg->presentationTime;
+ fDurationInMicroseconds = seg->durationInMicroseconds;
+ memmove(toPtr, seg->dataStart(), seg->headerSize + seg->sideInfoSize);
+ toPtr += seg->headerSize + seg->sideInfoSize;
+
+ // zero out the rest of the frame, in case ADU data doesn't fill it all in
+ unsigned bytesToZero = seg->dataHere();
+ for (unsigned i = 0; i < bytesToZero; ++i) {
+ toPtr[i] = '\0';
+ }
+
+ // Fill in the frame with appropriate ADU data from this and
+ // subsequent ADUs:
+ unsigned frameOffset = 0;
+ unsigned toOffset = 0;
+ unsigned const endOfHeadFrame = seg->dataHere();
+
+ while (toOffset < endOfHeadFrame) {
+ // Positions are relative to the head frame's data start; they can be
+ // negative while an ADU's backpointer reaches before the head frame:
+ int startOfData = frameOffset - seg->backpointer;
+ if (startOfData > (int)endOfHeadFrame) break; // no more ADUs needed
+
+ int endOfData = startOfData + seg->aduSize;
+ if (endOfData > (int)endOfHeadFrame) {
+ endOfData = endOfHeadFrame;
+ }
+
+ unsigned fromOffset;
+ if (startOfData <= (int)toOffset) {
+ // This ADU overlaps data we've already written; skip the overlap:
+ fromOffset = toOffset - startOfData;
+ startOfData = toOffset;
+ if (endOfData < startOfData) endOfData = startOfData;
+ } else {
+ fromOffset = 0;
+
+ // we may need some padding bytes beforehand
+ unsigned bytesToZero = startOfData - toOffset;
+#ifdef DEBUG
+ if (bytesToZero > 0) fprintf(stderr, "a->m:outputting %d zero bytes (%d, %d, %d, %d)\n", bytesToZero, startOfData, toOffset, frameOffset, seg->backpointer);
+#endif
+ toOffset += bytesToZero;
+ }
+
+ unsigned char* fromPtr
+ = &seg->dataStart()[seg->headerSize + seg->sideInfoSize + fromOffset];
+ unsigned bytesUsedHere = endOfData - startOfData;
+#ifdef DEBUG
+ if (bytesUsedHere > 0) fprintf(stderr, "a->m:outputting %d bytes from %d<-%d\n", bytesUsedHere, seg->aduSize, seg->backpointer);
+#endif
+ memmove(toPtr + toOffset, fromPtr, bytesUsedHere);
+ toOffset += bytesUsedHere;
+
+ frameOffset += seg->dataHere();
+ index = SegmentQueue::nextIndex(index);
+ if (index == fSegments->nextFreeIndex()) break;
+ seg = &(fSegments->s[index]);
+ }
+
+ fSegments->dequeue();
+
+ return True;
+}
+
+
+////////// Segment //////////
+
+// Number of payload (main-data) bytes in this frame: the frame size minus
+// its header and side info. Clamped to 0 to guard against underflow on
+// malformed/zero-size frames.
+unsigned Segment::dataHere() {
+ int result = frameSize - (headerSize + sideInfoSize);
+ if (result < 0) {
+ return 0;
+ }
+
+ return (unsigned)result;
+}
+
+////////// SegmentQueue //////////
+
+// Starts an asynchronous read of one frame from "inputSource" into the next
+// free slot; sqAfterGettingSegment() completes the enqueue and then resumes
+// "usingSource". On queue overflow, closes "usingSource" instead.
+void SegmentQueue::enqueueNewSegment(FramedSource* inputSource,
+ FramedSource* usingSource) {
+ if (isFull()) {
+ usingSource->envir() << "SegmentQueue::enqueueNewSegment() overflow\n";
+ usingSource->handleClosure();
+ return;
+ }
+
+ fUsingSource = usingSource;
+
+ Segment& seg = nextFreeSegment();
+ inputSource->getNextFrame(seg.buf, sizeof seg.buf,
+ sqAfterGettingSegment, this,
+ FramedSource::handleClosure, usingSource);
+}
+
+// Static completion handler for the read started by enqueueNewSegment().
+// Records timing, finishes the enqueue via sqAfterGettingCommon() (on parse
+// failure the slot is simply not advanced), then resumes the using source.
+void SegmentQueue::sqAfterGettingSegment(void* clientData,
+ unsigned numBytesRead,
+ unsigned /*numTruncatedBytes*/,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ SegmentQueue* segQueue = (SegmentQueue*)clientData;
+ Segment& seg = segQueue->nextFreeSegment();
+
+ seg.presentationTime = presentationTime;
+ seg.durationInMicroseconds = durationInMicroseconds;
+
+ if (segQueue->sqAfterGettingCommon(seg, numBytesRead)) {
+#ifdef DEBUG
+ char const* direction = segQueue->fDirectionIsToADU ? "m->a" : "a->m";
+ fprintf(stderr, "%s:read frame %d<-%d, fs:%d, sis:%d, dh:%d, (descriptor size: %d)\n", direction, seg.aduSize, seg.backpointer, seg.frameSize, seg.sideInfoSize, seg.dataHere(), seg.descriptorSize);
+#endif
+ }
+
+ // Continue our original calling source where it left off:
+ segQueue->fUsingSource->doGetNextFrame();
+}
+
+// Common code called after a new segment is enqueued
+Boolean SegmentQueue::sqAfterGettingCommon(Segment& seg,
+ unsigned numBytesRead) {
+ unsigned char* fromPtr = seg.buf;
+
+ if (fIncludeADUdescriptors) {
+ // The newly-read data is assumed to be an ADU with a descriptor
+ // in front
+ (void)ADUdescriptor::getRemainingFrameSize(fromPtr);
+ seg.descriptorSize = (unsigned)(fromPtr-seg.buf);
+ } else {
+ seg.descriptorSize = 0;
+ }
+
+ // parse the MP3-specific info in the frame to get the ADU params
+ unsigned hdr;
+ MP3SideInfo sideInfo;
+ if (!GetADUInfoFromMP3Frame(fromPtr, numBytesRead,
+ hdr, seg.frameSize,
+ sideInfo, seg.sideInfoSize,
+ seg.backpointer, seg.aduSize)) {
+ return False;
+ }
+
+ // If we've just read an ADU (rather than a regular MP3 frame), then use the
+ // entire "numBytesRead" data for the 'aduSize', so that we include any
+ // 'ancillary data' that may be present at the end of the ADU:
+ if (!fDirectionIsToADU) {
+ unsigned newADUSize
+ = numBytesRead - seg.descriptorSize - 4/*header size*/ - seg.sideInfoSize;
+ if (newADUSize > seg.aduSize) seg.aduSize = newADUSize;
+ }
+ fTotalDataSize += seg.dataHere();
+ fNextFreeIndex = nextIndex(fNextFreeIndex);
+
+ return True;
+}
+
+// Removes the head segment, keeping fTotalDataSize in sync. Returns False on
+// underflow. (Note: the underflow path uses fUsingSource for logging, which
+// is set only after the first enqueueNewSegment() call.)
+Boolean SegmentQueue::dequeue() {
+ if (isEmpty()) {
+ fUsingSource->envir() << "SegmentQueue::dequeue(): underflow!\n";
+ return False;
+ }
+
+ Segment& seg = s[headIndex()];
+ fTotalDataSize -= seg.dataHere();
+ fHeadIndex = nextIndex(fHeadIndex);
+ return True;
+}
+
+// Inserts a zero-size 'dummy' ADU (with the given backpointer) just before
+// the current tail, by copying the tail one slot forward and rewriting the
+// old tail slot in place. Returns False if there is no free slot or the
+// rewritten dummy cannot be (re)parsed.
+Boolean SegmentQueue::insertDummyBeforeTail(unsigned backpointer) {
+ if (isEmptyOrFull()) return False;
+
+ // Copy the current tail segment to its new position, then modify the
+ // old tail segment to be a 'dummy' ADU
+
+ unsigned newTailIndex = nextFreeIndex();
+ Segment& newTailSeg = s[newTailIndex];
+
+ unsigned oldTailIndex = prevIndex(newTailIndex);
+ Segment& oldTailSeg = s[oldTailIndex];
+
+ newTailSeg = oldTailSeg; // structure copy
+
+ // Begin by setting (replacing) the ADU descriptor of the dummy ADU:
+ unsigned char* ptr = oldTailSeg.buf;
+ if (fIncludeADUdescriptors) {
+ unsigned remainingFrameSize
+ = oldTailSeg.headerSize + oldTailSeg.sideInfoSize + 0 /* 0-size ADU */;
+ unsigned currentDescriptorSize = oldTailSeg.descriptorSize;
+
+ // Keep the descriptor the same width, so the frame data need not move:
+ if (currentDescriptorSize == 2) {
+ ADUdescriptor::generateTwoByteDescriptor(ptr, remainingFrameSize);
+ } else {
+ (void)ADUdescriptor::generateDescriptor(ptr, remainingFrameSize);
+ }
+ }
+
+ // Then zero out the side info of the dummy frame:
+ if (!ZeroOutMP3SideInfo(ptr, oldTailSeg.frameSize,
+ backpointer)) return False;
+
+ // Re-run the common enqueue parsing on the rewritten (now dummy) segment:
+ unsigned dummyNumBytesRead
+ = oldTailSeg.descriptorSize + 4/*header size*/ + oldTailSeg.sideInfoSize;
+ return sqAfterGettingCommon(oldTailSeg, dummyNumBytesRead);
+}
diff --git a/liveMedia/MP3ADURTPSink.cpp b/liveMedia/MP3ADURTPSink.cpp
new file mode 100644
index 0000000..88df8b4
--- /dev/null
+++ b/liveMedia/MP3ADURTPSink.cpp
@@ -0,0 +1,119 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for 'ADUized' MP3 frames ("mpa-robust")
+// Implementation
+
+#include "MP3ADURTPSink.hh"
+
+// Constructor: an audio RTP sink with a 90000 Hz timestamp clock and the
+// "MPA-ROBUST" (MP3 ADU) payload format name.
+MP3ADURTPSink::MP3ADURTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char RTPPayloadType)
+ : AudioRTPSink(env, RTPgs, RTPPayloadType, 90000, "MPA-ROBUST") {
+}
+
+// Destructor (nothing beyond the base class's cleanup).
+MP3ADURTPSink::~MP3ADURTPSink() {
+}
+
+// Factory function for creating a new MP3ADURTPSink.
+MP3ADURTPSink*
+MP3ADURTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char RTPPayloadType) {
+ return new MP3ADURTPSink(env, RTPgs, RTPPayloadType);
+}
+
+// Logs a diagnostic when an input ADU is too small to contain its descriptor.
+static void badDataSize(UsageEnvironment& env, unsigned numBytesInFrame) {
+ env << "MP3ADURTPSink::doSpecialFrameHandling(): invalid size ("
+ << numBytesInFrame << ") of non-fragmented input ADU!\n";
+}
+
+// Per-fragment packetization hook. For the first fragment of an ADU it
+// validates the in-band ADU descriptor (1 or 2 bytes) and caches the ADU size
+// in fCurADUSize; for later fragments it prepends a fresh 2-byte descriptor
+// with the 'C' (continuation) bit set, as the mpa-robust format requires.
+void MP3ADURTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+ // If this is the first (or only) fragment of an ADU, then
+ // check the "ADU descriptor" (that should be at the front) for validity:
+ if (fragmentationOffset == 0) {
+ unsigned aduDescriptorSize;
+
+ if (numBytesInFrame < 1) {
+ badDataSize(envir(), numBytesInFrame);
+ return;
+ }
+ if (frameStart[0]&0x40) {
+ // We have a 2-byte ADU descriptor
+ aduDescriptorSize = 2;
+ if (numBytesInFrame < 2) {
+ badDataSize(envir(), numBytesInFrame);
+ return;
+ }
+ // 14-bit size: low 6 bits of byte 0, plus byte 1:
+ fCurADUSize = ((frameStart[0]&~0xC0)<<8) | frameStart[1];
+ } else {
+ // We have a 1-byte ADU descriptor
+ aduDescriptorSize = 1;
+ fCurADUSize = frameStart[0]&~0x80;
+ }
+
+ // The 'C' (continuation) bit must not be set on a first fragment:
+ if (frameStart[0]&0x80) {
+ envir() << "Unexpected \"C\" bit seen on non-fragment input ADU!\n";
+ return;
+ }
+
+ // Now, check whether the ADU size in the ADU descriptor is consistent
+ // with the total data size of (all fragments of) the input frame:
+ unsigned expectedADUSize =
+ fragmentationOffset + numBytesInFrame + numRemainingBytes
+ - aduDescriptorSize;
+ if (fCurADUSize != expectedADUSize) {
+ envir() << "MP3ADURTPSink::doSpecialFrameHandling(): Warning: Input ADU size "
+ << expectedADUSize << " (=" << fragmentationOffset
+ << "+" << numBytesInFrame << "+" << numRemainingBytes
+ << "-" << aduDescriptorSize
+ << ") did not match the value (" << fCurADUSize
+ << ") in the ADU descriptor!\n";
+ fCurADUSize = expectedADUSize;
+ }
+ } else {
+ // This is the second (or subsequent) fragment.
+ // Insert a new ADU descriptor:
+ unsigned char aduDescriptor[2];
+ aduDescriptor[0] = 0xC0|(fCurADUSize>>8); // C bit + 2-byte flag + size high bits
+ aduDescriptor[1] = fCurADUSize&0xFF;
+ setSpecialHeaderBytes(aduDescriptor, 2);
+ }
+
+ // Important: Also call our base class's doSpecialFrameHandling(),
+ // to set the packet's timestamp:
+ MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+ frameStart, numBytesInFrame,
+ framePresentationTime,
+ numRemainingBytes);
+}
+
+// Reserves room in the outgoing packet for the 2-byte ADU descriptor that
+// doSpecialFrameHandling() inserts on non-first fragments; 0 otherwise.
+unsigned MP3ADURTPSink::specialHeaderSize() const {
+ // Normally there's no special header.
+ // (The "ADU descriptor" is already present in the data.)
+ unsigned specialHeaderSize = 0;
+
+ // However, if we're about to output the second (or subsequent) fragment
+ // of a fragmented ADU, then we need to insert a new ADU descriptor at
+ // the front of the packet:
+ if (curFragmentationOffset() > 0) {
+ specialHeaderSize = 2;
+ }
+
+ return specialHeaderSize;
+}
diff --git a/liveMedia/MP3ADURTPSource.cpp b/liveMedia/MP3ADURTPSource.cpp
new file mode 100644
index 0000000..2a6e1d2
--- /dev/null
+++ b/liveMedia/MP3ADURTPSource.cpp
@@ -0,0 +1,80 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP source for 'ADUized' MP3 frames ("mpa-robust")
+// Implementation
+
+#include "MP3ADURTPSource.hh"
+#include "MP3ADUdescriptor.hh"
+
+////////// ADUBufferedPacket and ADUBufferedPacketFactory //////////
+
+// A "BufferedPacket" subclass that knows how to delimit the ADU frames
+// (each beginning with an "ADU descriptor") within a received packet:
+class ADUBufferedPacket: public BufferedPacket {
+private: // redefined virtual functions
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+					 unsigned dataSize);
+};
+
+// A factory that creates "ADUBufferedPacket"s for our RTP source:
+class ADUBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+///////// MP3ADURTPSource implementation ////////
+
+// Create a new "MP3ADURTPSource" (an RTP source for 'ADUized' MP3 frames):
+MP3ADURTPSource*
+MP3ADURTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+			   unsigned char rtpPayloadFormat,
+			   unsigned rtpTimestampFrequency) {
+  return new MP3ADURTPSource(env, RTPgs, rtpPayloadFormat,
+			     rtpTimestampFrequency);
+}
+
+MP3ADURTPSource::MP3ADURTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+				 unsigned char rtpPayloadFormat,
+				 unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs,
+			 rtpPayloadFormat, rtpTimestampFrequency,
+			 new ADUBufferedPacketFactory) {
+  // Note: The packet factory is handed to the base class; presumably the
+  // base class takes ownership of it - confirm in "MultiFramedRTPSource".
+}
+
+MP3ADURTPSource::~MP3ADURTPSource() {
+}
+
+// The MIME type for the "mpa-robust" (ADUized MP3) RTP payload format:
+char const* MP3ADURTPSource::MIMEtype() const {
+  return "audio/MPA-ROBUST";
+}
+
+////////// ADUBufferedPacket and ADUBufferredPacketFactory implementation
+
+unsigned ADUBufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  // Compute the size of the next MP3 'ADU' in the packet, on the assumption
+  // that the packet payload consists of ADU-encoded MP3 frames.
+  unsigned char* p = framePtr; // "getRemainingFrameSize" advances this copy
+  unsigned remainingSize = ADUdescriptor::getRemainingFrameSize(p);
+  unsigned descriptorBytes = (unsigned)(p - framePtr);
+  unsigned aduSize = descriptorBytes + remainingSize;
+
+  // Never claim more data than the packet actually holds:
+  if (aduSize > dataSize) aduSize = dataSize;
+  return aduSize;
+}
+
+BufferedPacket* ADUBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
+  // Hand back a packet object that can parse ADU frame boundaries:
+  return new ADUBufferedPacket;
+}
diff --git a/liveMedia/MP3ADUTranscoder.cpp b/liveMedia/MP3ADUTranscoder.cpp
new file mode 100644
index 0000000..78a251f
--- /dev/null
+++ b/liveMedia/MP3ADUTranscoder.cpp
@@ -0,0 +1,92 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Transcoder for ADUized MP3 frames
+// Implementation
+
+#include "MP3ADUTranscoder.hh"
+#include "MP3Internals.hh"
+#include <stdio.h>
+#include <string.h>
+
+MP3ADUTranscoder::MP3ADUTranscoder(UsageEnvironment& env,
+				   unsigned outBitrate /* in kbps */,
+				   FramedSource* inputSource)
+  : FramedFilter(env, inputSource),
+    fOutBitrate(outBitrate),
+    fAvailableBytesForBackpointer(0),
+    fOrigADU(new unsigned char[MAX_MP3_FRAME_SIZE]) {
+  // "fOrigADU" is a scratch buffer holding each input ADU prior to transcoding.
+}
+
+MP3ADUTranscoder::~MP3ADUTranscoder() {
+  delete[] fOrigADU;
+}
+
+MP3ADUTranscoder* MP3ADUTranscoder::createNew(UsageEnvironment& env,
+					      unsigned outBitrate /* in kbps */,
+					      FramedSource* inputSource) {
+  // We can transcode only MP3 ADU streams; refuse anything else:
+  Boolean isADUSource
+    = strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") == 0;
+  if (!isADUSource) {
+    env.setResultMsg(inputSource->name(), " is not an MP3 ADU source");
+    return NULL;
+  }
+
+  return new MP3ADUTranscoder(env, outBitrate, inputSource);
+}
+
+void MP3ADUTranscoder::getAttributes() const {
+  // Begin by getting the attributes from our input source:
+  fInputSource->getAttributes();
+
+  // Then modify them by appending the corrected bandwidth.
+  // Note: "outBitrate()" is unsigned, so use "%u" (not "%d"), and use
+  // "snprintf()" to rule out any possibility of overflowing "buffer":
+  char buffer[30];
+  snprintf(buffer, sizeof buffer, " bandwidth %u", outBitrate());
+  envir().appendToResultMsg(buffer);
+}
+
+void MP3ADUTranscoder::doGetNextFrame() {
+  // Read the next input ADU into "fOrigADU"; "afterGettingFrame" will then
+  // transcode it to the target bitrate and deliver the result downstream:
+  fInputSource->getNextFrame(fOrigADU, MAX_MP3_FRAME_SIZE,
+			     afterGettingFrame, this, handleClosure, this);
+}
+
+void MP3ADUTranscoder::afterGettingFrame(void* clientData,
+					 unsigned numBytesRead,
+					 unsigned numTruncatedBytes,
+					 struct timeval presentationTime,
+					 unsigned durationInMicroseconds) {
+  // Static callback trampoline: recover the object, then forward to the
+  // member function that does the real work:
+  MP3ADUTranscoder* us = static_cast<MP3ADUTranscoder*>(clientData);
+  us->afterGettingFrame1(numBytesRead, numTruncatedBytes,
+			 presentationTime, durationInMicroseconds);
+}
+
+void MP3ADUTranscoder::afterGettingFrame1(unsigned numBytesRead,
+					  unsigned numTruncatedBytes,
+					  struct timeval presentationTime,
+					  unsigned durationInMicroseconds) {
+  fNumTruncatedBytes = numTruncatedBytes; // but can we handle this being >0? #####
+  fPresentationTime = presentationTime;
+  fDurationInMicroseconds = durationInMicroseconds;
+  // Transcode the ADU (in "fOrigADU") to the target bitrate, writing the
+  // result directly into the downstream reader's buffer ("fTo"):
+  fFrameSize = TranscodeMP3ADU(fOrigADU, numBytesRead, fOutBitrate,
+			       fTo, fMaxSize, fAvailableBytesForBackpointer);
+  if (fFrameSize == 0) { // internal error - bad ADU data?
+    handleClosure();
+    return;
+  }
+
+  // Call our own 'after getting' function. Because we're not a 'leaf'
+  // source, we can call this directly, without risking infinite recursion.
+  afterGetting(this);
+}
diff --git a/liveMedia/MP3ADUdescriptor.cpp b/liveMedia/MP3ADUdescriptor.cpp
new file mode 100644
index 0000000..4667d05
--- /dev/null
+++ b/liveMedia/MP3ADUdescriptor.cpp
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Descriptor preceding frames of 'ADU' MP3 streams (for improved loss-tolerance)
+// Implementation
+
+#include "MP3ADUdescriptor.hh"
+
+////////// ADUdescriptor //////////
+
+//##### NOTE: For now, ignore fragmentation. Fix this later! #####
+
+#define TWO_BYTE_DESCR_FLAG 0x40
+
+unsigned ADUdescriptor::generateDescriptor(unsigned char*& toPtr,
+					   unsigned remainingFrameSize) {
+  // Write a descriptor for "remainingFrameSize" at "toPtr" (advancing it),
+  // using the 1-byte form when the size fits, else the 2-byte form.
+  // Returns the number of descriptor bytes written (1 or 2).
+  unsigned descriptorSize = ADUdescriptor::computeSize(remainingFrameSize);
+  if (descriptorSize == 1) {
+    *toPtr++ = (unsigned char)remainingFrameSize;
+  } else { // descriptorSize == 2
+    generateTwoByteDescriptor(toPtr, remainingFrameSize);
+  }
+
+  return descriptorSize;
+}
+
+void ADUdescriptor::generateTwoByteDescriptor(unsigned char*& toPtr,
+					      unsigned remainingFrameSize) {
+  // Always emit the 2-byte form: flag bit + high 6 bits, then the low 8 bits.
+  unsigned char highByte = (unsigned char)(remainingFrameSize>>8);
+  unsigned char lowByte = (unsigned char)remainingFrameSize;
+  *toPtr++ = (unsigned char)(TWO_BYTE_DESCR_FLAG|highByte);
+  *toPtr++ = lowByte;
+}
+
+unsigned ADUdescriptor::getRemainingFrameSize(unsigned char*& fromPtr) {
+  // Parse the descriptor at "fromPtr" (advancing the pointer past it),
+  // returning the 'remaining frame size' value that it encodes.
+  unsigned char firstByte = *fromPtr++;
+
+  if ((firstByte&TWO_BYTE_DESCR_FLAG) == 0) {
+    // 1-byte descriptor: the low 6 bits are the size
+    return firstByte&0x3F;
+  }
+
+  // 2-byte descriptor: 6 high-order bits from the first byte,
+  // 8 low-order bits from the second:
+  unsigned result = (firstByte&0x3F)<<8;
+  result |= *fromPtr++;
+  return result;
+}
+
diff --git a/liveMedia/MP3ADUdescriptor.hh b/liveMedia/MP3ADUdescriptor.hh
new file mode 100644
index 0000000..3c9eef4
--- /dev/null
+++ b/liveMedia/MP3ADUdescriptor.hh
@@ -0,0 +1,43 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Descriptor preceding frames of 'ADU' MP3 streams (for improved loss-tolerance)
+// C++ header
+
+#ifndef _MP3_ADU_DESCRIPTOR_HH
+#define _MP3_ADU_DESCRIPTOR_HH
+
+// A class for handling the descriptor that begins each ADU frame:
+// (Note: We don't yet implement fragmentation)
+class ADUdescriptor {
+public:
+  // Operations for generating a new descriptor
+  static unsigned computeSize(unsigned remainingFrameSize) {
+    // Sizes 0..63 fit in the 6 payload bits of a 1-byte descriptor;
+    // anything larger needs the 2-byte form:
+    return remainingFrameSize >= 64 ? 2 : 1;
+  }
+  static unsigned generateDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize);
+  // returns descriptor size; increments "toPtr" afterwards
+  static void generateTwoByteDescriptor(unsigned char*& toPtr, unsigned remainingFrameSize);
+  // always generates a 2-byte descriptor, even if "remainingFrameSize" is
+  // small enough for a 1-byte descriptor
+
+  // Operations for reading a descriptor
+  static unsigned getRemainingFrameSize(unsigned char*& fromPtr);
+  // increments "fromPtr" afterwards
+};
+
+#endif
diff --git a/liveMedia/MP3ADUinterleaving.cpp b/liveMedia/MP3ADUinterleaving.cpp
new file mode 100644
index 0000000..7f3a68c
--- /dev/null
+++ b/liveMedia/MP3ADUinterleaving.cpp
@@ -0,0 +1,517 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Interleaving of MP3 ADUs
+// Implementation
+
+#include "MP3ADUinterleaving.hh"
+#include "MP3ADUdescriptor.hh"
+
+#include <string.h>
+
+#ifdef TEST_LOSS
+#include "GroupsockHelper.hh"
+#endif
+
+////////// Interleaving //////////
+
+// Build the inverse of the given interleave-cycle permutation, so that a
+// frame's position within the cycle can later be mapped back to its index.
+// NOTE(review): assumes "cycleSize" <= MAX_CYCLE_SIZE and that each
+// "cycleArray[i]" is a valid index into "fInverseCycle" - confirm at call sites.
+Interleaving::Interleaving(unsigned cycleSize,
+			   unsigned char const* cycleArray)
+  : fCycleSize(cycleSize) {
+  for (unsigned i = 0; i < fCycleSize; ++i) {
+    fInverseCycle[cycleArray[i]] = i;
+  }
+}
+
+Interleaving::~Interleaving() {
+}
+
+////////// MP3ADUinterleaverBase //////////
+
+MP3ADUinterleaverBase::MP3ADUinterleaverBase(UsageEnvironment& env,
+					     FramedSource* inputSource)
+  : FramedFilter(env, inputSource) {
+}
+MP3ADUinterleaverBase::~MP3ADUinterleaverBase() {
+}
+
+// Look up a named source, and check that it is an MP3 ADU stream
+// (MIME type "audio/MPA-ROBUST").  Returns NULL (with an error message
+// set in "env") on failure:
+FramedSource* MP3ADUinterleaverBase::getInputSource(UsageEnvironment& env,
+						    char const* inputSourceName) {
+  FramedSource* inputSource;
+  if (!FramedSource::lookupByName(env, inputSourceName, inputSource))
+    return NULL;
+
+  if (strcmp(inputSource->MIMEtype(), "audio/MPA-ROBUST") != 0) {
+    env.setResultMsg(inputSourceName, " is not an MP3 ADU source");
+    return NULL;
+  }
+
+  return inputSource;
+}
+
+void MP3ADUinterleaverBase::afterGettingFrame(void* clientData,
+					      unsigned numBytesRead,
+					      unsigned /*numTruncatedBytes*/,
+					      struct timeval presentationTime,
+					      unsigned durationInMicroseconds) {
+  // Static callback trampoline, invoked when a new frame has been read
+  // from our input source.
+  MP3ADUinterleaverBase* us = static_cast<MP3ADUinterleaverBase*>(clientData);
+
+  // First, let the subclass record the newly-read frame:
+  us->afterGettingFrame(numBytesRead, presentationTime, durationInMicroseconds);
+
+  // Then try once more to deliver an outgoing frame:
+  us->doGetNextFrame();
+}
+
+
+////////// InterleavingFrames (definition) //////////
+
+// Buffers up to one interleave cycle's worth of incoming ADU frames,
+// releasing them in interleaved order:
+class InterleavingFrames {
+public:
+  InterleavingFrames(unsigned maxCycleSize);
+  virtual ~InterleavingFrames();
+
+  Boolean haveReleaseableFrame();
+  void getIncomingFrameParams(unsigned char index,
+			      unsigned char*& dataPtr,
+			      unsigned& bytesAvailable);
+  void getReleasingFrameParams(unsigned char index,
+			       unsigned char*& dataPtr,
+			       unsigned& bytesInUse,
+			       struct timeval& presentationTime,
+			       unsigned& durationInMicroseconds);
+  void setFrameParams(unsigned char index,
+		      unsigned char icc, unsigned char ii,
+		      unsigned frameSize, struct timeval presentationTime,
+		      unsigned durationInMicroseconds);
+  unsigned nextIndexToRelease() {return fNextIndexToRelease;}
+  void releaseNext();
+
+private:
+  unsigned fMaxCycleSize; // size of the interleave cycle (and of "fDescriptors")
+  unsigned fNextIndexToRelease; // index of the next frame slot to deliver
+  class InterleavingFrameDescriptor* fDescriptors;
+};
+
+////////// MP3ADUinterleaver //////////
+
+
+MP3ADUinterleaver::MP3ADUinterleaver(UsageEnvironment& env,
+				     Interleaving const& interleaving,
+				     FramedSource* inputSource)
+  : MP3ADUinterleaverBase(env, inputSource),
+    fInterleaving(interleaving),
+    fFrames(new InterleavingFrames(interleaving.cycleSize())),
+    fII(0), fICC(0) {
+  // "fII" is the index within the current interleave cycle; "fICC" is the
+  // (mod 8) interleave cycle count.
+}
+
+MP3ADUinterleaver::~MP3ADUinterleaver() {
+  delete fFrames;
+}
+
+MP3ADUinterleaver* MP3ADUinterleaver::createNew(UsageEnvironment& env,
+						Interleaving const& interleaving,
+						FramedSource* inputSource) {
+  return new MP3ADUinterleaver(env, interleaving, inputSource);
+}
+
+void MP3ADUinterleaver::doGetNextFrame() {
+  // If there's a frame immediately available, deliver it, otherwise get new
+  // frames from the source until one's available:
+  if (fFrames->haveReleaseableFrame()) {
+    releaseOutgoingFrame();
+
+    // Call our own 'after getting' function. Because we're not a 'leaf'
+    // source, we can call this directly, without risking infinite recursion.
+    afterGetting(this);
+  } else {
+    // The incoming frame's slot is determined by the inverse of the
+    // interleave cycle, applied to our running index "fII":
+    fPositionOfNextIncomingFrame = fInterleaving.lookupInverseCycle(fII);
+    unsigned char* dataPtr;
+    unsigned bytesAvailable;
+    fFrames->getIncomingFrameParams(fPositionOfNextIncomingFrame,
+				    dataPtr, bytesAvailable);
+
+    // Read the next incoming frame (asynchronously)
+    fInputSource->getNextFrame(dataPtr, bytesAvailable,
+			       &MP3ADUinterleaverBase::afterGettingFrame, this,
+			       handleClosure, this);
+  }
+}
+
+void MP3ADUinterleaver::releaseOutgoingFrame() {
+  // Copy the next releasable frame into the downstream reader's buffer:
+  unsigned char* fromPtr;
+  fFrames->getReleasingFrameParams(fFrames->nextIndexToRelease(),
+				   fromPtr, fFrameSize,
+				   fPresentationTime, fDurationInMicroseconds);
+
+  // Truncate if the reader's buffer is too small.
+  // NOTE(review): "fNumTruncatedBytes" is set only on truncation here -
+  // verify that it is reset elsewhere before each delivery.
+  if (fFrameSize > fMaxSize) {
+    fNumTruncatedBytes = fFrameSize - fMaxSize;
+    fFrameSize = fMaxSize;
+  }
+  memmove(fTo, fromPtr, fFrameSize);
+
+  fFrames->releaseNext();
+}
+
+void MP3ADUinterleaver::afterGettingFrame(unsigned numBytesRead,
+					  struct timeval presentationTime,
+					  unsigned durationInMicroseconds) {
+  // Set the (icc,ii) and frame size of the newly-read frame:
+  fFrames->setFrameParams(fPositionOfNextIncomingFrame,
+			  fICC, fII, numBytesRead,
+			  presentationTime, durationInMicroseconds);
+
+  // Prepare our counters for the next frame:
+  // (at the end of each cycle, bump the mod-8 cycle count "fICC")
+  if (++fII == fInterleaving.cycleSize()) {
+    fII = 0;
+    fICC = (fICC+1)%8;
+  }
+}
+
+////////// DeinterleavingFrames (definition) //////////
+
+// Buffers incoming (interleaved) ADU frames, and hands them back in their
+// original (de-interleaved) order:
+class DeinterleavingFrames {
+public:
+  DeinterleavingFrames();
+  virtual ~DeinterleavingFrames();
+
+  Boolean haveReleaseableFrame();
+  void getIncomingFrameParams(unsigned char*& dataPtr,
+			      unsigned& bytesAvailable);
+  void getIncomingFrameParamsAfter(unsigned frameSize,
+				   struct timeval presentationTime,
+				   unsigned durationInMicroseconds,
+				   unsigned char& icc, unsigned char& ii);
+  void getReleasingFrameParams(unsigned char*& dataPtr,
+			       unsigned& bytesInUse,
+			       struct timeval& presentationTime,
+			       unsigned& durationInMicroseconds);
+  void moveIncomingFrameIntoPlace();
+  void releaseNext();
+  void startNewCycle();
+
+private:
+  unsigned fNextIndexToRelease; // next slot to deliver downstream
+  Boolean fHaveEndedCycle; // True once "startNewCycle()" has been called
+  unsigned fIIlastSeen; // interleave index of the most recently read frame
+  unsigned fMinIndexSeen, fMaxIndexSeen; // actually, max+1
+  class DeinterleavingFrameDescriptor* fDescriptors;
+};
+
+////////// MP3ADUdeinterleaver //////////
+
+MP3ADUdeinterleaver::MP3ADUdeinterleaver(UsageEnvironment& env,
+					 FramedSource* inputSource)
+  : MP3ADUinterleaverBase(env, inputSource),
+    fFrames(new DeinterleavingFrames),
+    fIIlastSeen(~0), fICClastSeen(~0) {
+  // "~0" is a sentinel meaning 'no (icc,ii) values have been seen yet'.
+}
+
+MP3ADUdeinterleaver::~MP3ADUdeinterleaver() {
+  delete fFrames;
+}
+
+MP3ADUdeinterleaver* MP3ADUdeinterleaver::createNew(UsageEnvironment& env,
+						    FramedSource* inputSource) {
+  return new MP3ADUdeinterleaver(env, inputSource);
+}
+
+void MP3ADUdeinterleaver::doGetNextFrame() {
+  // If there's a frame immediately available, deliver it, otherwise get new
+  // frames from the source until one's available:
+  if (fFrames->haveReleaseableFrame()) {
+    releaseOutgoingFrame();
+
+    // Call our own 'after getting' function. Because we're not a 'leaf'
+    // source, we can call this directly, without risking infinite recursion.
+    afterGetting(this);
+  } else {
+    // No frame is releasable yet; read another incoming frame first.
+    // (The following loss-simulation code is deliberately left non-working;
+    //  see the NOTE within it.)
+#ifdef TEST_LOSS
+  NOTE: This code no longer works, because it uses synchronous reads,
+  which are no longer supported.
+    static unsigned const framesPerPacket = 3;
+    static unsigned const frameCount = 0;
+    static Boolean packetIsLost;
+    while (1) {
+      unsigned packetCount = frameCount/framesPerPacket;
+      if ((frameCount++)%framesPerPacket == 0) {
+	packetIsLost = (our_random()%10 == 0); // simulate 10% packet loss #####
+      }
+
+      if (packetIsLost) {
+	// Read and discard the next input frame (that would be part of
+	// a lost packet):
+	unsigned char dummyBuf[2000];
+	unsigned numBytesRead;
+	struct timeval presentationTime;
+	// (this works only if the source can be read synchronously)
+	fInputSource->syncGetNextFrame(dummyBuf, sizeof dummyBuf,
+				       numBytesRead, presentationTime);
+      } else {
+	break; // from while (1)
+      }
+    }
+#endif
+    unsigned char* dataPtr;
+    unsigned bytesAvailable;
+    fFrames->getIncomingFrameParams(dataPtr, bytesAvailable);
+
+    // Read the next incoming frame (asynchronously)
+    fInputSource->getNextFrame(dataPtr, bytesAvailable,
+			       &MP3ADUinterleaverBase::afterGettingFrame, this,
+			       handleClosure, this);
+  }
+}
+
+void MP3ADUdeinterleaver::afterGettingFrame(unsigned numBytesRead,
+					    struct timeval presentationTime,
+					    unsigned durationInMicroseconds) {
+  // Get the (icc,ii) and set the frame size of the newly-read frame:
+  unsigned char icc, ii;
+  fFrames->getIncomingFrameParamsAfter(numBytesRead,
+				       presentationTime, durationInMicroseconds,
+				       icc, ii);
+
+  // Compare these to the values we saw last:
+  // A change of the cycle count "icc", or a repeat of the same interleave
+  // index "ii", indicates that a new interleave cycle has begun:
+  if (icc != fICClastSeen || ii == fIIlastSeen) {
+    // We've started a new interleave cycle
+    // (or interleaving was not used). Release all
+    // pending ADU frames to the ADU->MP3 conversion step:
+    fFrames->startNewCycle();
+  } else {
+    // We're still in the same cycle as before.
+    // Move the newly-read frame into place, so it can be used:
+    fFrames->moveIncomingFrameIntoPlace();
+  }
+
+  fICClastSeen = icc;
+  fIIlastSeen = ii;
+}
+
+void MP3ADUdeinterleaver::releaseOutgoingFrame() {
+  // Copy the next releasable frame into the downstream reader's buffer:
+  unsigned char* fromPtr;
+  fFrames->getReleasingFrameParams(fromPtr, fFrameSize,
+				   fPresentationTime, fDurationInMicroseconds);
+
+  // Truncate if the reader's buffer is too small.
+  // NOTE(review): "fNumTruncatedBytes" is set only on truncation here -
+  // verify that it is reset elsewhere before each delivery.
+  if (fFrameSize > fMaxSize) {
+    fNumTruncatedBytes = fFrameSize - fMaxSize;
+    fFrameSize = fMaxSize;
+  }
+  memmove(fTo, fromPtr, fFrameSize);
+
+  fFrames->releaseNext();
+}
+
+////////// InterleavingFrames (implementation) //////////
+
+#define MAX_FRAME_SIZE 2000 /* conservatively high */
+
+// One slot in the interleaver's frame buffer (inline storage):
+class InterleavingFrameDescriptor {
+public:
+  InterleavingFrameDescriptor() {frameDataSize = 0;} // size 0 <=> slot empty
+
+  unsigned frameDataSize; // includes ADU descriptor and (modified) MPEG hdr
+  struct timeval presentationTime;
+  unsigned durationInMicroseconds;
+  unsigned char frameData[MAX_FRAME_SIZE]; // ditto
+};
+
+// Allocate one descriptor slot per position in the interleave cycle:
+InterleavingFrames::InterleavingFrames(unsigned maxCycleSize)
+  : fMaxCycleSize(maxCycleSize), fNextIndexToRelease(0),
+    fDescriptors(new InterleavingFrameDescriptor[maxCycleSize]) {
+}
+InterleavingFrames::~InterleavingFrames() {
+  delete[] fDescriptors;
+}
+
+Boolean InterleavingFrames::haveReleaseableFrame() {
+  // A slot with nonzero size has been filled in, and so can be released:
+  return fDescriptors[fNextIndexToRelease].frameDataSize > 0;
+}
+
+void InterleavingFrames::getIncomingFrameParams(unsigned char index,
+						unsigned char*& dataPtr,
+						unsigned& bytesAvailable) {
+  // Return the buffer (and its capacity) into which the next incoming
+  // frame for slot "index" should be read:
+  InterleavingFrameDescriptor& desc = fDescriptors[index];
+  dataPtr = &desc.frameData[0];
+  bytesAvailable = MAX_FRAME_SIZE;
+}
+
+// Return the stored frame data and timing for slot "index", for delivery:
+void InterleavingFrames::getReleasingFrameParams(unsigned char index,
+						 unsigned char*& dataPtr,
+						 unsigned& bytesInUse,
+						 struct timeval& presentationTime,
+						 unsigned& durationInMicroseconds) {
+  InterleavingFrameDescriptor& desc = fDescriptors[index];
+  dataPtr = &desc.frameData[0];
+  bytesInUse = desc.frameDataSize;
+  presentationTime = desc.presentationTime;
+  durationInMicroseconds = desc.durationInMicroseconds;
+}
+
+// Record the size/timing of the frame just read into slot "index", and
+// stamp the frame's header bits with its interleave position (icc,ii):
+void InterleavingFrames::setFrameParams(unsigned char index,
+					unsigned char icc,
+					unsigned char ii,
+					unsigned frameSize,
+					struct timeval presentationTime,
+					unsigned durationInMicroseconds) {
+  InterleavingFrameDescriptor& desc = fDescriptors[index];
+  desc.frameDataSize = frameSize;
+  desc.presentationTime = presentationTime;
+  desc.durationInMicroseconds = durationInMicroseconds;
+
+  // Advance over the ADU descriptor, to get to the MPEG 'syncword':
+  unsigned char* ptr = &desc.frameData[0];
+  (void)ADUdescriptor::getRemainingFrameSize(ptr);
+
+  // Replace the next 11 bits with (ii,icc):
+  // (8 bits of "ii", then "icc" in the top 3 bits of the following byte)
+  *ptr++ = ii;
+  *ptr &=~ 0xE0;
+  *ptr |= (icc<<5);
+}
+
+void InterleavingFrames::releaseNext() {
+  // Mark the slot empty again, and advance (cyclically) to the next one:
+  fDescriptors[fNextIndexToRelease].frameDataSize = 0;
+  fNextIndexToRelease = (fNextIndexToRelease+1)%fMaxCycleSize;
+}
+
+////////// DeinterleavingFrames (implementation) //////////
+
+// One slot in the deinterleaver's frame buffer.  Unlike the interleaver's
+// slots, the data buffer here is heap-allocated (lazily), so that buffers
+// can be swapped between slots without copying:
+class DeinterleavingFrameDescriptor {
+public:
+  DeinterleavingFrameDescriptor() {frameDataSize = 0; frameData = NULL;}
+  virtual ~DeinterleavingFrameDescriptor() {delete[] frameData;}
+
+  unsigned frameDataSize; // includes ADU descriptor and (modified) MPEG hdr
+  struct timeval presentationTime;
+  unsigned durationInMicroseconds;
+  unsigned char* frameData;
+};
+
+// Note: slot [MAX_CYCLE_SIZE] is an extra, staging slot for the most
+// recently read frame (see "getIncomingFrameParams()" below):
+DeinterleavingFrames::DeinterleavingFrames()
+  : fNextIndexToRelease(0), fHaveEndedCycle(False),
+    fMinIndexSeen(MAX_CYCLE_SIZE), fMaxIndexSeen(0),
+    fDescriptors(new DeinterleavingFrameDescriptor[MAX_CYCLE_SIZE+1]) {
+}
+DeinterleavingFrames::~DeinterleavingFrames() {
+  delete[] fDescriptors;
+}
+
+// Returns True iff a frame is ready to be delivered downstream.
+// (May also, as a side effect, reset our state at the end of a cycle.)
+Boolean DeinterleavingFrames::haveReleaseableFrame() {
+  if (!fHaveEndedCycle) {
+    // Check just the next frame in the sequence
+    return fDescriptors[fNextIndexToRelease].frameDataSize > 0;
+  } else {
+    // We've just ended a cycle, so we can skip over frames that didn't
+    // get filled in (due to packet loss):
+    if (fNextIndexToRelease < fMinIndexSeen) {
+      fNextIndexToRelease = fMinIndexSeen;
+    }
+    while (fNextIndexToRelease < fMaxIndexSeen
+	   && fDescriptors[fNextIndexToRelease].frameDataSize == 0) {
+      ++fNextIndexToRelease;
+    }
+    if (fNextIndexToRelease >= fMaxIndexSeen) {
+      // No more frames are available from the cycle that we just ended, so
+      // clear out all previously stored frames, then make available
+      // the last-read frame, and return false for now:
+      for (unsigned i = fMinIndexSeen; i < fMaxIndexSeen; ++i) {
+	fDescriptors[i].frameDataSize = 0;
+      }
+
+      fMinIndexSeen = MAX_CYCLE_SIZE; fMaxIndexSeen = 0;
+      moveIncomingFrameIntoPlace();
+
+      fHaveEndedCycle = False;
+      fNextIndexToRelease = 0;
+      return False;
+    }
+
+    return True;
+  }
+}
+
+void DeinterleavingFrames::getIncomingFrameParams(unsigned char*& dataPtr,
+						  unsigned& bytesAvailable) {
+  // Use fDescriptors[MAX_CYCLE_SIZE] to store the incoming frame,
+  // prior to figuring out its real position:
+  DeinterleavingFrameDescriptor& desc = fDescriptors[MAX_CYCLE_SIZE];
+  if (desc.frameData == NULL) {
+    // There's no buffer yet, so allocate a new one:
+    desc.frameData = new unsigned char[MAX_FRAME_SIZE];
+  }
+  dataPtr = desc.frameData;
+  bytesAvailable = MAX_FRAME_SIZE;
+}
+
+// Record size/timing of the frame just read into the staging slot, and
+// extract (then overwrite with all-1s) its (ii,icc) interleave bits:
+void DeinterleavingFrames
+::getIncomingFrameParamsAfter(unsigned frameSize,
+			      struct timeval presentationTime,
+			      unsigned durationInMicroseconds,
+			      unsigned char& icc, unsigned char& ii) {
+  DeinterleavingFrameDescriptor& desc = fDescriptors[MAX_CYCLE_SIZE];
+  desc.frameDataSize = frameSize;
+  desc.presentationTime = presentationTime;
+  desc.durationInMicroseconds = durationInMicroseconds;
+
+  // Advance over the ADU descriptor, to get to the MPEG 'syncword':
+  unsigned char* ptr = desc.frameData;
+  (void)ADUdescriptor::getRemainingFrameSize(ptr);
+
+  // Read the next 11 bits into (ii,icc), and replace them with all-1s:
+  fIIlastSeen = ii = *ptr; *ptr++ = 0xFF;
+  icc = (*ptr&0xE0)>>5; *ptr |= 0xE0;
+}
+
+// Return the stored frame data and timing for the next releasable slot:
+void DeinterleavingFrames::getReleasingFrameParams(unsigned char*& dataPtr,
+						   unsigned& bytesInUse,
+						   struct timeval& presentationTime,
+						   unsigned& durationInMicroseconds) {
+  DeinterleavingFrameDescriptor& desc = fDescriptors[fNextIndexToRelease];
+  dataPtr = desc.frameData;
+  bytesInUse = desc.frameDataSize;
+  presentationTime = desc.presentationTime;
+  durationInMicroseconds = desc.durationInMicroseconds;
+}
+
+// Move the just-read frame from the staging slot [MAX_CYCLE_SIZE] into the
+// slot given by its interleave index ("fIIlastSeen").
+// NOTE(review): assumes "fIIlastSeen" < MAX_CYCLE_SIZE - confirm that the
+// 8-bit "ii" value read from the stream is always in range.
+void DeinterleavingFrames::moveIncomingFrameIntoPlace() {
+  DeinterleavingFrameDescriptor& fromDesc = fDescriptors[MAX_CYCLE_SIZE];
+  DeinterleavingFrameDescriptor& toDesc = fDescriptors[fIIlastSeen];
+
+  toDesc.frameDataSize = fromDesc.frameDataSize;
+  toDesc.presentationTime = fromDesc.presentationTime;
+
+  // Move the data pointer into place by swapping the data pointers:
+  // (avoids copying the frame bytes themselves)
+  unsigned char* tmp = toDesc.frameData;
+  toDesc.frameData = fromDesc.frameData;
+  fromDesc.frameData = tmp;
+
+  // Keep track of the [min,max) range of slots filled in this cycle:
+  if (fIIlastSeen < fMinIndexSeen) {
+    fMinIndexSeen = fIIlastSeen;
+  }
+  if (fIIlastSeen + 1 > fMaxIndexSeen) {
+    fMaxIndexSeen = fIIlastSeen + 1;
+  }
+}
+
+void DeinterleavingFrames::releaseNext() {
+  // Mark the slot empty again, and advance (cyclically) to the next one:
+  fDescriptors[fNextIndexToRelease].frameDataSize = 0;
+  fNextIndexToRelease = (fNextIndexToRelease+1)%MAX_CYCLE_SIZE;
+}
+
+void DeinterleavingFrames::startNewCycle() {
+  // Processed lazily by "haveReleaseableFrame()":
+  fHaveEndedCycle = True;
+}
diff --git a/liveMedia/MP3AudioFileServerMediaSubsession.cpp b/liveMedia/MP3AudioFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..cc53ae1
--- /dev/null
+++ b/liveMedia/MP3AudioFileServerMediaSubsession.cpp
@@ -0,0 +1,179 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MP3 audio file.
+// (Actually, any MPEG-1 or MPEG-2 audio file should work.)
+// Implementation
+
+#include "MP3AudioFileServerMediaSubsession.hh"
+#include "MPEG1or2AudioRTPSink.hh"
+#include "MP3ADURTPSink.hh"
+#include "MP3FileSource.hh"
+#include "MP3ADU.hh"
+
+MP3AudioFileServerMediaSubsession* MP3AudioFileServerMediaSubsession
+::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
+	    Boolean generateADUs, Interleaving* interleaving) {
+  return new MP3AudioFileServerMediaSubsession(env, fileName, reuseFirstSource,
+					       generateADUs, interleaving);
+}
+
+MP3AudioFileServerMediaSubsession
+::MP3AudioFileServerMediaSubsession(UsageEnvironment& env,
+				    char const* fileName, Boolean reuseFirstSource,
+				    Boolean generateADUs,
+				    Interleaving* interleaving)
+  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
+    fGenerateADUs(generateADUs), fInterleaving(interleaving), fFileDuration(0.0) {
+  // Note: We take ownership of "interleaving" (deleted in our destructor).
+}
+
+MP3AudioFileServerMediaSubsession
+::~MP3AudioFileServerMediaSubsession() {
+  delete fInterleaving;
+}
+
+// Wrap "baseMP3Source" with whatever filters the configuration requires
+// (ADU generation, optional interleaving, or an ADU round-trip for seeking),
+// and estimate the stream's bitrate:
+FramedSource* MP3AudioFileServerMediaSubsession
+::createNewStreamSourceCommon(FramedSource* baseMP3Source, unsigned mp3NumBytes, unsigned& estBitrate) {
+  FramedSource* streamSource;
+  do {
+    streamSource = baseMP3Source; // by default
+    if (streamSource == NULL) break;
+
+    // Use the MP3 file size, plus the duration, to estimate the stream's bitrate:
+    // (125 = 1000/8: converts bytes-per-second to kbps)
+    if (mp3NumBytes > 0 && fFileDuration > 0.0) {
+      estBitrate = (unsigned)(mp3NumBytes/(125*fFileDuration) + 0.5); // kbps, rounded
+    } else {
+      estBitrate = 128; // kbps, estimate
+    }
+
+    if (fGenerateADUs) {
+      // Add a filter that converts the source MP3s to ADUs:
+      streamSource = ADUFromMP3Source::createNew(envir(), streamSource);
+      if (streamSource == NULL) break;
+
+      if (fInterleaving != NULL) {
+	// Add another filter that interleaves the ADUs before packetizing:
+	streamSource = MP3ADUinterleaver::createNew(envir(), *fInterleaving,
+						    streamSource);
+	if (streamSource == NULL) break;
+      }
+    } else if (fFileDuration > 0.0) {
+      // Because this is a seekable file, insert a pair of filters: one that
+      // converts the input MP3 stream to ADUs; another that converts these
+      // ADUs back to MP3. This allows us to seek within the input stream without
+      // tripping over the MP3 'bit reservoir':
+      streamSource = ADUFromMP3Source::createNew(envir(), streamSource);
+      if (streamSource == NULL) break;
+
+      streamSource = MP3FromADUSource::createNew(envir(), streamSource);
+      if (streamSource == NULL) break;
+    }
+  } while (0);
+
+  return streamSource;
+}
+
+// Walk back through the filter chain built by "createNewStreamSourceCommon()",
+// to recover the underlying MP3 source and (if present) the ADU filter:
+void MP3AudioFileServerMediaSubsession::getBaseStreams(FramedSource* frontStream,
+						       FramedSource*& sourceMP3Stream, ADUFromMP3Source*& aduStream/*if any*/) {
+  if (fGenerateADUs) {
+    // There's an ADU stream.
+    if (fInterleaving != NULL) {
+      // There's an interleaving filter in front of the ADU stream. So go back one, to reach the ADU stream:
+      aduStream = (ADUFromMP3Source*)(((FramedFilter*)frontStream)->inputSource());
+    } else {
+      aduStream = (ADUFromMP3Source*)frontStream;
+    }
+
+    // Then, go back one more, to reach the MP3 source:
+    sourceMP3Stream = (MP3FileSource*)(aduStream->inputSource());
+  } else if (fFileDuration > 0.0) {
+    // There are a pair of filters - MP3->ADU and ADU->MP3 - in front of the
+    // original MP3 source. So, go back one, to reach the ADU source:
+    aduStream = (ADUFromMP3Source*)(((FramedFilter*)frontStream)->inputSource());
+
+    // Then, go back one more, to reach the MP3 source:
+    sourceMP3Stream = (MP3FileSource*)(aduStream->inputSource());
+  } else {
+    // There's no filter in front of the source MP3 stream (and there's no ADU stream):
+    aduStream = NULL;
+    sourceMP3Stream = frontStream;
+  }
+}
+
+
+void MP3AudioFileServerMediaSubsession
+::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& /*numBytes*/) {
+  // Recover the underlying MP3 source, then seek within it:
+  FramedSource* sourceMP3Stream;
+  ADUFromMP3Source* aduStream;
+  getBaseStreams(inputSource, sourceMP3Stream, aduStream);
+
+  if (aduStream != NULL) aduStream->resetInput(); // because we're about to seek within its source
+  ((MP3FileSource*)sourceMP3Stream)->seekWithinFile(seekNPT, streamDuration);
+}
+
+void MP3AudioFileServerMediaSubsession
+::setStreamSourceScale(FramedSource* inputSource, float scale) {
+
+  FramedSource* sourceMP3Stream;
+  ADUFromMP3Source* aduStream;
+  getBaseStreams(inputSource, sourceMP3Stream, aduStream);
+
+  if (aduStream == NULL) return; // because, in this case, the stream's not scalable
+
+  // "scale" has already been validated (an integer >= 1) by "testScaleFactor()":
+  int iScale = (int)scale;
+  aduStream->setScaleFactor(iScale);
+  ((MP3FileSource*)sourceMP3Stream)->setPresentationTimeScale(iScale);
+}
+
+FramedSource* MP3AudioFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+  // Open the MP3 file, record its duration, and wrap it with any needed filters:
+  MP3FileSource* mp3Source = MP3FileSource::createNew(envir(), fFileName);
+  if (mp3Source == NULL) return NULL;
+  fFileDuration = mp3Source->filePlayTime();
+
+  return createNewStreamSourceCommon(mp3Source, mp3Source->fileSize(), estBitrate);
+}
+
+RTPSink* MP3AudioFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+		   unsigned char rtpPayloadTypeIfDynamic,
+		   FramedSource* /*inputSource*/) {
+  // ADU streams use the (dynamic payload type) ADU sink; plain MP3 uses the
+  // standard MPEG audio sink:
+  if (fGenerateADUs) {
+    return MP3ADURTPSink::createNew(envir(), rtpGroupsock,
+				    rtpPayloadTypeIfDynamic);
+  } else {
+    return MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock);
+  }
+}
+
+void MP3AudioFileServerMediaSubsession::testScaleFactor(float& scale) {
+  // Clamp the client-requested playback scale to what we can support.
+  // A non-seekable file (duration unknown - probably a live input source)
+  // can only be played at normal speed:
+  if (fFileDuration <= 0.0) {
+    scale = 1;
+    return;
+  }
+
+  // Otherwise any integral scale >= 1 is supported; round the requested
+  // value to the nearest such integer:
+  int roundedScale = (int)(scale + 0.5); // round
+  if (roundedScale < 1) roundedScale = 1;
+  scale = (float)roundedScale;
+}
+
+float MP3AudioFileServerMediaSubsession::duration() const {
+  // A value <= 0.0 means 'unknown/non-seekable' (see "testScaleFactor()"):
+  return fFileDuration;
+}
diff --git a/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.cpp b/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..b71d1e4
--- /dev/null
+++ b/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.cpp
@@ -0,0 +1,58 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an MP3 audio track within a Matroska file.
+// (Actually, MPEG-1 or MPEG-2 audio file should also work.)
+// Implementation
+
+#include "FileServerMediaSubsession.hh"
+#include "MP3AudioMatroskaFileServerMediaSubsession.hh"
+#include "MatroskaDemuxedTrack.hh"
+
+// Factory function — the only way to create one of these objects
+// (the constructor is private).
+MP3AudioMatroskaFileServerMediaSubsession* MP3AudioMatroskaFileServerMediaSubsession
+::createNew(MatroskaFileServerDemux& demux, MatroskaTrack* track,
+	    Boolean generateADUs, Interleaving* interleaving) {
+  return new MP3AudioMatroskaFileServerMediaSubsession(demux, track, generateADUs, interleaving);
+}
+
+// Constructor (called only by createNew()).  The base class is given the
+// Matroska file's name; the stream's duration is taken from the demux,
+// which knows the duration of the file as a whole.
+MP3AudioMatroskaFileServerMediaSubsession
+::MP3AudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, MatroskaTrack* track,
+					    Boolean generateADUs, Interleaving* interleaving)
+  : MP3AudioFileServerMediaSubsession(demux.envir(), demux.fileName(), False, generateADUs, interleaving),
+    fOurDemux(demux), fTrackNumber(track->trackNumber) {
+  fFileDuration = fOurDemux.fileDuration();
+}
+
+// Destructor: nothing to clean up beyond what the base class does.
+MP3AudioMatroskaFileServerMediaSubsession::~MP3AudioMatroskaFileServerMediaSubsession() {
+}
+
+// Seek the underlying demuxed Matroska track to "seekNPT" (seconds).
+// If an ADU filter sits on top of the MP3 source, its internal state is
+// reset first, because the data it has buffered predates the seek.
+void MP3AudioMatroskaFileServerMediaSubsession
+::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) {
+  FramedSource* sourceMP3Stream;
+  ADUFromMP3Source* aduStream;
+  getBaseStreams(inputSource, sourceMP3Stream, aduStream);
+
+  if (aduStream != NULL) aduStream->resetInput(); // because we're about to seek within its source
+  ((MatroskaDemuxedTrack*)sourceMP3Stream)->seekToTime(seekNPT);
+}
+
+// Create a new demuxed-track source for this client session.  A file size
+// of 0 is passed to createNewStreamSourceCommon() (the track's byte size
+// isn't known here).
+FramedSource* MP3AudioMatroskaFileServerMediaSubsession
+::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) {
+  FramedSource* baseMP3Source = fOurDemux.newDemuxedTrack(clientSessionId, fTrackNumber);
+  return createNewStreamSourceCommon(baseMP3Source, 0, estBitrate);
+}
diff --git a/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.hh b/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.hh
new file mode 100644
index 0000000..67cb663
--- /dev/null
+++ b/liveMedia/MP3AudioMatroskaFileServerMediaSubsession.hh
@@ -0,0 +1,57 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an MP3 audio track within a Matroska file.
+// (Actually, MPEG-1 or MPEG-2 audio should also work.)
+// C++ header
+
+#ifndef _MP3_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _MP3_AUDIO_MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _MP3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "MP3AudioFileServerMediaSubsession.hh"
+#endif
+#ifndef _MATROSKA_FILE_SERVER_DEMUX_HH
+#include "MatroskaFileServerDemux.hh"
+#endif
+
+class MP3AudioMatroskaFileServerMediaSubsession: public MP3AudioFileServerMediaSubsession {
+public:
+  static MP3AudioMatroskaFileServerMediaSubsession*
+  createNew(MatroskaFileServerDemux& demux, MatroskaTrack* track,
+	    Boolean generateADUs = False, Interleaving* interleaving = NULL);
+  // Note: "interleaving" is used only if "generateADUs" is True,
+  // (and a value of NULL means 'no interleaving')
+
+private:
+  MP3AudioMatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, MatroskaTrack* track,
+					    Boolean generateADUs, Interleaving* interleaving);
+  // called only by createNew();
+  virtual ~MP3AudioMatroskaFileServerMediaSubsession();
+
+private: // redefined virtual functions
+  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+					      unsigned& estBitrate);
+
+private:
+  MatroskaFileServerDemux& fOurDemux; // the demux for the Matroska file containing our track
+  unsigned fTrackNumber; // the Matroska track number of our MP3 audio track
+};
+
+#endif
diff --git a/liveMedia/MP3FileSource.cpp b/liveMedia/MP3FileSource.cpp
new file mode 100644
index 0000000..bb622d4
--- /dev/null
+++ b/liveMedia/MP3FileSource.cpp
@@ -0,0 +1,178 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 File Sources
+// Implementation
+
+#include "MP3FileSource.hh"
+#include "MP3StreamState.hh"
+#include "InputFile.hh"
+
+////////// MP3FileSource //////////
+
+// Constructor (used by createNew()).  All MP3 parsing state is kept in a
+// separate, owned "MP3StreamState" object.
+MP3FileSource::MP3FileSource(UsageEnvironment& env, FILE* fid)
+  : FramedFileSource(env, fid),
+    fStreamState(new MP3StreamState(env)) {
+}
+
+// Destructor: reclaim the owned stream state.
+MP3FileSource::~MP3FileSource() {
+  delete fStreamState;
+}
+
+// MIME type of the frames that we deliver.
+char const* MP3FileSource::MIMEtype() const {
+  return "audio/MPEG";
+}
+
+// Open "fileName" and construct an MP3FileSource for it.  The stream is
+// initialized by checking for a valid MPEG audio header near the start of
+// the file; on any failure the partially-constructed object is closed and
+// NULL is returned.  (The do/while(0) is used as a structured "goto fail".)
+MP3FileSource* MP3FileSource::createNew(UsageEnvironment& env, char const* fileName) {
+  MP3FileSource* newSource = NULL;
+
+  do {
+    FILE* fid;
+
+    fid = OpenInputFile(env, fileName);
+    if (fid == NULL) break;
+
+    newSource = new MP3FileSource(env, fid);
+    if (newSource == NULL) break;
+
+    unsigned fileSize = (unsigned)GetFileSize(fileName, fid);
+    newSource->assignStream(fid, fileSize);
+    if (!newSource->initializeStream()) break;
+
+    return newSource;
+  } while (0);
+
+  // Failure path: "Medium::close(NULL)" is harmless if we never got that far
+  Medium::close(newSource);
+  return NULL;
+}
+
+// The file's total play time in seconds (delegates to the stream state).
+float MP3FileSource::filePlayTime() const {
+  return fStreamState->filePlayTime();
+}
+
+// The file's size in bytes (delegates to the stream state).
+unsigned MP3FileSource::fileSize() const {
+  return fStreamState->fileSize();
+}
+
+// Set an integral speed-up factor for the presentation times we generate
+// (delegates to the stream state).
+void MP3FileSource::setPresentationTimeScale(unsigned scale) {
+  fStreamState->setPresentationTimeScale(scale);
+}
+
+// Seek to "seekNPT" seconds into the file and, if "streamDuration" > 0,
+// limit subsequent streaming to that many seconds' worth of bytes.  Time
+// positions are mapped to byte offsets via the stream state's
+// getByteNumberFromPositionFraction().
+void MP3FileSource::seekWithinFile(double seekNPT, double streamDuration) {
+  float fileDuration = filePlayTime();
+
+  // First, make sure that 0.0 <= seekNPT <= seekNPT + streamDuration <= fileDuration
+  if (seekNPT < 0.0) {
+    seekNPT = 0.0;
+  } else if (seekNPT > fileDuration) {
+    seekNPT = fileDuration;
+  }
+  if (streamDuration < 0.0) {
+    streamDuration = 0.0;
+  } else if (seekNPT + streamDuration > fileDuration) {
+    streamDuration = fileDuration - seekNPT;
+  }
+
+  float seekFraction = (float)seekNPT/fileDuration;
+  unsigned seekByteNumber = fStreamState->getByteNumberFromPositionFraction(seekFraction);
+  fStreamState->seekWithinFile(seekByteNumber);
+
+  fLimitNumBytesToStream = False; // by default
+  if (streamDuration > 0.0) {
+    float endFraction = (float)(seekNPT + streamDuration)/fileDuration;
+    unsigned endByteNumber = fStreamState->getByteNumberFromPositionFraction(endFraction);
+    if (endByteNumber > seekByteNumber) { // sanity check
+      fNumBytesToStream = endByteNumber - seekByteNumber;
+      fLimitNumBytesToStream = True;
+    }
+  }
+}
+
+// Report the stream's attributes, as a text string, via the environment's
+// 'result message'.
+void MP3FileSource::getAttributes() const {
+  char buffer[200];
+  fStreamState->getAttributes(buffer, sizeof buffer);
+  envir().setResultMsg(buffer);
+}
+
+// Deliver the next MP3 frame to our downstream object.  On failure (end of
+// stream, or byte limit reached), signal closure instead.  Delivery of the
+// "after getting" notification is deferred to the event loop to avoid
+// unbounded recursion (except on Windows; see below).
+void MP3FileSource::doGetNextFrame() {
+  if (!doGetNextFrame1()) {
+    handleClosure();
+    return;
+  }
+
+  // Switch to another task:
+#if defined(__WIN32__) || defined(_WIN32)
+  // HACK: liveCaster/lc uses an implementation of scheduleDelayedTask()
+  // that performs very badly (chewing up lots of CPU time, apparently polling)
+  // on Windows.  Until this is fixed, we just call our "afterGetting()"
+  // function directly.  This avoids infinite recursion, as long as our sink
+  // is discontinuous, which is the case for the RTP sink that liveCaster/lc
+  // uses. #####
+  afterGetting(this);
+#else
+  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
+				(TaskFunc*)afterGetting, this);
+#endif
+}
+
+// Do the real work of reading one frame into "fTo": locate the next frame
+// header (unless we just initialized, in which case the first header has
+// already been found), then read the frame data and set the presentation
+// time, frame size and duration.  Returns False on end of stream, on
+// reaching the byte limit set by seekWithinFile(), or if the destination
+// buffer is too small (in which case the error is reported via the
+// environment's 'result message').
+Boolean MP3FileSource::doGetNextFrame1() {
+  if (fLimitNumBytesToStream && fNumBytesToStream == 0) return False; // we've already streamed as much as we were asked for
+
+  if (!fHaveJustInitialized) {
+    if (fStreamState->findNextHeader(fPresentationTime) == 0) return False;
+  } else {
+    fPresentationTime = fFirstFramePresentationTime;
+    fHaveJustInitialized = False;
+  }
+
+  if (!fStreamState->readFrame(fTo, fMaxSize, fFrameSize, fDurationInMicroseconds)) {
+    char tmp[200];
+    sprintf(tmp,
+	    "Insufficient buffer size %d for reading MPEG audio frame (needed %d)\n",
+	    fMaxSize, fFrameSize);
+    envir().setResultMsg(tmp);
+    fFrameSize = fMaxSize; // deliver what fits
+    return False;
+  }
+  // Count the delivered bytes against any streaming limit:
+  if (fNumBytesToStream > fFrameSize) fNumBytesToStream -= fFrameSize; else fNumBytesToStream = 0;
+
+  return True;
+}
+
+// Hand the open file (and its size) to the stream state for parsing.
+void MP3FileSource::assignStream(FILE* fid, unsigned fileSize) {
+  fStreamState->assignStream(fid, fileSize);
+}
+
+
+// Verify that the assigned stream really is MPEG audio, and prime our
+// streaming state.  Returns False (with an explanatory 'result message')
+// if no valid frame header is found near the start of the file.
+Boolean MP3FileSource::initializeStream() {
+  // Make sure the file has an appropriate header near the start:
+  if (fStreamState->findNextHeader(fFirstFramePresentationTime) == 0) {
+    envir().setResultMsg("not an MPEG audio file");
+    return False;
+  }
+
+  fStreamState->checkForXingHeader(); // in case this is a VBR file
+
+  fHaveJustInitialized = True;
+  fLimitNumBytesToStream = False;
+  fNumBytesToStream = 0;
+
+  // Hack: It's possible that our environment's 'result message' has been
+  // reset within this function, so set it again to our name now:
+  envir().setResultMsg(name());
+  return True;
+}
diff --git a/liveMedia/MP3Internals.cpp b/liveMedia/MP3Internals.cpp
new file mode 100644
index 0000000..3481b7c
--- /dev/null
+++ b/liveMedia/MP3Internals.cpp
@@ -0,0 +1,808 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 internal implementation details
+// Implementation
+
+#include "MP3InternalsHuffman.hh"
+
+#include <stdlib.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+// This is crufty old code that needs to be cleaned up #####
+
+// Bitrate (kbps) lookup table, indexed [isMPEG2][layer-1][bitrateIndex]:
+static unsigned const live_tabsel[2][3][16] = {
+   { {32,32,64,96,128,160,192,224,256,288,320,352,384,416,448,448},
+     {32,32,48,56, 64, 80, 96,112,128,160,192,224,256,320,384,384},
+     {32,32,40,48, 56, 64, 80, 96,112,128,160,192,224,256,320,320} },
+
+   { {32,32,48,56,64,80,96,112,128,144,160,176,192,224,256,256},
+     {8,8,16,24,32,40,48,56,64,80,96,112,128,144,160,160},
+     {8,8,16,24,32,40,48,56,64,80,96,112,128,144,160,160} }
+};
+/* Note: live_tabsel[*][*][0 or 15] shouldn't occur; use dummy values there */
+
+// Sampling frequency (Hz) lookup table, indexed by "samplingFreqIndex":
+static long const live_freqs[]
+= { 44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000, 0 };
+
+// Scale-factor band boundaries for one sampling frequency: cumulative start
+// indices ("Idx") and band widths ("Diff"), for long and short blocks.
+struct bandInfoStruct {
+  int longIdx[23];
+  int longDiff[22];
+  int shortIdx[14];
+  int shortDiff[13];
+};
+
+// Per-sampling-frequency band tables, indexed by "samplingFreqIndex"
+// (0-2: MPEG-1; 3-5: MPEG-2; 6: MPEG-2.5 — see the caveat below):
+static struct bandInfoStruct const bandInfo[7] = {
+/* MPEG 1.0 */
+ { {0,4,8,12,16,20,24,30,36,44,52,62,74, 90,110,134,162,196,238,288,342,418,576},
+   {4,4,4,4,4,4,6,6,8, 8,10,12,16,20,24,28,34,42,50,54, 76,158},
+   {0,4*3,8*3,12*3,16*3,22*3,30*3,40*3,52*3,66*3, 84*3,106*3,136*3,192*3},
+   {4,4,4,4,6,8,10,12,14,18,22,30,56} } ,
+
+ { {0,4,8,12,16,20,24,30,36,42,50,60,72, 88,106,128,156,190,230,276,330,384,576},
+   {4,4,4,4,4,4,6,6,6, 8,10,12,16,18,22,28,34,40,46,54, 54,192},
+   {0,4*3,8*3,12*3,16*3,22*3,28*3,38*3,50*3,64*3, 80*3,100*3,126*3,192*3},
+   {4,4,4,4,6,6,10,12,14,16,20,26,66} } ,
+
+ { {0,4,8,12,16,20,24,30,36,44,54,66,82,102,126,156,194,240,296,364,448,550,576} ,
+   {4,4,4,4,4,4,6,6,8,10,12,16,20,24,30,38,46,56,68,84,102, 26} ,
+   {0,4*3,8*3,12*3,16*3,22*3,30*3,42*3,58*3,78*3,104*3,138*3,180*3,192*3} ,
+   {4,4,4,4,6,8,12,16,20,26,34,42,12} } ,
+
+/* MPEG 2.0 */
+ { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
+   {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } ,
+   {0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} ,
+   {4,4,4,6,6,8,10,14,18,26,32,42,18 } } ,
+
+ { {0,6,12,18,24,30,36,44,54,66,80,96,114,136,162,194,232,278,330,394,464,540,576},
+   {6,6,6,6,6,6,8,10,12,14,16,18,22,26,32,38,46,52,64,70,76,36 } ,
+   {0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,136*3,180*3,192*3} ,
+   {4,4,4,6,8,10,12,14,18,24,32,44,12 } } ,
+
+ { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
+   {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 },
+   {0,4*3,8*3,12*3,18*3,26*3,36*3,48*3,62*3,80*3,104*3,134*3,174*3,192*3},
+   {4,4,4,6,8,10,12,14,18,24,30,40,18 } } ,
+
+/* MPEG 2.5, wrong! table (it's just a copy of MPEG 2.0/44.1kHz) */
+ { {0,6,12,18,24,30,36,44,54,66,80,96,116,140,168,200,238,284,336,396,464,522,576},
+   {6,6,6,6,6,6,8,10,12,14,16,20,24,28,32,38,46,52,60,68,58,54 } ,
+   {0,4*3,8*3,12*3,18*3,24*3,32*3,42*3,56*3,74*3,100*3,132*3,174*3,192*3} ,
+   {4,4,4,6,6,8,10,14,18,26,32,42,18 } } ,
+};
+
+// Global lookup tables, filled in once by the first MP3FrameParams
+// constructor:
+unsigned int n_slen2[512]; /* MPEG 2.0 slen for 'normal' mode */
+unsigned int i_slen2[256]; /* MPEG 2.0 slen for intensity stereo */
+
+#define MPG_MD_MONO 3 // the 'mode' header field value meaning single-channel
+
+
+////////// MP3FrameParams //////////
+
+// Constructor.  The embedded BitVector initially points at our own
+// "frameBytes" buffer.  The first instance constructed also fills in the
+// global i_slen2[]/n_slen2[] lookup tables (packed scale-factor-length
+// codes for MPEG-2 decoding; each entry packs up to four 3-bit fields
+// plus a table selector in bits 12+).
+// NOTE(review): the "doneInit" guard is not synchronized — concurrent
+// construction of the first instances could race.
+MP3FrameParams::MP3FrameParams()
+  : bv(frameBytes, 0, sizeof frameBytes) /* by default */ {
+  oldHdr = firstHdr = 0;
+
+  static Boolean doneInit = False;
+  if (doneInit) return;
+
+  int i,j,k,l;
+
+  for (i=0;i<5;i++) {
+    for (j=0;j<6;j++) {
+      for (k=0;k<6;k++) {
+	int n = k + j * 6 + i * 36;
+	i_slen2[n] = i|(j<<3)|(k<<6)|(3<<12);
+      }
+    }
+  }
+  for (i=0;i<4;i++) {
+    for (j=0;j<4;j++) {
+      for (k=0;k<4;k++) {
+	int n = k + j * 4 + i * 16;
+	i_slen2[n+180] = i|(j<<3)|(k<<6)|(4<<12);
+      }
+    }
+  }
+  for (i=0;i<4;i++) {
+    for (j=0;j<3;j++) {
+      int n = j + i * 3;
+      i_slen2[n+244] = i|(j<<3) | (5<<12);
+      n_slen2[n+500] = i|(j<<3) | (2<<12) | (1<<15);
+    }
+  }
+
+  for (i=0;i<5;i++) {
+    for (j=0;j<5;j++) {
+      for (k=0;k<4;k++) {
+	for (l=0;l<4;l++) {
+	  int n = l + k * 4 + j * 16 + i * 80;
+	  n_slen2[n] = i|(j<<3)|(k<<6)|(l<<9)|(0<<12);
+	}
+      }
+    }
+  }
+  for (i=0;i<5;i++) {
+    for (j=0;j<5;j++) {
+      for (k=0;k<4;k++) {
+	int n = k + j * 4 + i * 20;
+	n_slen2[n+400] = i|(j<<3)|(k<<6)|(1<<12);
+      }
+    }
+  }
+  doneInit = True;
+}
+
+// Destructor: nothing to clean up.
+MP3FrameParams::~MP3FrameParams() {
+}
+
+// Decode the fields of the 32-bit MPEG audio frame header ("hdr") into our
+// member variables, then derive bitrate, sampling frequency, frame size
+// and side-info size from them.
+void MP3FrameParams::setParamsFromHeader() {
+  if (hdr & (1<<20)) { // not MPEG-2.5
+    isMPEG2 = (hdr & (1<<19)) ? 0x0 : 0x1;
+    isMPEG2_5 = 0;
+  }
+  else {
+    isMPEG2 = 1;
+    isMPEG2_5 = 1;
+  }
+
+  layer = 4-((hdr>>17)&3);
+  if (layer == 4) layer = 3; // layer==4 is not allowed
+  bitrateIndex = ((hdr>>12)&0xf);
+
+  // The sampling-frequency index is offset into live_freqs[] by MPEG version:
+  if (isMPEG2_5) {
+    samplingFreqIndex = ((hdr>>10)&0x3) + 6;
+  } else {
+    samplingFreqIndex = ((hdr>>10)&0x3) + (isMPEG2*3);
+  }
+
+  hasCRC = (hdr & 0x10000) == 0; // the 'protection' bit is 0 when a CRC is present
+
+  padding = ((hdr>>9)&0x1);
+  extension = ((hdr>>8)&0x1);
+  mode = ((hdr>>6)&0x3);
+  mode_ext = ((hdr>>4)&0x3);
+  copyright = ((hdr>>3)&0x1);
+  original = ((hdr>>2)&0x1);
+  emphasis = hdr & 0x3;
+
+  stereo = (mode == MPG_MD_MONO) ? 1 : 2;
+
+  if (((hdr>>10)&0x3) == 0x3) { // 0x3 is an invalid sampling-frequency code
+#ifdef DEBUG_ERRORS
+    fprintf(stderr,"Stream error - hdr: 0x%08x\n", hdr);
+#endif
+  }
+
+  bitrate = live_tabsel[isMPEG2][layer-1][bitrateIndex];
+  samplingFreq = live_freqs[samplingFreqIndex];
+  isStereo = (stereo > 1);
+  isFreeFormat = (bitrateIndex == 0);
+  frameSize
+    = ComputeFrameSize(bitrate, samplingFreq, padding, isMPEG2, layer);
+  sideInfoSize = computeSideInfoSize();
+  }
+
+// Size (in bytes) of the Layer III side information for the current frame
+// parameters: 17/9 bytes for MPEG-2 stereo/mono, 32/17 for MPEG-1, plus
+// 2 bytes if a CRC follows the header.
+unsigned MP3FrameParams::computeSideInfoSize() {
+  unsigned size;
+
+  if (isMPEG2) {
+    size = isStereo ? 17 : 9;
+  } else {
+    size = isStereo ? 32 : 17;
+  }
+
+  if (hasCRC) {
+    size += 2;
+  }
+
+  return size;
+}
+
+// Compute the frame size in bytes, excluding the 4-byte header ("- 4"):
+// bitrate (kbps) times the layer-dependent multiplier, divided by the
+// sampling frequency (which is effectively doubled for MPEG-2), plus one
+// optional padding byte.  Returns 0 if "samplingFreq" is 0 (invalid header).
+unsigned ComputeFrameSize(unsigned bitrate, unsigned samplingFreq,
+			  Boolean usePadding, Boolean isMPEG2,
+			  unsigned char layer) {
+  if (samplingFreq == 0) return 0;
+  unsigned const bitrateMultiplier = (layer == 1) ? 12000*4 : 144000;
+  unsigned framesize;
+
+  framesize = bitrate*bitrateMultiplier;
+  framesize /= samplingFreq<<(isMPEG2 ? 1 : 0);
+  framesize = framesize + usePadding - 4;
+
+  return framesize;
+}
+
+#define TRUNC_FAIRLY
+static unsigned updateSideInfoSizes(MP3SideInfo& sideInfo, Boolean isMPEG2,
+ unsigned char const* mainDataPtr,
+ unsigned allowedNumBits,
+ unsigned& part23Length0a,
+ unsigned& part23Length0aTruncation,
+ unsigned& part23Length0b,
+ unsigned& part23Length0bTruncation,
+ unsigned& part23Length1a,
+ unsigned& part23Length1aTruncation,
+ unsigned& part23Length1b,
+ unsigned& part23Length1bTruncation) {
+ unsigned p23L0, p23L1 = 0, p23L0Trunc = 0, p23L1Trunc = 0;
+
+ p23L0 = sideInfo.ch[0].gr[0].part2_3_length;
+ p23L1 = isMPEG2 ? 0 : sideInfo.ch[0].gr[1].part2_3_length;
+#ifdef TRUNC_ONLY0
+ if (p23L0 < allowedNumBits)
+ allowedNumBits = p23L0;
+#endif
+#ifdef TRUNC_ONLY1
+ if (p23L1 < allowedNumBits)
+ allowedNumBits = p23L1;
+#endif
+ if (p23L0 + p23L1 > allowedNumBits) {
+ /* We need to shorten one or both fields */
+ unsigned truncation = p23L0 + p23L1 - allowedNumBits;
+#ifdef TRUNC_FAIRLY
+ p23L0Trunc = (truncation*p23L0)/(p23L0 + p23L1);
+ p23L1Trunc = truncation - p23L0Trunc;
+#endif
+#if defined(TRUNC_FAVOR0) || defined(TRUNC_ONLY0)
+ p23L1Trunc = (truncation>p23L1) ? p23L1 : truncation;
+ p23L0Trunc = truncation - p23L1Trunc;
+#endif
+#if defined(TRUNC_FAVOR1) || defined(TRUNC_ONLY1)
+ p23L0Trunc = (truncation>p23L0) ? p23L0 : truncation;
+ p23L1Trunc = truncation - p23L0Trunc;
+#endif
+ }
+
+ /* ASSERT: (p23L0Trunc <= p23L0) && (p23l1Trunc <= p23L1) */
+ p23L0 -= p23L0Trunc; p23L1 -= p23L1Trunc;
+#ifdef DEBUG
+ fprintf(stderr, "updateSideInfoSizes (allowed: %d): %d->%d, %d->%d\n", allowedNumBits, p23L0+p23L0Trunc, p23L0, p23L1+p23L1Trunc, p23L1);
+#endif
+
+ // The truncations computed above are still estimates. We need to
+ // adjust them so that the new fields will continue to end on
+ // Huffman-encoded sample boundaries:
+ updateSideInfoForHuffman(sideInfo, isMPEG2, mainDataPtr,
+ p23L0, p23L1,
+ part23Length0a, part23Length0aTruncation,
+ part23Length0b, part23Length0bTruncation,
+ part23Length1a, part23Length1aTruncation,
+ part23Length1b, part23Length1bTruncation);
+ p23L0 = part23Length0a + part23Length0b;
+ p23L1 = part23Length1a + part23Length1b;
+
+ sideInfo.ch[0].gr[0].part2_3_length = p23L0;
+ sideInfo.ch[0].gr[1].part2_3_length = p23L1;
+ part23Length0bTruncation
+ += sideInfo.ch[1].gr[0].part2_3_length; /* allow for stereo */
+ sideInfo.ch[1].gr[0].part2_3_length = 0; /* output mono */
+ sideInfo.ch[1].gr[1].part2_3_length = 0; /* output mono */
+
+ return p23L0 + p23L1;
+}
+
+
+// Parse an MPEG audio frame ("totFrameSize" bytes at "framePtr") and return,
+// via the reference parameters: the 32-bit header, the total frame size
+// (header included), the decoded side info and its size, the backpointer
+// ("main_data_begin"), and the ADU size — the sum of all four
+// "part2_3_length" fields, in bytes, rounded up.  For non-Layer-III frames
+// the whole frame body counts as the ADU and there is no side info.
+// Returns False if there isn't enough data to parse.
+Boolean GetADUInfoFromMP3Frame(unsigned char const* framePtr,
+			       unsigned totFrameSize,
+			       unsigned& hdr, unsigned& frameSize,
+			       MP3SideInfo& sideInfo, unsigned& sideInfoSize,
+			       unsigned& backpointer, unsigned& aduSize) {
+  if (totFrameSize < 4) return False; // there's not enough data
+
+  MP3FrameParams fr;
+  fr.hdr = ((unsigned)framePtr[0] << 24) | ((unsigned)framePtr[1] << 16)
+         | ((unsigned)framePtr[2] << 8) | (unsigned)framePtr[3];
+  fr.setParamsFromHeader();
+  fr.setBytePointer(framePtr + 4, totFrameSize - 4); // skip hdr
+
+  frameSize = 4 + fr.frameSize;
+
+  if (fr.layer != 3) {
+    // Special case for non-layer III frames
+    backpointer = 0;
+    sideInfoSize = 0;
+    aduSize = fr.frameSize;
+    return True;
+  }
+
+  sideInfoSize = fr.sideInfoSize;
+  if (totFrameSize < 4 + sideInfoSize) return False; // not enough data
+
+  fr.getSideInfo(sideInfo);
+
+  hdr = fr.hdr;
+  backpointer = sideInfo.main_data_begin;
+  unsigned numBits = sideInfo.ch[0].gr[0].part2_3_length;
+  numBits += sideInfo.ch[0].gr[1].part2_3_length;
+  numBits += sideInfo.ch[1].gr[0].part2_3_length;
+  numBits += sideInfo.ch[1].gr[1].part2_3_length;
+  aduSize = (numBits+7)/8; // round up to whole bytes
+#ifdef DEBUG
+  fprintf(stderr, "mp3GetADUInfoFromFrame: hdr: %08x, frameSize: %d, part2_3_lengths: %d,%d,%d,%d, aduSize: %d, backpointer: %d\n", hdr, frameSize, sideInfo.ch[0].gr[0].part2_3_length, sideInfo.ch[0].gr[1].part2_3_length, sideInfo.ch[1].gr[0].part2_3_length, sideInfo.ch[1].gr[1].part2_3_length, aduSize, backpointer);
+#endif
+
+  return True;
+}
+
+
+// Read MPEG-1 Layer III side info (two granules per channel) from the
+// frame's bit stream into "si".  Mirrors putSideInfo1(), which writes the
+// same layout back out.
+static void getSideInfo1(MP3FrameParams& fr, MP3SideInfo& si,
+			 int stereo, int ms_stereo, long sfreq,
+			 int /*single*/) {
+  int ch, gr;
+#if 0
+  int powdiff = (single == 3) ? 4 : 0;
+#endif
+
+  /* initialize all four "part2_3_length" fields to zero: */
+  si.ch[0].gr[0].part2_3_length = 0; si.ch[1].gr[0].part2_3_length = 0;
+  si.ch[0].gr[1].part2_3_length = 0; si.ch[1].gr[1].part2_3_length = 0;
+
+  si.main_data_begin = fr.getBits(9);
+  if (stereo == 1)
+    si.private_bits = fr.getBits(5);
+  else
+    si.private_bits = fr.getBits(3);
+
+  for (ch=0; ch<stereo; ch++) {
+    si.ch[ch].gr[0].scfsi = -1; // granule 0 has no scfsi field
+    si.ch[ch].gr[1].scfsi = fr.getBits(4);
+  }
+
+  for (gr=0; gr<2; gr++) {
+    for (ch=0; ch<stereo; ch++) {
+      MP3SideInfo::gr_info_s_t& gr_info = si.ch[ch].gr[gr];
+
+      gr_info.part2_3_length = fr.getBits(12);
+      gr_info.big_values = fr.getBits(9);
+      gr_info.global_gain = fr.getBits(8);
+#if 0
+      gr_info.pow2gain = gainpow2+256 - gr_info.global_gain + powdiff;
+      if (ms_stereo) gr_info.pow2gain += 2;
+#endif
+      gr_info.scalefac_compress = fr.getBits(4);
+/* window-switching flag == 1 for block_Type != 0 .. and block-type == 0 -> win-sw-flag = 0 */
+      gr_info.window_switching_flag = fr.get1Bit();
+      if (gr_info.window_switching_flag) {
+	int i;
+	gr_info.block_type = fr.getBits(2);
+	gr_info.mixed_block_flag = fr.get1Bit();
+	gr_info.table_select[0] = fr.getBits(5);
+	gr_info.table_select[1] = fr.getBits(5);
+	/*
+	 * table_select[2] not needed, because there is no region2,
+	 * but to satisfy some verification tools we set it anyway.
+	 */
+	gr_info.table_select[2] = 0;
+	for (i=0;i<3;i++) {
+	  gr_info.subblock_gain[i] = fr.getBits(3);
+	  gr_info.full_gain[i]
+	    = gr_info.pow2gain + ((gr_info.subblock_gain[i])<<3);
+	}
+
+#ifdef DEBUG_ERRORS
+	if (gr_info.block_type == 0) {
+	  fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n");
+	}
+#endif
+	/* region_count/start parameters are implicit in this case. */
+	gr_info.region1start = 36>>1;
+	gr_info.region2start = 576>>1;
+      }
+      else
+      {
+	int i,r0c,r1c;
+	for (i=0; i<3; i++) {
+	  gr_info.table_select[i] = fr.getBits(5);
+	}
+	r0c = gr_info.region0_count = fr.getBits(4);
+	r1c = gr_info.region1_count = fr.getBits(3);
+	gr_info.region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ;
+	gr_info.region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1;
+	gr_info.block_type = 0;
+	gr_info.mixed_block_flag = 0;
+      }
+      gr_info.preflag = fr.get1Bit();
+      gr_info.scalefac_scale = fr.get1Bit();
+      gr_info.count1table_select = fr.get1Bit();
+    }
+  }
+}
+
+// Read MPEG-2 (LSF) Layer III side info — a single granule per channel —
+// from the frame's bit stream into "si".  Granule 1's length is zeroed to
+// mark it unused.  Mirrors putSideInfo2().
+static void getSideInfo2(MP3FrameParams& fr, MP3SideInfo& si,
+			 int stereo, int ms_stereo, long sfreq,
+			 int /*single*/) {
+  int ch;
+#if 0
+  int powdiff = (single == 3) ? 4 : 0;
+#endif
+
+  /* initialize all four "part2_3_length" fields to zero: */
+  si.ch[0].gr[0].part2_3_length = 0; si.ch[1].gr[0].part2_3_length = 0;
+  si.ch[0].gr[1].part2_3_length = 0; si.ch[1].gr[1].part2_3_length = 0;
+
+  si.main_data_begin = fr.getBits(8); // note: 8 bits here, vs 9 for MPEG-1
+  if (stereo == 1)
+    si.private_bits = fr.get1Bit();
+  else
+    si.private_bits = fr.getBits(2);
+
+  for (ch=0; ch<stereo; ch++) {
+    MP3SideInfo::gr_info_s_t& gr_info = si.ch[ch].gr[0];
+
+    gr_info.part2_3_length = fr.getBits(12);
+    si.ch[ch].gr[1].part2_3_length = 0; /* to ensure granule 1 unused */
+
+    gr_info.big_values = fr.getBits(9);
+    gr_info.global_gain = fr.getBits(8);
+#if 0
+    gr_info.pow2gain = gainpow2+256 - gr_info.global_gain + powdiff;
+    if (ms_stereo) gr_info.pow2gain += 2;
+#endif
+    gr_info.scalefac_compress = fr.getBits(9); // 9 bits, vs 4 for MPEG-1
+/* window-switching flag == 1 for block_Type != 0 .. and block-type == 0 -> win-sw-flag = 0 */
+    gr_info.window_switching_flag = fr.get1Bit();
+    if (gr_info.window_switching_flag) {
+      int i;
+      gr_info.block_type = fr.getBits(2);
+      gr_info.mixed_block_flag = fr.get1Bit();
+      gr_info.table_select[0] = fr.getBits(5);
+      gr_info.table_select[1] = fr.getBits(5);
+      /*
+       * table_select[2] not needed, because there is no region2,
+       * but to satisfy some verification tools we set it anyway.
+       */
+      gr_info.table_select[2] = 0;
+      for (i=0;i<3;i++) {
+	gr_info.subblock_gain[i] = fr.getBits(3);
+	gr_info.full_gain[i]
+	  = gr_info.pow2gain + ((gr_info.subblock_gain[i])<<3);
+      }
+
+#ifdef DEBUG_ERRORS
+      if (gr_info.block_type == 0) {
+	fprintf(stderr,"Blocktype == 0 and window-switching == 1 not allowed.\n");
+      }
+#endif
+      /* region_count/start parameters are implicit in this case. */
+/* check this again! */
+      if (gr_info.block_type == 2)
+	gr_info.region1start = 36>>1;
+      else {
+	gr_info.region1start = 54>>1;
+      }
+      gr_info.region2start = 576>>1;
+    }
+    else
+    {
+      int i,r0c,r1c;
+      for (i=0; i<3; i++) {
+	gr_info.table_select[i] = fr.getBits(5);
+      }
+      r0c = gr_info.region0_count = fr.getBits(4);
+      r1c = gr_info.region1_count = fr.getBits(3);
+      gr_info.region1start = bandInfo[sfreq].longIdx[r0c+1] >> 1 ;
+      gr_info.region2start = bandInfo[sfreq].longIdx[r0c+1+r1c+1] >> 1;
+      gr_info.block_type = 0;
+      gr_info.mixed_block_flag = 0;
+    }
+    gr_info.scalefac_scale = fr.get1Bit();
+    gr_info.count1table_select = fr.get1Bit();
+  }
+}
+
+
+#define MPG_MD_JOINT_STEREO 1
+
+// Decode this frame's side info into "si", skipping the 16-bit CRC first
+// if one is present, then dispatching on MPEG version.
+void MP3FrameParams::getSideInfo(MP3SideInfo& si) {
+  // First skip over the CRC if present:
+  if (hasCRC) getBits(16);
+
+  int single = -1;
+  int ms_stereo;
+  int sfreq = samplingFreqIndex;
+
+  if (stereo == 1) {
+    single = 0;
+  }
+
+  ms_stereo = (mode == MPG_MD_JOINT_STEREO) && (mode_ext & 0x2);
+
+  if (isMPEG2) {
+    getSideInfo2(*this, si, stereo, ms_stereo, sfreq, single);
+  } else {
+    getSideInfo1(*this, si, stereo, ms_stereo, sfreq, single);
+  }
+}
+
+// Write MPEG-1 Layer III side info from "si" into the bit stream "bv" —
+// the inverse of getSideInfo1() (field widths must match it exactly).
+static void putSideInfo1(BitVector& bv,
+			 MP3SideInfo const& si, Boolean isStereo) {
+  int ch, gr, i;
+  int stereo = isStereo ? 2 : 1;
+
+  bv.putBits(si.main_data_begin,9);
+  if (stereo == 1)
+    bv.putBits(si.private_bits, 5);
+  else
+    bv.putBits(si.private_bits, 3);
+
+  for (ch=0; ch<stereo; ch++) {
+    bv.putBits(si.ch[ch].gr[1].scfsi, 4);
+  }
+
+  for (gr=0; gr<2; gr++) {
+    for (ch=0; ch<stereo; ch++) {
+      MP3SideInfo::gr_info_s_t const& gr_info = si.ch[ch].gr[gr];
+
+      bv.putBits(gr_info.part2_3_length, 12);
+      bv.putBits(gr_info.big_values, 9);
+      bv.putBits(gr_info.global_gain, 8);
+      bv.putBits(gr_info.scalefac_compress, 4);
+      bv.put1Bit(gr_info.window_switching_flag);
+      if (gr_info.window_switching_flag) {
+	bv.putBits(gr_info.block_type, 2);
+	bv.put1Bit(gr_info.mixed_block_flag);
+	for (i=0; i<2; i++)
+	  bv.putBits(gr_info.table_select[i], 5);
+	for (i=0; i<3; i++)
+	  bv.putBits(gr_info.subblock_gain[i], 3);
+      }
+      else {
+	for (i=0; i<3; i++)
+	  bv.putBits(gr_info.table_select[i], 5);
+	bv.putBits(gr_info.region0_count, 4);
+	bv.putBits(gr_info.region1_count, 3);
+      }
+
+      bv.put1Bit(gr_info.preflag);
+      bv.put1Bit(gr_info.scalefac_scale);
+      bv.put1Bit(gr_info.count1table_select);
+    }
+  }
+}
+
+// Write MPEG-2 (LSF) Layer III side info (single granule) from "si" into
+// the bit stream "bv" — the inverse of getSideInfo2().
+static void putSideInfo2(BitVector& bv,
+			 MP3SideInfo const& si, Boolean isStereo) {
+  int ch, i;
+  int stereo = isStereo ? 2 : 1;
+
+  bv.putBits(si.main_data_begin,8);
+  if (stereo == 1)
+    bv.put1Bit(si.private_bits);
+  else
+    bv.putBits(si.private_bits, 2);
+
+  for (ch=0; ch<stereo; ch++) {
+    MP3SideInfo::gr_info_s_t const& gr_info = si.ch[ch].gr[0];
+
+    bv.putBits(gr_info.part2_3_length, 12);
+    bv.putBits(gr_info.big_values, 9);
+    bv.putBits(gr_info.global_gain, 8);
+    bv.putBits(gr_info.scalefac_compress, 9);
+    bv.put1Bit(gr_info.window_switching_flag);
+    if (gr_info.window_switching_flag) {
+      bv.putBits(gr_info.block_type, 2);
+      bv.put1Bit(gr_info.mixed_block_flag);
+      for (i=0; i<2; i++)
+	bv.putBits(gr_info.table_select[i], 5);
+      for (i=0; i<3; i++)
+	bv.putBits(gr_info.subblock_gain[i], 3);
+    }
+    else {
+      for (i=0; i<3; i++)
+	bv.putBits(gr_info.table_select[i], 5);
+      bv.putBits(gr_info.region0_count, 4);
+      bv.putBits(gr_info.region1_count, 3);
+    }
+
+    bv.put1Bit(gr_info.scalefac_scale);
+    bv.put1Bit(gr_info.count1table_select);
+  }
+}
+
+// Serialize "si" back into a frame's side-info area ("framePtr" points just
+// past the 4-byte header; any CRC bytes are skipped, not rewritten).
+static void PutMP3SideInfoIntoFrame(MP3SideInfo const& si,
+				    MP3FrameParams const& fr,
+				    unsigned char* framePtr) {
+  if (fr.hasCRC) framePtr += 2; // skip CRC
+
+  BitVector bv(framePtr, 0, 8*fr.sideInfoSize);
+
+  if (fr.isMPEG2) {
+    putSideInfo2(bv, si, fr.isStereo);
+  } else {
+    putSideInfo1(bv, si, fr.isStereo);
+  }
+}
+
+
+// Rewrite the frame at "framePtr" in place so that it carries no audio
+// data: all "part2_3_length" and "big_values" fields become zero, and the
+// backpointer is replaced with "newBackpointer".  Returns False if there
+// isn't enough data to parse the existing side info.
+Boolean ZeroOutMP3SideInfo(unsigned char* framePtr, unsigned totFrameSize,
+			   unsigned newBackpointer) {
+  if (totFrameSize < 4) return False; // there's not enough data
+
+  MP3FrameParams fr;
+  fr.hdr =   ((unsigned)framePtr[0] << 24) | ((unsigned)framePtr[1] << 16)
+           | ((unsigned)framePtr[2] << 8) | (unsigned)framePtr[3];
+  fr.setParamsFromHeader();
+  fr.setBytePointer(framePtr + 4, totFrameSize - 4); // skip hdr
+
+  if (totFrameSize < 4 + fr.sideInfoSize) return False; // not enough data
+
+  MP3SideInfo si;
+  fr.getSideInfo(si);
+
+  si.main_data_begin = newBackpointer; /* backpointer */
+  /* set all four "part2_3_length" and "big_values" fields to zero: */
+  si.ch[0].gr[0].part2_3_length = si.ch[0].gr[0].big_values = 0;
+  si.ch[1].gr[0].part2_3_length = si.ch[1].gr[0].big_values = 0;
+  si.ch[0].gr[1].part2_3_length = si.ch[0].gr[1].big_values = 0;
+  si.ch[1].gr[1].part2_3_length = si.ch[1].gr[1].big_values = 0;
+
+  PutMP3SideInfoIntoFrame(si, fr, framePtr + 4);
+
+  return True;
+}
+
+
+// Map a bitrate in kbps to the smallest header bitrate-index whose table
+// value (Layer III row of "live_tabsel") is >= "bitrate"; 14 (the maximum
+// valid index) if the requested bitrate exceeds every table entry.
+static unsigned MP3BitrateToBitrateIndex(unsigned bitrate /* in kbps */,
+					 Boolean isMPEG2) {
+  for (unsigned i = 1; i < 15; ++i) {
+    if (live_tabsel[isMPEG2][2][i] >= bitrate)
+      return i;
+  }
+
+  // "bitrate" was larger than any possible, so return the largest possible:
+  return 14;
+}
+
+// Store the 32-bit frame header at "toPtr", big-endian (network order).
+static void outputHeader(unsigned char* toPtr, unsigned hdr) {
+  toPtr[0] = (unsigned char)(hdr>>24);
+  toPtr[1] = (unsigned char)(hdr>>16);
+  toPtr[2] = (unsigned char)(hdr>>8);
+  toPtr[3] = (unsigned char)(hdr);
+}
+
+// Set this ADU's "main_data_begin" backpointer to the largest value allowed
+// (capped at the format's field maximum: 255 for MPEG-2, 511 for MPEG-1),
+// and update "availableBytesForBackpointer" for the NEXT ADU: the bytes
+// this frame contributes (frame size minus side info), plus the current
+// backpointer, minus what this ADU itself consumes.
+static void assignADUBackpointer(MP3FrameParams const& fr,
+				 unsigned aduSize,
+				 MP3SideInfo& sideInfo,
+				 unsigned& availableBytesForBackpointer) {
+  // Give the ADU as large a backpointer as possible:
+  unsigned maxBackpointerSize = fr.isMPEG2 ? 255 : 511;
+
+  unsigned backpointerSize = availableBytesForBackpointer;
+  if (backpointerSize > maxBackpointerSize) {
+    backpointerSize = maxBackpointerSize;
+  }
+
+  // Store the new backpointer now:
+  sideInfo.main_data_begin = backpointerSize;
+
+  // Figure out how many bytes are available for the *next* ADU's backpointer:
+  availableBytesForBackpointer
+    = backpointerSize + fr.frameSize - fr.sideInfoSize ;
+  if (availableBytesForBackpointer < aduSize) {
+    availableBytesForBackpointer = 0;
+  } else {
+    availableBytesForBackpointer -= aduSize;
+  }
+}
+
+// Transcodes one MP3 'ADU' (frame with de-interleaved main data) at
+// "fromPtr"/"fromSize" to a new ADU at "toPtr" (capacity "toMaxSize") with
+// target bitrate "toBitrate" (kbps), mono, no CRC.  The Huffman-coded data
+// is truncated as needed to fit the smaller frame.
+// "availableBytesForBackpointer" is an in/out running budget used to pick
+// each output ADU's backpointer (see assignADUBackpointer()).
+// Returns the total size of the output ADU, or 0 on failure.
+unsigned TranscodeMP3ADU(unsigned char const* fromPtr, unsigned fromSize,
+		      unsigned toBitrate,
+		      unsigned char* toPtr, unsigned toMaxSize,
+		      unsigned& availableBytesForBackpointer) {
+  // Begin by parsing the input ADU's parameters:
+  unsigned hdr, inFrameSize, inSideInfoSize, backpointer, inAduSize;
+  MP3SideInfo sideInfo;
+  if (!GetADUInfoFromMP3Frame(fromPtr, fromSize,
+			      hdr, inFrameSize, sideInfo, inSideInfoSize,
+			      backpointer, inAduSize)) {
+    return 0;
+  }
+  fromPtr += (4+inSideInfoSize); // skip to 'main data'
+
+  // Alter the 4-byte MPEG header to reflect the output ADU:
+  // (different bitrate; mono; no CRC)
+  Boolean isMPEG2 = ((hdr&0x00080000) == 0);
+  unsigned toBitrateIndex = MP3BitrateToBitrateIndex(toBitrate, isMPEG2);
+  hdr &=~ 0xF000; hdr |= (toBitrateIndex<<12); // set bitrate index
+  hdr |= 0x10200; // turn on !error-prot and padding bits
+  hdr &=~ 0xC0; hdr |= 0xC0; // set mode to 3 (mono)
+
+  // Set up the rest of the parameters of the new ADU:
+  MP3FrameParams outFr;
+  outFr.hdr = hdr;
+  outFr.setParamsFromHeader();
+
+  // Figure out how big to make the output ADU:
+  unsigned inAveAduSize = inFrameSize - inSideInfoSize;
+  unsigned outAveAduSize = outFr.frameSize - outFr.sideInfoSize;
+  unsigned desiredOutAduSize /*=inAduSize*outAveAduSize/inAveAduSize*/
+    = (2*inAduSize*outAveAduSize + inAveAduSize)/(2*inAveAduSize);
+        // this rounds to the nearest integer
+
+  // Make sure the output buffer can at least hold header + side info:
+  if (toMaxSize < (4 + outFr.sideInfoSize)) return 0;
+  unsigned maxOutAduSize = toMaxSize - (4 + outFr.sideInfoSize);
+  if (desiredOutAduSize > maxOutAduSize) {
+    desiredOutAduSize = maxOutAduSize;
+  }
+
+  // Figure out the new sizes of the various 'part23 lengths',
+  // and how much they are truncated:
+  unsigned part23Length0a, part23Length0aTruncation;
+  unsigned part23Length0b, part23Length0bTruncation;
+  unsigned part23Length1a, part23Length1aTruncation;
+  unsigned part23Length1b, part23Length1bTruncation;
+  unsigned numAduBits
+    = updateSideInfoSizes(sideInfo, outFr.isMPEG2,
+			  fromPtr, 8*desiredOutAduSize,
+			  part23Length0a, part23Length0aTruncation,
+			  part23Length0b, part23Length0bTruncation,
+			  part23Length1a, part23Length1aTruncation,
+			  part23Length1b, part23Length1bTruncation);
+#ifdef DEBUG
+fprintf(stderr, "shrinkage %d->%d [(%d,%d),(%d,%d)] (trunc: [(%d,%d),(%d,%d)]) {%d}\n", inAduSize, (numAduBits+7)/8,
+	part23Length0a, part23Length0b, part23Length1a, part23Length1b,
+	part23Length0aTruncation, part23Length0bTruncation,
+	part23Length1aTruncation, part23Length1bTruncation,
+	maxOutAduSize);
+#endif
+  unsigned actualOutAduSize = (numAduBits+7)/8; // round bits up to whole bytes
+
+  // Give the new ADU an appropriate 'backpointer':
+  assignADUBackpointer(outFr, actualOutAduSize, sideInfo, availableBytesForBackpointer);
+
+  ///// Now output the new ADU:
+
+  // 4-byte header
+  outputHeader(toPtr, hdr); toPtr += 4;
+
+  // side info
+  PutMP3SideInfoIntoFrame(sideInfo, outFr, toPtr); toPtr += outFr.sideInfoSize;
+
+  // 'main data', using the new lengths
+  unsigned toBitOffset = 0;
+  unsigned fromBitOffset = 0;
+
+  /* rebuild portion 0a: */
+  // (portion 0a starts byte-aligned in the source, so a memmove suffices)
+  memmove(toPtr, fromPtr, (part23Length0a+7)/8);
+  toBitOffset += part23Length0a;
+  fromBitOffset += part23Length0a + part23Length0aTruncation;
+
+  /* rebuild portion 0b: */
+  shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length0b);
+  toBitOffset += part23Length0b;
+  fromBitOffset += part23Length0b + part23Length0bTruncation;
+
+  /* rebuild portion 1a: */
+  shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length1a);
+  toBitOffset += part23Length1a;
+  fromBitOffset += part23Length1a + part23Length1aTruncation;
+
+  /* rebuild portion 1b: */
+  shiftBits(toPtr, toBitOffset, fromPtr, fromBitOffset, part23Length1b);
+  toBitOffset += part23Length1b;
+
+  /* zero out any remaining bits (probably unnecessary, but...) */
+  unsigned char const zero = '\0';
+  shiftBits(toPtr, toBitOffset, &zero, 0,
+	    actualOutAduSize*8 - numAduBits);
+
+  return 4 + outFr.sideInfoSize + actualOutAduSize;
+}
diff --git a/liveMedia/MP3Internals.hh b/liveMedia/MP3Internals.hh
new file mode 100644
index 0000000..6301af8
--- /dev/null
+++ b/liveMedia/MP3Internals.hh
@@ -0,0 +1,143 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 internal implementation details
+// C++ header
+
+#ifndef _MP3_INTERNALS_HH
+#define _MP3_INTERNALS_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+#ifndef _BIT_VECTOR_HH
+#include "BitVector.hh"
+#endif
+
+// Decoded Layer III side information, per ISO/IEC 11172-3: one gr_info_s per
+// (channel, granule).  Fields mirror the bitstream fields of the same names;
+// the trailing pointer/derived fields (maxband.., region.start, *gain) look
+// like decoder working state rather than bitstream fields — TODO confirm.
+typedef struct MP3SideInfo {
+  unsigned main_data_begin; // backpointer into the bit reservoir
+  unsigned private_bits;
+  typedef struct gr_info_s {
+    int scfsi;              // scalefactor selection info (-1 presumably means "granule 0")
+    unsigned part2_3_length; // # bits of scalefactors + Huffman data
+    unsigned big_values;
+    unsigned global_gain;
+    unsigned scalefac_compress;
+    unsigned window_switching_flag;
+    unsigned block_type;
+    unsigned mixed_block_flag;
+    unsigned table_select[3]; // Huffman table ids for regions 0..2
+    unsigned region0_count;
+    unsigned region1_count;
+    unsigned subblock_gain[3];
+    unsigned maxband[3];
+    unsigned maxbandl;
+    unsigned maxb;
+    unsigned region1start;
+    unsigned region2start;
+    unsigned preflag;
+    unsigned scalefac_scale;
+    unsigned count1table_select;
+    double *full_gain[3];
+    double *pow2gain;
+  } gr_info_s_t;
+  struct {
+    gr_info_s_t gr[2]; // [granule]
+  } ch[2];             // [channel]
+} MP3SideInfo_t;
+
+#define SBLIMIT 32
+#define MAX_MP3_FRAME_SIZE 2500 /* also big enough for an 'ADU'ized frame */
+
+// Parses and holds the parameters of a single MP3 frame.  Typical use:
+// set "hdr", call setParamsFromHeader(), then setBytePointer() on the frame
+// payload and read from it via getBits()/get1Bit()/getSideInfo().
+class MP3FrameParams {
+public:
+  MP3FrameParams();
+  ~MP3FrameParams();
+
+  // 4-byte MPEG header:
+  unsigned hdr;
+
+  // a buffer that can be used to hold the rest of the frame:
+  unsigned char frameBytes[MAX_MP3_FRAME_SIZE];
+
+  // public parameters derived from the header
+  void setParamsFromHeader(); // this sets them
+  Boolean isMPEG2;
+  unsigned layer; // currently only 3 is supported
+  unsigned bitrate; // in kbps
+  unsigned samplingFreq;
+  Boolean isStereo;
+  Boolean isFreeFormat;
+  unsigned frameSize; // doesn't include the initial 4-byte header
+  unsigned sideInfoSize;
+  Boolean hasCRC;
+
+  // Points the internal bit reader at the frame payload (everything after
+  // the 4-byte header).  Note: const is cast away; the buffer must stay
+  // valid for the lifetime of subsequent getBits()/getSideInfo() calls.
+  void setBytePointer(unsigned char const* restOfFrame,
+		      unsigned totNumBytes) {// called during setup
+    bv.setup((unsigned char*)restOfFrame, 0, 8*totNumBytes);
+  }
+
+  // other, public parameters used when parsing input (perhaps get rid of)
+  unsigned oldHdr, firstHdr;
+
+  // Extract (unpack) the side info from the frame into a struct:
+  void getSideInfo(MP3SideInfo& si);
+
+  // The bit pointer used for reading data from frame data
+  unsigned getBits(unsigned numBits) { return bv.getBits(numBits); }
+  unsigned get1Bit() { return bv.get1Bit(); }
+
+private:
+  BitVector bv;
+
+  // other, private parameters derived from the header
+  unsigned bitrateIndex;
+  unsigned samplingFreqIndex;
+  Boolean isMPEG2_5;
+  Boolean padding;
+  Boolean extension;
+  unsigned mode;
+  unsigned mode_ext;
+  Boolean copyright;
+  Boolean original;
+  unsigned emphasis;
+  unsigned stereo;
+
+private:
+  unsigned computeSideInfoSize();
+};
+
+unsigned ComputeFrameSize(unsigned bitrate, unsigned samplingFreq,
+ Boolean usePadding, Boolean isMPEG2,
+ unsigned char layer);
+
+Boolean GetADUInfoFromMP3Frame(unsigned char const* framePtr,
+ unsigned totFrameSize,
+ unsigned& hdr, unsigned& frameSize,
+ MP3SideInfo& sideInfo, unsigned& sideInfoSize,
+ unsigned& backpointer, unsigned& aduSize);
+
+Boolean ZeroOutMP3SideInfo(unsigned char* framePtr, unsigned totFrameSize,
+ unsigned newBackpointer);
+
+unsigned TranscodeMP3ADU(unsigned char const* fromPtr, unsigned fromSize,
+ unsigned toBitrate,
+ unsigned char* toPtr, unsigned toMaxSize,
+ unsigned& availableBytesForBackpointer);
+ // returns the size of the resulting ADU (0 on failure)
+
+#endif
diff --git a/liveMedia/MP3InternalsHuffman.cpp b/liveMedia/MP3InternalsHuffman.cpp
new file mode 100644
index 0000000..87b68ac
--- /dev/null
+++ b/liveMedia/MP3InternalsHuffman.cpp
@@ -0,0 +1,976 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 internal implementation details (Huffman encoding)
+// Implementation
+
+#include "MP3InternalsHuffman.hh"
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+// Optionally allocates space for the decoded sample values: 4 values
+// (x, y, v, w) per sample, for up to SBLIMIT*SSLIMIT samples (+1 sentinel).
+MP3HuffmanEncodingInfo
+::MP3HuffmanEncodingInfo(Boolean includeDecodedValues) {
+  if (includeDecodedValues) {
+    decodedValues = new unsigned[(SBLIMIT*SSLIMIT + 1)*4];
+  } else {
+    decodedValues = NULL;
+  }
+}
+
+// delete[] of NULL is a no-op, so this is safe either way.
+MP3HuffmanEncodingInfo::~MP3HuffmanEncodingInfo() {
+  delete[] decodedValues;
+}
+
+// This is crufty old code that needs to be cleaned up #####
+
+static unsigned debugCount = 0; /* for debugging */
+
+#define TRUNC_FAVORa
+
+// Shrinks the two granules' "part2_3" data so that granule 0 fits within
+// "p23L0" bits and granule 1 within "p23L1" bits.  Each granule's data is
+// split into part 'a' (scalefactors + big_values region) and part 'b'
+// (count1 region); for each, the kept length and the truncated length are
+// returned through the reference parameters.  The main data is first
+// Huffman-decoded (via MP3HuffmanDecode) so that truncation points can be
+// aligned to sample bit-boundaries; "sideInfo" may be modified (big_values
+// is reduced when part 'a' is truncated).
+// NOTE(review): only channel 0 of each granule is processed here —
+// presumably the caller has already downmixed to mono; confirm at call site.
+void updateSideInfoForHuffman(MP3SideInfo& sideInfo, Boolean isMPEG2,
+			      unsigned char const* mainDataPtr,
+			      unsigned p23L0, unsigned p23L1,
+			      unsigned& part23Length0a,
+			      unsigned& part23Length0aTruncation,
+			      unsigned& part23Length0b,
+			      unsigned& part23Length0bTruncation,
+			      unsigned& part23Length1a,
+			      unsigned& part23Length1aTruncation,
+			      unsigned& part23Length1b,
+			      unsigned& part23Length1bTruncation) {
+  int i, j;
+  unsigned sfLength, origTotABsize, adjustment;
+  MP3SideInfo::gr_info_s_t* gr;
+
+  /* First, Huffman-decode each part of the segment's main data,
+     to see at which bit-boundaries the samples appear:
+   */
+  MP3HuffmanEncodingInfo hei;
+
+  ++debugCount;
+#ifdef DEBUG
+  fprintf(stderr, "usifh-start: p23L0: %d, p23L1: %d\n", p23L0, p23L1);
+#endif
+
+  /* Process granule 0 */
+  {
+    gr = &(sideInfo.ch[0].gr[0]);
+    origTotABsize = gr->part2_3_length;
+
+    MP3HuffmanDecode(gr, isMPEG2, mainDataPtr, 0, origTotABsize, sfLength, hei);
+
+    /* Begin by computing new sizes for parts a & b (& their truncations) */
+#ifdef DEBUG
+    fprintf(stderr, "usifh-0: %d, %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n",
+	    hei.numSamples,
+	    sfLength/8, sfLength%8,
+	    hei.reg1Start/8, hei.reg1Start%8,
+	    hei.reg2Start/8, hei.reg2Start%8,
+	    hei.bigvalStart/8, hei.bigvalStart%8,
+	    origTotABsize/8, origTotABsize%8);
+#endif
+    if (p23L0 < sfLength) {
+      /* We can't use this, so give it all to the next granule: */
+      p23L1 += p23L0;
+      p23L0 = 0;
+    }
+
+    // Part 'a' ends where the count1 region starts:
+    part23Length0a = hei.bigvalStart;
+    part23Length0b = origTotABsize - hei.bigvalStart;
+    part23Length0aTruncation = part23Length0bTruncation = 0;
+    if (origTotABsize > p23L0) {
+      /* We need to shorten one or both of fields a & b */
+      unsigned truncation = origTotABsize - p23L0;
+      // Truncation policy is compile-time selected (TRUNC_FAVORa is the
+      // one #define'd above): drop 'b' bits first, then 'a' bits.
+#ifdef TRUNC_FAIRLY
+      part23Length0aTruncation = (truncation*(part23Length0a-sfLength))
+	                         /(origTotABsize-sfLength);
+      part23Length0bTruncation = truncation - part23Length0aTruncation;
+#endif
+#ifdef TRUNC_FAVORa
+      part23Length0bTruncation
+	= (truncation > part23Length0b) ? part23Length0b : truncation;
+      part23Length0aTruncation = truncation - part23Length0bTruncation;
+#endif
+#ifdef TRUNC_FAVORb
+      part23Length0aTruncation = (truncation > part23Length0a-sfLength)
+	? (part23Length0a-sfLength) : truncation;
+      part23Length0bTruncation = truncation - part23Length0aTruncation;
+#endif
+    }
+    /* ASSERT: part23Length0xTruncation <= part23Length0x */
+    part23Length0a -= part23Length0aTruncation;
+    part23Length0b -= part23Length0bTruncation;
+#ifdef DEBUG
+    fprintf(stderr, "usifh-0: interim sizes: %d (%d), %d (%d)\n",
+	    part23Length0a, part23Length0aTruncation,
+	    part23Length0b, part23Length0bTruncation);
+#endif
+
+    /* Adjust these new lengths so they end on sample bit boundaries: */
+    // Find the last sample boundary <= part23Length0a:
+    for (i = 0; i < (int)hei.numSamples; ++i) {
+      if (hei.allBitOffsets[i] == part23Length0a) break;
+      else if (hei.allBitOffsets[i] > part23Length0a) {--i; break;}
+    }
+    if (i < 0) { /* should happen only if we couldn't fit sfLength */
+      i = 0; adjustment = 0;
+    } else {
+      adjustment = part23Length0a - hei.allBitOffsets[i];
+    }
+#ifdef DEBUG
+    fprintf(stderr, "%d usifh-0: adjustment 1: %d\n", debugCount, adjustment);
+#endif
+    part23Length0a -= adjustment;
+    part23Length0aTruncation += adjustment;
+    /* Assign the bits we just shaved to field b and granule 1: */
+    if (part23Length0bTruncation < adjustment) {
+      p23L1 += (adjustment - part23Length0bTruncation);
+      adjustment = part23Length0bTruncation;
+    }
+    part23Length0b += adjustment;
+    part23Length0bTruncation -= adjustment;
+    // Likewise align the end of part 'b' to a sample boundary:
+    for (j = i; j < (int)hei.numSamples; ++j) {
+      if (hei.allBitOffsets[j]
+	  == part23Length0a + part23Length0aTruncation + part23Length0b)
+	break;
+      else if (hei.allBitOffsets[j]
+	  > part23Length0a + part23Length0aTruncation + part23Length0b)
+	{--j; break;}
+    }
+    if (j < 0) { /* should happen only if we couldn't fit sfLength */
+      j = 0; adjustment = 0;
+    } else {
+      adjustment = part23Length0a+part23Length0aTruncation+part23Length0b
+	- hei.allBitOffsets[j];
+    }
+#ifdef DEBUG
+    fprintf(stderr, "%d usifh-0: adjustment 2: %d\n", debugCount, adjustment);
+#endif
+    if (adjustment > part23Length0b) adjustment = part23Length0b; /*sanity*/
+    part23Length0b -= adjustment;
+    part23Length0bTruncation += adjustment;
+    /* Assign the bits we just shaved to granule 1 */
+    p23L1 += adjustment;
+
+    if (part23Length0aTruncation > 0) {
+      /* Change the granule's 'big_values' field to reflect the truncation */
+      gr->big_values = i;
+    }
+  }
+
+  /* Process granule 1 (MPEG-1 only) */
+
+  if (isMPEG2) {
+    // MPEG-2 frames have a single granule; report granule 1 as empty.
+    part23Length1a = part23Length1b = 0;
+    part23Length1aTruncation = part23Length1bTruncation = 0;
+  } else {
+    // Granule 1's data starts after both channels' granule-0 data:
+    unsigned granule1Offset
+      = origTotABsize + sideInfo.ch[1].gr[0].part2_3_length;
+
+    gr = &(sideInfo.ch[0].gr[1]);
+    origTotABsize = gr->part2_3_length;
+
+    MP3HuffmanDecode(gr, isMPEG2, mainDataPtr, granule1Offset,
+		     origTotABsize, sfLength, hei);
+
+    /* Begin by computing new sizes for parts a & b (& their truncations) */
+#ifdef DEBUG
+    fprintf(stderr, "usifh-1: %d, %d:%d, %d:%d, %d:%d, %d:%d, %d:%d\n",
+	    hei.numSamples,
+	    sfLength/8, sfLength%8,
+	    hei.reg1Start/8, hei.reg1Start%8,
+	    hei.reg2Start/8, hei.reg2Start%8,
+	    hei.bigvalStart/8, hei.bigvalStart%8,
+	    origTotABsize/8, origTotABsize%8);
+#endif
+    if (p23L1 < sfLength) {
+      /* We can't use this, so give up on this granule: */
+      p23L1 = 0;
+    }
+
+    part23Length1a = hei.bigvalStart;
+    part23Length1b = origTotABsize - hei.bigvalStart;
+    part23Length1aTruncation = part23Length1bTruncation = 0;
+    if (origTotABsize > p23L1) {
+      /* We need to shorten one or both of fields a & b */
+      unsigned truncation = origTotABsize - p23L1;
+#ifdef TRUNC_FAIRLY
+      part23Length1aTruncation = (truncation*(part23Length1a-sfLength))
+	                         /(origTotABsize-sfLength);
+      part23Length1bTruncation = truncation - part23Length1aTruncation;
+#endif
+#ifdef TRUNC_FAVORa
+      part23Length1bTruncation
+	= (truncation > part23Length1b) ? part23Length1b : truncation;
+      part23Length1aTruncation = truncation - part23Length1bTruncation;
+#endif
+#ifdef TRUNC_FAVORb
+      part23Length1aTruncation = (truncation > part23Length1a-sfLength)
+	? (part23Length1a-sfLength) : truncation;
+      part23Length1bTruncation = truncation - part23Length1aTruncation;
+#endif
+    }
+    /* ASSERT: part23Length1xTruncation <= part23Length1x */
+    part23Length1a -= part23Length1aTruncation;
+    part23Length1b -= part23Length1bTruncation;
+#ifdef DEBUG
+    fprintf(stderr, "usifh-1: interim sizes: %d (%d), %d (%d)\n",
+	    part23Length1a, part23Length1aTruncation,
+	    part23Length1b, part23Length1bTruncation);
+#endif
+
+    /* Adjust these new lengths so they end on sample bit boundaries: */
+    for (i = 0; i < (int)hei.numSamples; ++i) {
+      if (hei.allBitOffsets[i] == part23Length1a) break;
+      else if (hei.allBitOffsets[i] > part23Length1a) {--i; break;}
+    }
+    if (i < 0) { /* should happen only if we couldn't fit sfLength */
+      i = 0; adjustment = 0;
+    } else {
+      adjustment = part23Length1a - hei.allBitOffsets[i];
+    }
+#ifdef DEBUG
+    fprintf(stderr, "%d usifh-1: adjustment 0: %d\n", debugCount, adjustment);
+#endif
+    part23Length1a -= adjustment;
+    part23Length1aTruncation += adjustment;
+    /* Assign the bits we just shaved to field b: */
+    if (part23Length1bTruncation < adjustment) {
+      adjustment = part23Length1bTruncation;
+    }
+    part23Length1b += adjustment;
+    part23Length1bTruncation -= adjustment;
+    for (j = i; j < (int)hei.numSamples; ++j) {
+      if (hei.allBitOffsets[j]
+	  == part23Length1a + part23Length1aTruncation + part23Length1b)
+	break;
+      else if (hei.allBitOffsets[j]
+	  > part23Length1a + part23Length1aTruncation + part23Length1b)
+	{--j; break;}
+    }
+    if (j < 0) { /* should happen only if we couldn't fit sfLength */
+      j = 0; adjustment = 0;
+    } else {
+      adjustment = part23Length1a+part23Length1aTruncation+part23Length1b
+	- hei.allBitOffsets[j];
+    }
+#ifdef DEBUG
+    fprintf(stderr, "%d usifh-1: adjustment 1: %d\n", debugCount, adjustment);
+#endif
+    if (adjustment > part23Length1b) adjustment = part23Length1b; /*sanity*/
+    part23Length1b -= adjustment;
+    part23Length1bTruncation += adjustment;
+
+    if (part23Length1aTruncation > 0) {
+      /* Change the granule's 'big_values' field to reflect the truncation */
+      gr->big_values = i;
+    }
+  }
+#ifdef DEBUG
+  fprintf(stderr, "usifh-end, new vals: %d (%d), %d (%d), %d (%d), %d (%d)\n",
+	  part23Length0a, part23Length0aTruncation,
+	  part23Length0b, part23Length0bTruncation,
+	  part23Length1a, part23Length1aTruncation,
+	  part23Length1b, part23Length1bTruncation);
+#endif
+}
+
+// fgets()-like read from an in-memory buffer: copies up to "max" chars from
+// *fi into "line", stopping after a '\n' (which is replaced by '\0'), and
+// advances *fi past the consumed bytes.
+// NOTE(review): when no newline appears within "max" chars, line[max] is
+// written — callers must pass max < sizeof(line) (they pass 99 for a
+// 100-byte buffer).
+static void rsf_getline(char* line, unsigned max, unsigned char**fi) {
+  unsigned i;
+  for (i = 0; i < max; ++i) {
+    line[i] = *(*fi)++;
+    if (line[i] == '\n') {
+      line[i++] = '\0';
+      return;
+    }
+  }
+  line[i] = '\0';
+}
+
+// fscanf(fi,"%x",v)-replacement over an in-memory, NUL-separated text
+// buffer: parses the next hex value into *v and advances *fi past it.
+static void rsfscanf(unsigned char **fi, unsigned int* v) {
+  while (sscanf((char*)*fi, "%x", v) == 0) {
+    /* skip past the next '\0' */
+    while (*(*fi)++ != '\0') {}
+  }
+
+  /* skip past any white-space before the value: */
+  while (*(*fi) <= ' ') ++(*fi);
+
+  /* skip past the value: */
+  while (*(*fi) > ' ') ++(*fi);
+}
+
+#define HUFFBITS unsigned long int
+#define SIZEOF_HUFFBITS 4
+#define HTN 34
+#define MXOFF 250
+
+// One Huffman code table, as read from the textual table description.
+// "val" holds the decode tree; "table"/"hlen" are the (lazily built)
+// reverse encoding table — see buildHuffmanEncodingTable().
+struct huffcodetab {
+  char tablename[3];	/*string, containing table_description	*/
+  unsigned int xlen; 	/*max. x-index+				*/
+  unsigned int ylen;	/*max. y-index+				*/
+  unsigned int linbits; /*number of linbits			*/
+  unsigned int linmax;	/*max number to be stored in linbits	*/
+  int ref;		/*a positive value indicates a reference*/
+  HUFFBITS *table;	/*pointer to array[xlen][ylen]		*/
+  unsigned char *hlen;	/*pointer to array[xlen][ylen]		*/
+  unsigned char(*val)[2];/*decoder tree				*/
+  unsigned int treelen;	/*length of decoder tree		*/
+};
+
+// Global table registry, filled in once by read_decoder_table():
+static struct huffcodetab rsf_ht[HTN]; // array of all huffcodetable headers
+				/* 0..31 Huffman code table 0..31	*/
+				/* 32,33 count1-tables			*/
+
+/* read the huffman decoder table */
+static int read_decoder_table(unsigned char* fi) {
+ int n,i,nn,t;
+ unsigned int v0,v1;
+ char command[100],line[100];
+ for (n=0;n<HTN;n++) {
+ rsf_ht[n].table = NULL;
+ rsf_ht[n].hlen = NULL;
+
+ /* .table number treelen xlen ylen linbits */
+ do {
+ rsf_getline(line,99,&fi);
+ } while ((line[0] == '#') || (line[0] < ' '));
+
+ sscanf(line,"%s %s %u %u %u %u",command,rsf_ht[n].tablename,
+ &rsf_ht[n].treelen, &rsf_ht[n].xlen, &rsf_ht[n].ylen, &rsf_ht[n].linbits);
+ if (strcmp(command,".end")==0)
+ return n;
+ else if (strcmp(command,".table")!=0) {
+#ifdef DEBUG
+ fprintf(stderr,"huffman table %u data corrupted\n",n);
+#endif
+ return -1;
+ }
+ rsf_ht[n].linmax = (1<<rsf_ht[n].linbits)-1;
+
+ sscanf(rsf_ht[n].tablename,"%u",&nn);
+ if (nn != n) {
+#ifdef DEBUG
+ fprintf(stderr,"wrong table number %u\n",n);
+#endif
+ return(-2);
+ }
+ do {
+ rsf_getline(line,99,&fi);
+ } while ((line[0] == '#') || (line[0] < ' '));
+
+ sscanf(line,"%s %u",command,&t);
+ if (strcmp(command,".reference")==0) {
+ rsf_ht[n].ref = t;
+ rsf_ht[n].val = rsf_ht[t].val;
+ rsf_ht[n].treelen = rsf_ht[t].treelen;
+ if ( (rsf_ht[n].xlen != rsf_ht[t].xlen) ||
+ (rsf_ht[n].ylen != rsf_ht[t].ylen) ) {
+#ifdef DEBUG
+ fprintf(stderr,"wrong table %u reference\n",n);
+#endif
+ return (-3);
+ };
+ while ((line[0] == '#') || (line[0] < ' ') ) {
+ rsf_getline(line,99,&fi);
+ }
+ }
+ else if (strcmp(command,".treedata")==0) {
+ rsf_ht[n].ref = -1;
+ rsf_ht[n].val = (unsigned char (*)[2])
+ new unsigned char[2*(rsf_ht[n].treelen)];
+ if ((rsf_ht[n].val == NULL) && ( rsf_ht[n].treelen != 0 )){
+#ifdef DEBUG
+ fprintf(stderr, "heaperror at table %d\n",n);
+#endif
+ return -1;
+ }
+ for (i=0;(unsigned)i<rsf_ht[n].treelen; i++) {
+ rsfscanf(&fi, &v0);
+ rsfscanf(&fi, &v1);
+/*replaces fscanf(fi,"%x %x",&v0, &v1);*/
+ rsf_ht[n].val[i][0]=(unsigned char)v0;
+ rsf_ht[n].val[i][1]=(unsigned char)v1;
+ }
+ rsf_getline(line,99,&fi); /* read the rest of the line */
+ }
+ else {
+#ifdef DEBUG
+ fprintf(stderr,"huffman decodertable error at table %d\n",n);
+#endif
+ }
+ }
+ return n;
+}
+
+// One-shot lazy initialization of rsf_ht[] from the built-in "huffdec"
+// table text (defined elsewhere).  NOTE(review): the static flag is not
+// synchronized, so first-call races are possible if invoked concurrently.
+static void initialize_huffman() {
+  static Boolean huffman_initialized = False;
+
+  if (huffman_initialized) return;
+
+  if (read_decoder_table(huffdec) != HTN) {
+#ifdef DEBUG
+    fprintf(stderr,"decoder table read error\n");
+#endif
+    return; // leave uninitialized; a later call will retry
+  }
+  huffman_initialized = True;
+}
+
+// Scalefactor bit-length tables.  "slen" is the MPEG-1 slen1/slen2 pair
+// indexed by scalefac_compress; "stab" is used for the MPEG-2 scalefactor
+// group sizes (indexed by block-type class and (slen>>12)&7) — presumably
+// taken from the reference decoder; see rsf_get_scale_factors_2().
+static unsigned char const slen[2][16] = {
+  {0, 0, 0, 0, 3, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4},
+  {0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 3}
+};
+
+static unsigned char const stab[3][6][4] = {
+   { { 6, 5, 5,5 } , { 6, 5, 7,3 } , { 11,10,0,0} ,
+     { 7, 7, 7,0 } , { 6, 6, 6,3 } , {  8, 8,5,0} } ,
+   { { 9, 9, 9,9 } , { 9, 9,12,6 } , { 18,18,0,0} ,
+     {12,12,12,0 } , {12, 9, 9,6 } , { 15,12,9,0} } ,
+   { { 6, 9, 9,9 } , { 6, 9,12,6 } , { 15,18,0,0} ,
+     { 6,15,12,0 } , { 6,12, 9,6 } , {  6,18,9,0} }
+};
+
+// Returns the number of scalefactor bits for an MPEG-1 granule, based on
+// scalefac_compress, block type, and the scfsi (scalefactor reuse) flags.
+static unsigned rsf_get_scale_factors_1(MP3SideInfo::gr_info_s_t *gr_info) {
+  int numbits;
+  int num0 = slen[0][gr_info->scalefac_compress];
+  int num1 = slen[1][gr_info->scalefac_compress];
+
+  if (gr_info->block_type == 2)
+    {
+      // Short blocks: 18 scalefactors per slen group...
+      numbits = (num0 + num1) * 18;
+
+      if (gr_info->mixed_block_flag) {
+	 numbits -= num0; /* num0 * 17 + num1 * 18 */
+      }
+    }
+  else
+    {
+      int scfsi = gr_info->scfsi;
+
+      if(scfsi < 0) { /* scfsi < 0 => granule == 0 */
+	 numbits = (num0 + num1) * 10 + num0;
+      }
+      else {
+	// Granule 1: each scfsi band whose bit is clear is re-transmitted:
+	numbits = 0;
+	if(!(scfsi & 0x8)) {
+	  numbits += num0 * 6;
+	}
+	else {
+	}
+
+	if(!(scfsi & 0x4)) {
+	  numbits += num0 * 5;
+	}
+	else {
+	}
+
+	if(!(scfsi & 0x2)) {
+	  numbits += num1 * 5;
+	}
+	else {
+	}
+
+	if(!(scfsi & 0x1)) {
+	  numbits += num1 * 5;
+	}
+	else {
+	}
+      }
+    }
+
+  return numbits;
+}
+
+extern unsigned n_slen2[];
+extern unsigned i_slen2[];
+
+// Returns the number of scalefactor bits for an MPEG-2 granule, using the
+// packed n_slen2[] encoding (four 3-bit slen fields plus a preflag bit,
+// which is also stored into gr_info->preflag as a side effect).
+static unsigned rsf_get_scale_factors_2(MP3SideInfo::gr_info_s_t *gr_info) {
+  unsigned char const* pnt;
+  int i;
+  unsigned int slen;
+  int n = 0;
+  int numbits = 0;
+
+  slen = n_slen2[gr_info->scalefac_compress];
+
+  gr_info->preflag = (slen>>15) & 0x1;
+
+  // Select the stab[] row: 0 = long blocks, 1 = short, 2 = mixed short:
+  n = 0;
+  if( gr_info->block_type == 2 ) {
+    n++;
+    if(gr_info->mixed_block_flag)
+      n++;
+  }
+
+  pnt = stab[n][(slen>>12)&0x7];
+
+  // Sum (group size * slen bits) over the four scalefactor groups:
+  for(i=0;i<4;i++) {
+    int num = slen & 0x7;
+    slen >>= 3;
+    numbits += pnt[i] * num;
+  }
+
+  return numbits;
+}
+
+// Dispatches to the MPEG-1 or MPEG-2 scalefactor-length computation.
+static unsigned getScaleFactorsLength(MP3SideInfo::gr_info_s_t* gr,
+				      Boolean isMPEG2) {
+  return isMPEG2 ? rsf_get_scale_factors_2(gr)
+                 : rsf_get_scale_factors_1(gr);
+}
+
+static int rsf_huffman_decoder(BitVector& bv,
+ struct huffcodetab const* h,
+ int* x, int* y, int* v, int* w); // forward
+
+// Huffman-decodes one granule's main data (starting "fromBitOffset" bits
+// into "fromBasePtr", "fromLength" bits total), recording in "hei" the bit
+// offset of every decoded sample (allBitOffsets), the start of regions 1/2
+// and of the count1 area, and the sample count.  "scaleFactorsLength" gets
+// the granule's scalefactor bit count (those bits are skipped, not decoded).
+// If hei.decodedValues is non-NULL, the decoded (x,y,v,w) values are stored
+// there too.  May clamp gr->big_values up to region1start+region2start.
+// NOTE(review): hei.allBitOffsets is indexed up to SSLIMIT*SBLIMIT — assumes
+// the array holds SSLIMIT*SBLIMIT+1 entries; confirm in the class decl.
+void MP3HuffmanDecode(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2,
+		      unsigned char const* fromBasePtr,
+		      unsigned fromBitOffset, unsigned fromLength,
+		      unsigned& scaleFactorsLength,
+		      MP3HuffmanEncodingInfo& hei) {
+  unsigned i;
+  int x, y, v, w;
+  struct huffcodetab *h;
+  BitVector bv((unsigned char*)fromBasePtr, fromBitOffset, fromLength);
+
+  /* Compute the size of the scale factors (& also advance bv): */
+  scaleFactorsLength = getScaleFactorsLength(gr, isMPEG2);
+  bv.skipBits(scaleFactorsLength);
+
+  initialize_huffman();
+
+  hei.reg1Start = hei.reg2Start = hei.numSamples = 0;
+
+  /* Read bigvalues area. */
+  if (gr->big_values < gr->region1start + gr->region2start) {
+    gr->big_values = gr->region1start + gr->region2start; /* sanity check */
+  }
+  for (i = 0; i < gr->big_values; ++i) {
+    if (i < gr->region1start) {
+      /* in region 0 */
+      h = &rsf_ht[gr->table_select[0]];
+    } else if (i < gr->region2start) {
+      /* in region 1 */
+      h = &rsf_ht[gr->table_select[1]];
+      if (hei.reg1Start == 0) {
+	hei.reg1Start = bv.curBitIndex();
+      }
+    } else {
+      /* in region 2 */
+      h = &rsf_ht[gr->table_select[2]];
+      if (hei.reg2Start == 0) {
+	hei.reg2Start = bv.curBitIndex();
+      }
+    }
+
+    // Remember where this sample started, then decode it:
+    hei.allBitOffsets[i] = bv.curBitIndex();
+    rsf_huffman_decoder(bv, h, &x, &y, &v, &w);
+    if (hei.decodedValues != NULL) {
+      // Record the decoded values:
+      unsigned* ptr = &hei.decodedValues[4*i];
+      ptr[0] = x; ptr[1] = y; ptr[2] = v; ptr[3] = w;
+    }
+  }
+  hei.bigvalStart = bv.curBitIndex();
+
+  /* Read count1 area. */
+  h = &rsf_ht[gr->count1table_select+32];
+  while (bv.curBitIndex() < bv.totNumBits() && i < SSLIMIT*SBLIMIT) {
+    hei.allBitOffsets[i] = bv.curBitIndex();
+    rsf_huffman_decoder(bv, h, &x, &y, &v, &w);
+    if (hei.decodedValues != NULL) {
+      // Record the decoded values:
+      unsigned* ptr = &hei.decodedValues[4*i];
+      ptr[0] = x; ptr[1] = y; ptr[2] = v; ptr[3] = w;
+    }
+    ++i;
+  }
+
+  // Sentinel: the end-of-data bit offset, so offsets[i+1]-offsets[i] works:
+  hei.allBitOffsets[i] = bv.curBitIndex();
+  hei.numSamples = i;
+}
+
+// NOTE(review): "1 << 31" on a (signed) int is undefined behavior before
+// the widening to HUFFBITS; "1UL << ..." would be well-defined.  "hs" is
+// not used in this file — presumably referenced elsewhere; confirm.
+HUFFBITS dmask = 1 << (SIZEOF_HUFFBITS*8-1);
+unsigned int hs = SIZEOF_HUFFBITS*8;
+
+/* do the huffman-decoding */
+// Decodes one Huffman-coded value pair/quadruple from "bv" using table "h".
+// Returns 0 on success, 1 on an illegal code (x/y set to a concealment
+// value), 2 if the table has no decode tree.
+// NOTE(review): on error this printf()s to stdout unconditionally (not
+// under #ifdef DEBUG, and not to stderr) — probably unintended in a library.
+static int rsf_huffman_decoder(BitVector& bv,
+			       struct huffcodetab const* h, // ptr to huffman code record
+			       /* unsigned */ int *x, // returns decoded x value
+			       /* unsigned */ int *y, // returns decoded y value
+			       int* v, int* w) {
+  HUFFBITS level;
+  unsigned point = 0;
+  int error = 1;
+  level = dmask;
+  *x = *y = *v = *w = 0;
+  if (h->val == NULL) return 2;
+
+  /* table 0 needs no bits */
+  if (h->treelen == 0) return 0;
+
+  /* Lookup in Huffman table. */
+
+  do {
+    if (h->val[point][0]==0) {   /*end of tree*/
+      // Leaf node: the byte packs x (high nibble) and y (low nibble):
+      *x = h->val[point][1] >> 4;
+      *y = h->val[point][1] & 0xf;
+
+      error = 0;
+      break;
+    }
+    // Internal node: follow the child offset selected by the next bit
+    // (offsets >= MXOFF chain through intermediate jump entries):
+    if (bv.get1Bit()) {
+      while (h->val[point][1] >= MXOFF) point += h->val[point][1];
+      point += h->val[point][1];
+    }
+    else {
+      while (h->val[point][0] >= MXOFF) point += h->val[point][0];
+      point += h->val[point][0];
+    }
+    level >>= 1;   // bounds the walk to at most 8*SIZEOF_HUFFBITS steps
+  } while (level || (point < h->treelen) );
+/////  } while (level || (point < rsf_ht->treelen) );
+
+  /* Check for error. */
+
+  if (error) { /* set x and y to a medium value as a simple concealment */
+    printf("Illegal Huffman code in data.\n");
+    *x = ((h->xlen-1) << 1);
+    *y = ((h->ylen-1) << 1);
+  }
+
+  /* Process sign encodings for quadruples tables. */
+
+  if (h->tablename[0] == '3'
+      && (h->tablename[1] == '2' || h->tablename[1] == '3')) {
+    // count1 tables ("32"/"33"): the leaf's y nibble packs v,w,x,y bits,
+    // each followed by a sign bit when non-zero:
+    *v = (*y>>3) & 1;
+    *w = (*y>>2) & 1;
+    *x = (*y>>1) & 1;
+    *y = *y & 1;
+
+    if (*v)
+      if (bv.get1Bit() == 1) *v = -*v;
+    if (*w)
+      if (bv.get1Bit() == 1) *w = -*w;
+    if (*x)
+      if (bv.get1Bit() == 1) *x = -*x;
+    if (*y)
+      if (bv.get1Bit() == 1) *y = -*y;
+  }
+
+  /* Process sign and escape encodings for dual tables. */
+
+  else {
+    // big_values tables: escape values (== xlen-1/ylen-1) are extended by
+    // "linbits" extra bits, then each non-zero value gets a sign bit:
+    if (h->linbits)
+      if ((h->xlen-1) == (unsigned)*x)
+	*x += bv.getBits(h->linbits);
+    if (*x)
+      if (bv.get1Bit() == 1) *x = -*x;
+    if (h->linbits)
+      if ((h->ylen-1) == (unsigned)*y)
+	*y += bv.getBits(h->linbits);
+    if (*y)
+      if (bv.get1Bit() == 1) *y = -*y;
+  }
+
+  return error;
+}
+
+#ifdef DO_HUFFMAN_ENCODING
+// Reads one big-endian sample value from "fromPtr" and advances it by
+// BYTES_PER_SAMPLE_VALUE.  The sample width is a compile-time choice.
+inline int getNextSample(unsigned char const*& fromPtr) {
+  int sample
+#ifdef FOUR_BYTE_SAMPLES
+    = (fromPtr[0]<<24) | (fromPtr[1]<<16) | (fromPtr[2]<<8) | fromPtr[3];
+#else
+#ifdef TWO_BYTE_SAMPLES
+    = (fromPtr[0]<<8) | fromPtr[1];
+#else
+    // ONE_BYTE_SAMPLES
+    = fromPtr[0];
+#endif
+#endif
+  fromPtr += BYTES_PER_SAMPLE_VALUE;
+  return sample;
+}
+
+static void rsf_huffman_encoder(BitVector& bv,
+ struct huffcodetab* h,
+ int x, int y, int v, int w); // forward
+
+// Re-encodes raw (x,y,v,w) sample values from "fromPtr" as Huffman data,
+// writing "numHuffBits" bits starting at bit "toBitOffset" of "toPtr",
+// using the region/table structure described by "gr".  Returns the number
+// of samples consumed.  (Inverse of MP3HuffmanDecode's decode loop.)
+unsigned MP3HuffmanEncode(MP3SideInfo::gr_info_s_t const* gr,
+			  unsigned char const* fromPtr,
+			  unsigned char* toPtr, unsigned toBitOffset,
+			  unsigned numHuffBits) {
+  unsigned i;
+  struct huffcodetab *h;
+  int x, y, v, w;
+  BitVector bv(toPtr, toBitOffset, numHuffBits);
+
+  initialize_huffman();
+
+  // Encode big_values area:
+  unsigned big_values = gr->big_values;
+  if (big_values < gr->region1start + gr->region2start) {
+    big_values = gr->region1start + gr->region2start; /* sanity check */
+  }
+  for (i = 0; i < big_values; ++i) {
+    // Pick the Huffman table for the region this sample falls in:
+    if (i < gr->region1start) {
+      /* in region 0 */
+      h = &rsf_ht[gr->table_select[0]];
+    } else if (i < gr->region2start) {
+      /* in region 1 */
+      h = &rsf_ht[gr->table_select[1]];
+    } else {
+      /* in region 2 */
+      h = &rsf_ht[gr->table_select[2]];
+    }
+
+    x = getNextSample(fromPtr);
+    y = getNextSample(fromPtr);
+    v = getNextSample(fromPtr);
+    w = getNextSample(fromPtr);
+    rsf_huffman_encoder(bv, h, x, y, v, w);
+  }
+
+  // Encode count1 area:
+  h = &rsf_ht[gr->count1table_select+32];
+  while (bv.curBitIndex() < bv.totNumBits() && i < SSLIMIT*SBLIMIT) {
+    x = getNextSample(fromPtr);
+    y = getNextSample(fromPtr);
+    v = getNextSample(fromPtr);
+    w = getNextSample(fromPtr);
+    rsf_huffman_encoder(bv, h, x, y, v, w);
+    ++i;
+  }
+
+  return i;
+}
+
+// Walks table "h"'s decode tree using the low "bitsLength" bits of "bits"
+// (consumed LSB-first, so the stored code is in reversed bit order).  If the
+// walk reaches a leaf whose (x,y) entry hasn't been recorded yet, stores the
+// code and its length into h->table/h->hlen, sets "xy", and returns True.
+// Returns False if the bits are too few, the leaf was already seen, or the
+// walk runs off the tree.
+static Boolean lookupHuffmanTableEntry(struct huffcodetab const* h,
+				       HUFFBITS bits, unsigned bitsLength,
+				       unsigned char& xy) {
+  unsigned point = 0;
+  unsigned mask = 1;
+  unsigned numBitsTestedSoFar = 0;
+  do {
+    if (h->val[point][0]==0) {   // end of tree
+      xy = h->val[point][1];
+      if (h->hlen[xy] == 0) { // this entry hasn't already been used
+	h->table[xy] = bits;
+	h->hlen[xy] = bitsLength;
+	return True;
+      } else { // this entry has already been seen
+	return False;
+      }
+    }
+
+    if (numBitsTestedSoFar++ == bitsLength) {
+      // We don't yet have enough bits for this prefix
+      return False;
+    }
+    // Follow the child selected by the next (least-significant-first) bit:
+    if (bits&mask) {
+      while (h->val[point][1] >= MXOFF) point += h->val[point][1];
+      point += h->val[point][1];
+    } else {
+      while (h->val[point][0] >= MXOFF) point += h->val[point][0];
+      point += h->val[point][0];
+    }
+    mask <<= 1;
+  } while (mask || (point < h->treelen));
+
+  return False;
+}
+
+// Builds the reverse (encoding) table for "h" by probing every bit pattern
+// of every length against the decode tree, filling h->table (code bits,
+// reversed) and h->hlen (code length), 256 entries indexed by packed xy.
+// NOTE(review): plain new[] throws on failure rather than returning NULL,
+// so the NULL check is vestigial — and if it ever fired, h->hlen would leak.
+static void buildHuffmanEncodingTable(struct huffcodetab* h) {
+  h->table = new unsigned long[256];
+  h->hlen = new unsigned char[256];
+  if (h->table == NULL || h->hlen == NULL) { h->table = NULL; return; }
+  for (unsigned i = 0; i < 256; ++i) {
+    h->table[i] = 0; h->hlen[i] = 0;
+  }
+
+  // Look up entries for each possible bit sequence length:
+  unsigned maxNumEntries = h->xlen * h->ylen;
+  unsigned numEntries = 0;
+  unsigned powerOf2 = 1;
+  for (unsigned bitsLength = 1;
+       bitsLength <= 8*SIZEOF_HUFFBITS; ++bitsLength) {
+    powerOf2 *= 2;
+    for (HUFFBITS bits = 0; bits < powerOf2; ++bits) {
+      // Find the table value - if any - for 'bits' (length 'bitsLength'):
+      unsigned char xy;
+      if (lookupHuffmanTableEntry(h, bits, bitsLength, xy)) {
+	++numEntries;
+	if (numEntries == maxNumEntries) return; // we're done
+      }
+    }
+  }
+#ifdef DEBUG
+  fprintf(stderr, "Didn't find enough entries!\n"); // shouldn't happen
+#endif
+}
+
+// Emit the Huffman code for symbol "xy" (looked up in table "h") to "bv".
+// The stored code bits are in reverse order, so they are written out
+// starting from the least-significant bit.
+static void lookupXYandPutBits(BitVector& bv, struct huffcodetab const* h,
+			       unsigned char xy) {
+  HUFFBITS code = h->table[xy];
+  unsigned numBits = h->hlen[xy];
+
+  for (unsigned i = 0; i < numBits; ++i) {
+    bv.put1Bit(code&0x00000001);
+    code >>= 1;
+  }
+}
+
+// Write the 'linbits' escape value "bits" to "bv"; the number of bits
+// written comes from the table's "linbits" field.
+static void putLinbits(BitVector& bv, struct huffcodetab const* h,
+		       HUFFBITS bits) {
+  bv.putBits(bits, h->linbits);
+}
+
+// Huffman-encode one group of quantized sample values, writing the code
+// bits to "bv" using Huffman table "h".  For the two 'quad' tables (named
+// "32"/"33") all four of (x,y,v,w) are encoded, one bit each; for 'dual'
+// tables only (x,y) are encoded and v,w must be 0.  Sign bits follow each
+// nonzero value, and 'linbits' escape fields are appended for values that
+// exceed the table's range.  Builds "h"'s encoding array on first use;
+// emits nothing if the table is empty or the array can't be built.
+static void rsf_huffman_encoder(BitVector& bv,
+				struct huffcodetab* h,
+				int x, int y, int v, int w) {
+  if (h->val == NULL) return; // no decoding tree => nothing we can do
+
+  /* table 0 produces no bits */
+  if (h->treelen == 0) return;
+
+  if (h->table == NULL) {
+    // We haven't yet built the encoding array for this table; do it now:
+    buildHuffmanEncodingTable(h);
+    if (h->table == NULL) return;
+  }
+
+  Boolean xIsNeg = False, yIsNeg = False, vIsNeg = False, wIsNeg = False;
+  unsigned char xy;
+
+#ifdef FOUR_BYTE_SAMPLES
+  // Samples are already full-width ints; no sign extension needed.
+#else
+#ifdef TWO_BYTE_SAMPLES
+  // Convert 2-byte negative numbers to their 4-byte equivalents:
+  if (x&0x8000) x |= 0xFFFF0000;
+  if (y&0x8000) y |= 0xFFFF0000;
+  if (v&0x8000) v |= 0xFFFF0000;
+  if (w&0x8000) w |= 0xFFFF0000;
+#else
+  // ONE_BYTE_SAMPLES
+  // Convert 1-byte negative numbers to their 4-byte equivalents:
+  if (x&0x80) x |= 0xFFFFFF00;
+  if (y&0x80) y |= 0xFFFFFF00;
+  if (v&0x80) v |= 0xFFFFFF00;
+  if (w&0x80) w |= 0xFFFFFF00;
+#endif
+#endif
+
+  if (h->tablename[0] == '3'
+      && (h->tablename[1] == '2' || h->tablename[1] == '3')) {// quad tables
+    // Take absolute values, remembering each sign:
+    if (x < 0) { xIsNeg = True; x = -x; }
+    if (y < 0) { yIsNeg = True; y = -y; }
+    if (v < 0) { vIsNeg = True; v = -v; }
+    if (w < 0) { wIsNeg = True; w = -w; }
+
+    // Sanity check: x,y,v,w must all be 0 or 1:
+    if (x>1 || y>1 || v>1 || w>1) {
+#ifdef DEBUG
+      fprintf(stderr, "rsf_huffman_encoder quad sanity check fails: %x,%x,%x,%x\n", x, y, v, w);
+#endif
+    }
+
+    xy = (v<<3)|(w<<2)|(x<<1)|y; // pack the four 1-bit values into an index
+    lookupXYandPutBits(bv, h, xy);
+
+    // One sign bit per nonzero value, in v,w,x,y order:
+    if (v) bv.put1Bit(vIsNeg);
+    if (w) bv.put1Bit(wIsNeg);
+    if (x) bv.put1Bit(xIsNeg);
+    if (y) bv.put1Bit(yIsNeg);
+  } else { // dual tables
+    // Sanity check: v and w must be 0:
+    if (v != 0 || w != 0) {
+#ifdef DEBUG
+      fprintf(stderr, "rsf_huffman_encoder dual sanity check 1 fails: %x,%x,%x,%x\n", x, y, v, w);
+#endif
+    }
+
+    if (x < 0) { xIsNeg = True; x = -x; }
+    if (y < 0) { yIsNeg = True; y = -y; }
+
+    // Sanity check: x and y must be <= 255:
+    if (x > 255 || y > 255) {
+#ifdef DEBUG
+      fprintf(stderr, "rsf_huffman_encoder dual sanity check 2 fails: %x,%x,%x,%x\n", x, y, v, w);
+#endif
+    }
+
+    int xl1 = h->xlen-1; // values >= xl1/yl1 use the table's escape entry
+    int yl1 = h->ylen-1;
+    unsigned linbitsX = 0; unsigned linbitsY = 0;
+
+    if (((x < xl1) || (xl1 == 0)) && (y < yl1)) {
+      // normal case: both values fit in the table directly
+      xy = (x<<4)|y;
+      lookupXYandPutBits(bv, h, xy);
+      if (x) bv.put1Bit(xIsNeg);
+      if (y) bv.put1Bit(yIsNeg);
+    } else if (x >= xl1) {
+      // x needs the escape entry: encode (x - xl1) in "linbits" extra bits
+      linbitsX = (unsigned)(x - xl1);
+      if (linbitsX > h->linmax) {
+#ifdef DEBUG
+	fprintf(stderr,"warning: Huffman X table overflow\n");
+#endif
+	linbitsX = h->linmax;
+      };
+
+      if (y >= yl1) {
+	// Both x and y overflow; each gets its own linbits field
+	// (when this table has linbits at all):
+	xy = (xl1<<4)|yl1;
+	lookupXYandPutBits(bv, h, xy);
+	linbitsY = (unsigned)(y - yl1);
+	if (linbitsY > h->linmax) {
+#ifdef DEBUG
+	  fprintf(stderr,"warning: Huffman Y table overflow\n");
+#endif
+	  linbitsY = h->linmax;
+	};
+
+	if (h->linbits) putLinbits(bv, h, linbitsX);
+	if (x) bv.put1Bit(xIsNeg);
+	if (h->linbits) putLinbits(bv, h, linbitsY);
+	if (y) bv.put1Bit(yIsNeg);
+      } else { /* x >= xl1, y < yl1 */
+	xy = (xl1<<4)|y;
+	lookupXYandPutBits(bv, h, xy);
+	if (h->linbits) putLinbits(bv, h, linbitsX);
+	if (x) bv.put1Bit(xIsNeg);
+	if (y) bv.put1Bit(yIsNeg);
+      }
+    } else { /* (x < xl1) && (y >= yl1) */
+      // Only y overflows its table range:
+      xy = (x<<4)|yl1;
+      lookupXYandPutBits(bv, h, xy);
+      linbitsY = y-yl1;
+      if (linbitsY > h->linmax) {
+#ifdef DEBUG
+	fprintf(stderr,"warning: Huffman Y table overflow\n");
+#endif
+	linbitsY = h->linmax;
+      };
+      if (x) bv.put1Bit(xIsNeg);
+      if (h->linbits) putLinbits(bv, h, linbitsY);
+      if (y) bv.put1Bit(yIsNeg);
+    }
+  }
+}
+#endif
diff --git a/liveMedia/MP3InternalsHuffman.hh b/liveMedia/MP3InternalsHuffman.hh
new file mode 100644
index 0000000..dcd4f2f
--- /dev/null
+++ b/liveMedia/MP3InternalsHuffman.hh
@@ -0,0 +1,82 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 internal implementation details (Huffman encoding)
+// C++ header
+
+#ifndef _MP3_INTERNALS_HUFFMAN_HH
+#define _MP3_INTERNALS_HUFFMAN_HH
+
+#ifndef _MP3_INTERNALS_HH
+#include "MP3Internals.hh"
+#endif
+
+void updateSideInfoForHuffman(MP3SideInfo& sideInfo, Boolean isMPEG2,
+ unsigned char const* mainDataPtr,
+ unsigned p23L0, unsigned p23L1,
+ unsigned& part23Length0a,
+ unsigned& part23Length0aTruncation,
+ unsigned& part23Length0b,
+ unsigned& part23Length0bTruncation,
+ unsigned& part23Length1a,
+ unsigned& part23Length1aTruncation,
+ unsigned& part23Length1b,
+ unsigned& part23Length1bTruncation);
+
+#define SSLIMIT 18
+
+// Per-granule record of where each Huffman-coded MP3 sample begins.
+// Filled in by "MP3HuffmanDecode()" (which takes one of these by
+// non-const reference).  If constructed with "includeDecodedValues" ==
+// True, the decoded sample values themselves are presumably recorded in
+// "decodedValues" as well — confirm against the constructor definition.
+class MP3HuffmanEncodingInfo {
+public:
+  MP3HuffmanEncodingInfo(Boolean includeDecodedValues = False);
+  ~MP3HuffmanEncodingInfo();
+
+public:
+  unsigned numSamples; // # of samples described by "allBitOffsets"
+  unsigned allBitOffsets[SBLIMIT*SSLIMIT + 1]; // bit offset of each sample (plus one end marker)
+  unsigned reg1Start, reg2Start, bigvalStart; /* special bit offsets */
+  unsigned* decodedValues; // NULL unless decoded values were requested — TODO confirm
+};
+
+/* forward */
+void MP3HuffmanDecode(MP3SideInfo::gr_info_s_t* gr, Boolean isMPEG2,
+ unsigned char const* fromBasePtr,
+ unsigned fromBitOffset, unsigned fromLength,
+ unsigned& scaleFactorsLength,
+ MP3HuffmanEncodingInfo& hei);
+
+extern unsigned char huffdec[]; // huffman table data
+
+// The following are used if we process Huffman-decoded values
+#ifdef FOUR_BYTE_SAMPLES
+#define BYTES_PER_SAMPLE_VALUE 4
+#else
+#ifdef TWO_BYTE_SAMPLES
+#define BYTES_PER_SAMPLE_VALUE 2
+#else
+// ONE_BYTE_SAMPLES
+#define BYTES_PER_SAMPLE_VALUE 1
+#endif
+#endif
+
+#ifdef DO_HUFFMAN_ENCODING
+unsigned MP3HuffmanEncode(MP3SideInfo::gr_info_s_t const* gr,
+ unsigned char const* fromPtr,
+ unsigned char* toPtr, unsigned toBitOffset,
+ unsigned numHuffBits);
+#endif
+
+#endif
diff --git a/liveMedia/MP3InternalsHuffmanTable.cpp b/liveMedia/MP3InternalsHuffmanTable.cpp
new file mode 100644
index 0000000..8419e44
--- /dev/null
+++ b/liveMedia/MP3InternalsHuffmanTable.cpp
@@ -0,0 +1,1548 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 internal implementation details (Huffman encoding)
+// Table
+
+#include "MP3InternalsHuffman.hh"
+
+unsigned char huffdec[] = {
+0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x0a,
+0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x2e,
+0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x20, 0x37,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74,
+0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x31, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x37, 0x20, 0x20, 0x33, 0x20, 0x20, 0x33, 0x20,
+0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61,
+0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x0a, 0x0a, 0x2e, 0x74,
+0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x33, 0x20, 0x20, 0x31, 0x37, 0x20,
+0x20, 0x33, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72,
+0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x32, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61,
+0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x35, 0x20,
+0x20, 0x33, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30,
+0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20,
+0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20,
+0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20,
+0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61,
+0x62, 0x6c, 0x65, 0x20, 0x20, 0x36, 0x20, 0x20, 0x33, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65,
+0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33,
+0x30, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x33, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20,
+0x37, 0x20, 0x20, 0x37, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61,
+0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x34, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x35, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20,
+0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20,
+0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x37, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30,
+0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x0a,
+0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33,
+0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34,
+0x32, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x33, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x32, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x0a,
+0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x20, 0x39, 0x20, 0x20, 0x37,
+0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e,
+0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x61, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x32, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x63,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20,
+0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31,
+0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34,
+0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x0a, 0x2e, 0x74,
+0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x30, 0x20, 0x31, 0x32, 0x37, 0x20,
+0x20, 0x38, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72,
+0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x31, 0x63, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x31, 0x33, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x31, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x31, 0x63,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20,
+0x0a, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x30, 0x20, 0x34,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x34, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x36, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20,
+0x37, 0x33, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x34, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x35, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x0a,
+0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x31, 0x20, 0x31,
+0x32, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x0a,
+0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x31,
+0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34,
+0x20, 0x31, 0x65, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x32, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x35, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30,
+0x20, 0x36, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x36, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x31, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20,
+0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x35,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37,
+0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x37, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31,
+0x32, 0x20, 0x31, 0x32, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61,
+0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32,
+0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x32, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x61,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x35, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x36,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34,
+0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x36, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x61, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x30, 0x20,
+0x37, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x33, 0x37, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x37, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c,
+0x65, 0x20, 0x31, 0x33, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20,
+0x31, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64,
+0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x31, 0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20,
+0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x0a, 0x34, 0x36, 0x20, 0x20, 0x31,
+0x20, 0x31, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35,
+0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34,
+0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x0a, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x33, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x36, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x38, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30,
+0x20, 0x38, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20,
+0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x37, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20,
+0x34, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x38,
+0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34,
+0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32,
+0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38,
+0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x31, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x30, 0x20,
+0x36, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x30,
+0x20, 0x38, 0x33, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x39, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39, 0x20, 0x20, 0x65,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x39, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x36, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x38, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x34, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x63, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x61,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x39,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x33,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20, 0x20, 0x30, 0x20, 0x39,
+0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62,
+0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x62, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x62, 0x32, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x30, 0x20,
+0x37, 0x37, 0x20, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x37, 0x38, 0x20, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x36, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x62, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x38, 0x38, 0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x62, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x36, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20,
+0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20,
+0x33, 0x63, 0x20, 0x20, 0x31, 0x20, 0x31, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x63, 0x20,
+0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x39,
+0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x32,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x30, 0x20, 0x33, 0x63,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20, 0x31, 0x30, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61,
+0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x39, 0x37, 0x20, 0x20, 0x30, 0x20, 0x61, 0x37, 0x20, 0x20, 0x63, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20, 0x30,
+0x20, 0x39, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x63, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x64, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x32, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x64, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x62, 0x20, 0x20,
+0x30, 0x20, 0x64, 0x33, 0x20, 0x33, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31,
+0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x64, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x36, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20,
+0x20, 0x30, 0x20, 0x64, 0x34, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x62,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x64,
+0x20, 0x20, 0x30, 0x20, 0x63, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63,
+0x20, 0x20, 0x30, 0x20, 0x64, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30,
+0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x65, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x65, 0x33, 0x20, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x63, 0x20, 0x20, 0x30, 0x20,
+0x65, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x30, 0x20,
+0x62, 0x61, 0x20, 0x20, 0x30, 0x20, 0x66, 0x30, 0x20, 0x32, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x66, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x66, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30,
+0x20, 0x39, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20, 0x0a, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x36, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x38, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x64, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62, 0x20, 0x0a,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x65, 0x20,
+0x20, 0x30, 0x20, 0x63, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x66, 0x20,
+0x0a, 0x32, 0x30, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38,
+0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x30, 0x20, 0x33, 0x66,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x66, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66,
+0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20, 0x30, 0x20, 0x61,
+0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x66, 0x20, 0x20, 0x30, 0x20,
+0x65, 0x38, 0x20, 0x31, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x63, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x20, 0x30,
+0x20, 0x63, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30,
+0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x66, 0x37, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x65, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x66, 0x20, 0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x65, 0x20, 0x20, 0x30, 0x20, 0x63, 0x63, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x38, 0x20,
+0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x62, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x66, 0x39, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x65,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x64,
+0x20, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65,
+0x63, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x66, 0x20, 0x20, 0x30, 0x20, 0x64,
+0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x63, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x65, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x0a, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x20, 0x30,
+0x20, 0x66, 0x65, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65,
+0x20, 0x31, 0x34, 0x20, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61,
+0x74, 0x61, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31,
+0x35, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20,
+0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61,
+0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x30,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x32, 0x20, 0x33, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x33, 0x20, 0x20, 0x65, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x33, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x61,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x34, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x32, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x31, 0x20, 0x35, 0x61, 0x20, 0x20, 0x31, 0x20,
+0x32, 0x34, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x35,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x32,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x0a, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x37, 0x20, 0x20, 0x30, 0x20, 0x36, 0x34, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x37, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x34, 0x36, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x36, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30,
+0x20, 0x38, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x0a, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x38, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x38, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x38, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x36, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x38, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x37, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x34, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x39,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x32,
+0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39,
+0x20, 0x0a, 0x35, 0x63, 0x20, 0x20, 0x31, 0x20, 0x32, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x37, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x33, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x39, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x36, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x36, 0x38, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x61, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x39, 0x35, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35, 0x39, 0x20, 0x31,
+0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x61, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20,
+0x20, 0x30, 0x20, 0x39, 0x36, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x62,
+0x20, 0x20, 0x30, 0x20, 0x61, 0x35, 0x20, 0x20, 0x30, 0x20, 0x62, 0x32,
+0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x20, 0x30, 0x20, 0x32, 0x62,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x38,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x62, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x63, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x63, 0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x35, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x62, 0x20, 0x20, 0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x30,
+0x20, 0x63, 0x32, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x30,
+0x20, 0x61, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x61, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20,
+0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20,
+0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x0a,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x34, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20,
+0x20, 0x30, 0x20, 0x61, 0x38, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x62,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x64,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x64, 0x20, 0x20, 0x30, 0x20, 0x32, 0x64, 0x20, 0x20, 0x63, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x32, 0x20, 0x20, 0x30, 0x20, 0x64,
+0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x36, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x20, 0x30, 0x20,
+0x62, 0x38, 0x20, 0x20, 0x30, 0x20, 0x64, 0x34, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x62, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x64, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x63, 0x20, 0x34, 0x34, 0x20, 0x20, 0x31, 0x20, 0x32, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x64, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x32, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x61, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x65, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x62, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x33, 0x20,
+0x20, 0x30, 0x20, 0x64, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x65, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x63,
+0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x65,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x37,
+0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x65, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63, 0x20, 0x0a, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x66, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x30, 0x20, 0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x0a, 0x32, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38, 0x20, 0x20, 0x30,
+0x20, 0x66, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x34, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x61, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x37, 0x20, 0x0a,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x65, 0x20,
+0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x30, 0x20, 0x35, 0x66, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20,
+0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x20, 0x30, 0x20, 0x63, 0x62,
+0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65,
+0x20, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64,
+0x61, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x66,
+0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37,
+0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x39, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20, 0x30, 0x20,
+0x63, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x39, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30,
+0x20, 0x64, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x63, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x62, 0x65, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x66, 0x20, 0x20,
+0x30, 0x20, 0x64, 0x64, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x63, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x65, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x65, 0x20,
+0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x63, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20,
+0x20, 0x30, 0x20, 0x65, 0x65, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x66,
+0x20, 0x20, 0x30, 0x20, 0x66, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66,
+0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x36,
+0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20,
+0x31, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20,
+0x32, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x32, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x32, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x32, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x33, 0x33, 0x20, 0x20, 0x30, 0x20, 0x34, 0x32, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x34, 0x20, 0x38, 0x61, 0x20, 0x20, 0x31, 0x20, 0x32, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x35, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20, 0x20,
+0x30, 0x20, 0x35, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x30, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x36, 0x20, 0x20, 0x30, 0x20, 0x35, 0x34, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x35,
+0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x36,
+0x20, 0x20, 0x30, 0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31,
+0x20, 0x32, 0x38, 0x20, 0x20, 0x31, 0x20, 0x31, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x37, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x35, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x36, 0x35, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x61, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x30, 0x20, 0x20, 0x30, 0x20, 0x38, 0x31, 0x20, 0x0a, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x20, 0x30,
+0x20, 0x36, 0x36, 0x20, 0x31, 0x38, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x38, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20, 0x20,
+0x30, 0x20, 0x38, 0x34, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x39, 0x30, 0x20, 0x20, 0x30, 0x20, 0x39, 0x31, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x32, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x39, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x35,
+0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x39,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61,
+0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x37, 0x20, 0x20, 0x30, 0x20, 0x34, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39,
+0x34, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x37, 0x37, 0x20, 0x20, 0x30, 0x20, 0x38, 0x36, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x38, 0x20, 0x20, 0x30, 0x20,
+0x39, 0x35, 0x20, 0x64, 0x63, 0x20, 0x20, 0x31, 0x20, 0x37, 0x65, 0x20,
+0x20, 0x31, 0x20, 0x33, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x61, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x39, 0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x33, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x38, 0x20, 0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x61, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x36, 0x20, 0x20,
+0x30, 0x20, 0x36, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x62, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x31, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x62, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x32, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x61, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x61, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36, 0x61,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x62,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63,
+0x20, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20, 0x0a, 0x31, 0x65, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x35, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61,
+0x37, 0x20, 0x20, 0x30, 0x20, 0x63, 0x33, 0x20, 0x0a, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x30, 0x20,
+0x63, 0x34, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x64, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x38, 0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x20, 0x30, 0x20,
+0x33, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x64, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x33, 0x20, 0x31, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x65, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x65, 0x32, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x39, 0x20, 0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x38, 0x39, 0x20, 0x20, 0x30, 0x20, 0x35, 0x62, 0x20, 0x0a,
+0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x63, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x63, 0x20,
+0x20, 0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x30, 0x20, 0x61, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x61,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x63, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x63,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x36,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x35, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x35, 0x36, 0x20, 0x20, 0x31, 0x20, 0x32, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x62, 0x20, 0x20, 0x30, 0x20, 0x34,
+0x64, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x63, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x64, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x65, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x65, 0x33, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x64, 0x30, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x62, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x61, 0x39, 0x20, 0x20, 0x30, 0x20, 0x62, 0x38, 0x20, 0x20, 0x30,
+0x20, 0x64, 0x34, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x65, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x31,
+0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x62, 0x20, 0x20,
+0x30, 0x20, 0x64, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x64, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20,
+0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x38, 0x63, 0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x37,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62,
+0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x63,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36,
+0x65, 0x20, 0x20, 0x30, 0x20, 0x64, 0x38, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x30, 0x20, 0x62,
+0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x37, 0x20, 0x20, 0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x38, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x62, 0x20, 0x20, 0x30, 0x20,
+0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x66, 0x20, 0x20, 0x30, 0x20, 0x32, 0x66, 0x20, 0x0a, 0x34, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x33, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x33, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x33, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x65, 0x20, 0x0a, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x39, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x61, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x63, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x61, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63, 0x63, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x62, 0x20,
+0x20, 0x30, 0x20, 0x64, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x63, 0x64, 0x20, 0x20, 0x30, 0x20, 0x62, 0x65, 0x20,
+0x0a, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x65,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x65, 0x39,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64,
+0x65, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x63, 0x20, 0x20, 0x30, 0x20, 0x63, 0x65, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x30, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66,
+0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x34, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x30, 0x20,
+0x35, 0x66, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x36, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x37, 0x66, 0x20, 0x20, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x38, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x66, 0x38, 0x20, 0x20, 0x30, 0x20, 0x66, 0x39, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x39, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x66, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x66, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x66, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20,
+0x20, 0x30, 0x20, 0x64, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x66, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20,
+0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31, 0x37, 0x20,
+0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x32,
+0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20,
+0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x31,
+0x38, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20,
+0x20, 0x33, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65,
+0x20, 0x31, 0x39, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31,
+0x36, 0x20, 0x20, 0x34, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
+0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62,
+0x6c, 0x65, 0x20, 0x32, 0x30, 0x20, 0x35, 0x31, 0x31, 0x20, 0x31, 0x36,
+0x20, 0x31, 0x36, 0x20, 0x20, 0x36, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65,
+0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74,
+0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x31, 0x20, 0x35, 0x31, 0x31, 0x20,
+0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x38, 0x0a, 0x2e, 0x72, 0x65,
+0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36, 0x0a, 0x0a,
+0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x32, 0x20, 0x35, 0x31,
+0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x30, 0x0a, 0x2e,
+0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x31, 0x36,
+0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x33, 0x20,
+0x35, 0x31, 0x31, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x33,
+0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20,
+0x31, 0x36, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32,
+0x34, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20,
+0x20, 0x34, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61,
+0x0a, 0x33, 0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x30,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31,
+0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x65, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x31, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x32, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x33, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x34, 0x20, 0x20, 0x30,
+0x20, 0x33, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x34, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x34, 0x20, 0x20, 0x36,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x34, 0x33, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x30, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x31, 0x35, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x32, 0x20, 0x20,
+0x30, 0x20, 0x32, 0x35, 0x20, 0x0a, 0x66, 0x61, 0x20, 0x20, 0x31, 0x20,
+0x36, 0x32, 0x20, 0x20, 0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x31, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x34, 0x20, 0x20, 0x30, 0x20, 0x35, 0x33, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x35, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x36, 0x30,
+0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x36, 0x32, 0x20, 0x20, 0x30, 0x20, 0x32, 0x36,
+0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x35,
+0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33,
+0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x36, 0x20, 0x0a, 0x32, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x65, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x37, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x37, 0x20, 0x20, 0x30, 0x20, 0x33, 0x37, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x33, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30,
+0x20, 0x37, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x20, 0x30,
+0x20, 0x31, 0x37, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x36, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x36, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x38, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x38, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x34, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x31, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x32, 0x20, 0x31,
+0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x32, 0x38, 0x20, 0x20, 0x30, 0x20, 0x36, 0x36, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x33, 0x20,
+0x20, 0x30, 0x20, 0x33, 0x38, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x35, 0x20,
+0x20, 0x30, 0x20, 0x35, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x38, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x38, 0x20,
+0x0a, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x31, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x32, 0x20, 0x20, 0x30, 0x20, 0x37, 0x36,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x36, 0x37, 0x20, 0x20, 0x30, 0x20, 0x32, 0x39,
+0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38,
+0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x38, 0x20, 0x35, 0x63, 0x20, 0x20,
+0x31, 0x20, 0x32, 0x32, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39,
+0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x39, 0x34, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x37, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x36, 0x38, 0x20, 0x20, 0x30, 0x20, 0x61, 0x31, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x61, 0x32, 0x20, 0x20, 0x30,
+0x20, 0x32, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x39, 0x35, 0x20, 0x20, 0x30, 0x20, 0x35, 0x39, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x61, 0x33, 0x20, 0x20, 0x30, 0x20, 0x33, 0x61, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x37, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x37, 0x38, 0x20, 0x20,
+0x30, 0x20, 0x34, 0x61, 0x20, 0x31, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x63, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x34, 0x20, 0x20,
+0x30, 0x20, 0x39, 0x36, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x36, 0x39, 0x20, 0x20,
+0x30, 0x20, 0x62, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x31, 0x62, 0x20, 0x20, 0x30, 0x20, 0x61, 0x35, 0x20,
+0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x35, 0x61, 0x20, 0x20, 0x30, 0x20, 0x32, 0x62, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x38, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x33, 0x20, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x30,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39,
+0x20, 0x20, 0x30, 0x20, 0x61, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x39, 0x37, 0x20, 0x20, 0x30, 0x20, 0x37, 0x39,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x36, 0x20, 0x20, 0x30, 0x20, 0x36,
+0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x34, 0x20, 0x20, 0x63, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62,
+0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x33, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x30, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x34, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x38, 0x20, 0x20, 0x30, 0x20,
+0x38, 0x39, 0x20, 0x34, 0x33, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x32, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x63, 0x20, 0x20, 0x30,
+0x20, 0x62, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x62, 0x20, 0x20, 0x30, 0x20, 0x63, 0x32, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x32, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x37, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x61, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x33, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x36, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x33, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x30, 0x20, 0x0a,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x36, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x62, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63, 0x34, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x39, 0x39, 0x20, 0x20, 0x30, 0x20, 0x61, 0x38, 0x20,
+0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20,
+0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x38, 0x61, 0x20, 0x20, 0x30, 0x20, 0x63, 0x35,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x35, 0x63,
+0x20, 0x20, 0x30, 0x20, 0x64, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x37,
+0x20, 0x20, 0x30, 0x20, 0x37, 0x62, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x31, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64,
+0x32, 0x20, 0x20, 0x39, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32,
+0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x33, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x64, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x36, 0x20, 0x35, 0x35, 0x20, 0x66, 0x61, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x36, 0x63, 0x20, 0x20, 0x30, 0x20, 0x61, 0x39, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x61, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x34, 0x20, 0x32, 0x30, 0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x62, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x62, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x34, 0x64, 0x20, 0x20, 0x30,
+0x20, 0x63, 0x37, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x37, 0x63, 0x20, 0x20, 0x30,
+0x20, 0x64, 0x35, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x31, 0x65, 0x20, 0x20,
+0x30, 0x20, 0x65, 0x32, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x61, 0x61, 0x20, 0x20, 0x30, 0x20, 0x62, 0x39, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x39, 0x62, 0x20, 0x20, 0x30, 0x20, 0x65, 0x33, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x64, 0x36, 0x20,
+0x20, 0x30, 0x20, 0x36, 0x64, 0x20, 0x31, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x36, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x33, 0x65, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x32, 0x65, 0x20,
+0x20, 0x30, 0x20, 0x34, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x63, 0x38, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x38, 0x63,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x34, 0x20, 0x20, 0x30, 0x20, 0x64, 0x37,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x37, 0x64, 0x20, 0x20, 0x30, 0x20, 0x61, 0x62,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x35, 0x20, 0x20, 0x61, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x61, 0x20, 0x20, 0x30, 0x20, 0x35,
+0x65, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39,
+0x63, 0x20, 0x20, 0x30, 0x20, 0x36, 0x65, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x36, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20,
+0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x65, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x64, 0x38, 0x20, 0x20, 0x30, 0x20, 0x38, 0x64, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x62, 0x62, 0x20, 0x20, 0x30, 0x20,
+0x63, 0x61, 0x20, 0x34, 0x61, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x66, 0x20, 0x34, 0x30,
+0x20, 0x20, 0x31, 0x20, 0x33, 0x61, 0x20, 0x20, 0x31, 0x20, 0x32, 0x30,
+0x20, 0x20, 0x31, 0x20, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x63, 0x20, 0x20, 0x30,
+0x20, 0x65, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20,
+0x30, 0x20, 0x37, 0x65, 0x20, 0x20, 0x30, 0x20, 0x64, 0x39, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x39, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65, 0x38, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x38, 0x65, 0x20, 0x20,
+0x30, 0x20, 0x63, 0x62, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x0a,
+0x20, 0x30, 0x20, 0x62, 0x63, 0x20, 0x20, 0x30, 0x20, 0x64, 0x61, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x61, 0x64, 0x20,
+0x20, 0x30, 0x20, 0x65, 0x39, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x65, 0x20,
+0x20, 0x30, 0x20, 0x63, 0x63, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x64, 0x62, 0x20, 0x20, 0x30, 0x20, 0x62, 0x64, 0x20,
+0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x61, 0x20, 0x20, 0x30, 0x20, 0x61, 0x65,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x63,
+0x20, 0x20, 0x30, 0x20, 0x63, 0x64, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x65, 0x62,
+0x20, 0x0a, 0x20, 0x30, 0x20, 0x62, 0x65, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x64, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x63, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x63,
+0x65, 0x20, 0x20, 0x30, 0x20, 0x65, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x64, 0x65, 0x20, 0x20, 0x30, 0x20, 0x65,
+0x65, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x30, 0x20, 0x20, 0x30, 0x20, 0x31, 0x66, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x32, 0x20, 0x20, 0x30, 0x20,
+0x32, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x66, 0x33, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x33, 0x66, 0x20, 0x31, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x66, 0x34, 0x20, 0x20, 0x30, 0x20, 0x34, 0x66, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x35, 0x20, 0x20, 0x30,
+0x20, 0x35, 0x66, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x66, 0x36, 0x20, 0x20,
+0x30, 0x20, 0x36, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x66, 0x37, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x37, 0x66, 0x20, 0x20, 0x30, 0x20, 0x38, 0x66, 0x20, 0x20,
+0x61, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x38, 0x20, 0x20,
+0x30, 0x20, 0x66, 0x39, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x39, 0x66, 0x20,
+0x20, 0x30, 0x20, 0x61, 0x66, 0x20, 0x20, 0x30, 0x20, 0x66, 0x61, 0x20,
+0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x62, 0x20,
+0x20, 0x30, 0x20, 0x62, 0x66, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20,
+0x20, 0x30, 0x20, 0x66, 0x63, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x63, 0x66,
+0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31,
+0x20, 0x20, 0x30, 0x20, 0x66, 0x64, 0x20, 0x20, 0x30, 0x20, 0x64, 0x66,
+0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x66, 0x65,
+0x20, 0x20, 0x30, 0x20, 0x65, 0x66, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61,
+0x62, 0x6c, 0x65, 0x20, 0x32, 0x35, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31,
+0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x35, 0x0a, 0x2e, 0x72, 0x65, 0x66,
+0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e,
+0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x36, 0x20, 0x35, 0x31, 0x32,
+0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x36, 0x0a, 0x2e, 0x72,
+0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a,
+0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x37, 0x20, 0x35,
+0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20, 0x37, 0x0a,
+0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32,
+0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x32, 0x38,
+0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36, 0x20, 0x20,
+0x38, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65,
+0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20,
+0x32, 0x39, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20, 0x31, 0x36,
+0x20, 0x20, 0x39, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e,
+0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c,
+0x65, 0x20, 0x33, 0x30, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31, 0x36, 0x20,
+0x31, 0x36, 0x20, 0x31, 0x31, 0x0a, 0x2e, 0x72, 0x65, 0x66, 0x65, 0x72,
+0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e, 0x74, 0x61,
+0x62, 0x6c, 0x65, 0x20, 0x33, 0x31, 0x20, 0x35, 0x31, 0x32, 0x20, 0x31,
+0x36, 0x20, 0x31, 0x36, 0x20, 0x31, 0x33, 0x0a, 0x2e, 0x72, 0x65, 0x66,
+0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x32, 0x34, 0x0a, 0x0a, 0x2e,
+0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x33, 0x32, 0x20, 0x20, 0x33, 0x31,
+0x20, 0x20, 0x31, 0x20, 0x31, 0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74,
+0x72, 0x65, 0x65, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x34, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x32, 0x20, 0x20, 0x38, 0x20, 0x20,
+0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x0a, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x61, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x33, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x36, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x39, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x35, 0x20, 0x20, 0x30, 0x20, 0x20, 0x37, 0x20, 0x0a, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x62, 0x20, 0x0a, 0x0a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65,
+0x20, 0x33, 0x33, 0x20, 0x20, 0x33, 0x31, 0x20, 0x20, 0x31, 0x20, 0x31,
+0x36, 0x20, 0x20, 0x30, 0x0a, 0x2e, 0x74, 0x72, 0x65, 0x65, 0x64, 0x61,
+0x74, 0x61, 0x0a, 0x31, 0x30, 0x20, 0x20, 0x31, 0x20, 0x20, 0x38, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x32, 0x20, 0x20, 0x30, 0x20, 0x20, 0x33, 0x20, 0x20, 0x34, 0x20,
+0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20,
+0x20, 0x34, 0x20, 0x0a, 0x20, 0x30, 0x20, 0x20, 0x35, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x36, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x37, 0x20, 0x20, 0x38, 0x20, 0x20, 0x31, 0x20, 0x20, 0x34,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x38, 0x20, 0x20, 0x30, 0x20, 0x20, 0x39, 0x20, 0x20, 0x32,
+0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x61, 0x20, 0x20, 0x30,
+0x20, 0x20, 0x62, 0x20, 0x0a, 0x20, 0x34, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x32, 0x20, 0x20, 0x31, 0x20, 0x20, 0x30, 0x20, 0x20, 0x63, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x64, 0x20, 0x20, 0x32, 0x20, 0x20, 0x31, 0x20, 0x20,
+0x30, 0x20, 0x20, 0x65, 0x20, 0x20, 0x30, 0x20, 0x20, 0x66, 0x20, 0x0a,
+0x0a, 0x2e, 0x65, 0x6e, 0x64, 0x0a
+};
diff --git a/liveMedia/MP3StreamState.cpp b/liveMedia/MP3StreamState.cpp
new file mode 100644
index 0000000..01bf52f
--- /dev/null
+++ b/liveMedia/MP3StreamState.cpp
@@ -0,0 +1,438 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class encapsulating the state of an MP3 stream
+// Implementation
+
+#include "MP3StreamState.hh"
+#include "InputFile.hh"
+#include "GroupsockHelper.hh"
+
+#if defined(__WIN32__) || defined(_WIN32)
+#define snprintf _snprintf
+#if _MSC_VER >= 1400 // 1400 == vs2005
+#define fileno _fileno
+#endif
+#endif
+
+#define MILLION 1000000
+
+MP3StreamState::MP3StreamState(UsageEnvironment& env)
+ : fEnv(env), fFid(NULL), fPresentationTimeScale(1) {
+}
+
+MP3StreamState::~MP3StreamState() {
+ // Close our open file or socket:
+ if (fFid != NULL && fFid != stdin) {
+ if (fFidIsReallyASocket) {
+ intptr_t fid_long = (intptr_t)fFid;
+ closeSocket((int)fid_long);
+ } else {
+ CloseInputFile(fFid);
+ }
+ }
+}
+
+void MP3StreamState::assignStream(FILE* fid, unsigned fileSize) {
+ fFid = fid;
+
+ if (fileSize == (unsigned)(-1)) { /*HACK#####*/
+ fFidIsReallyASocket = 1;
+ fFileSize = 0;
+ } else {
+ fFidIsReallyASocket = 0;
+ fFileSize = fileSize;
+ }
+ fNumFramesInFile = 0; // until we know otherwise
+ fIsVBR = fHasXingTOC = False; // ditto
+
+ // Set the first frame's 'presentation time' to the current wall time:
+ gettimeofday(&fNextFramePresentationTime, NULL);
+}
+
+struct timeval MP3StreamState::currentFramePlayTime() const {
+ unsigned const numSamples = 1152;
+ unsigned const freq = fr().samplingFreq*(1 + fr().isMPEG2);
+
+ // result is numSamples/freq
+ unsigned const uSeconds
+ = ((numSamples*2*MILLION)/freq + 1)/2; // rounds to nearest integer
+
+ struct timeval result;
+ result.tv_sec = uSeconds/MILLION;
+ result.tv_usec = uSeconds%MILLION;
+ return result;
+}
+
+float MP3StreamState::filePlayTime() const {
+ unsigned numFramesInFile = fNumFramesInFile;
+ if (numFramesInFile == 0) {
+ // Estimate the number of frames from the file size, and the
+ // size of the current frame:
+ numFramesInFile = fFileSize/(4 + fCurrentFrame.frameSize);
+ }
+
+ struct timeval const pt = currentFramePlayTime();
+ return numFramesInFile*(pt.tv_sec + pt.tv_usec/(float)MILLION);
+}
+
+unsigned MP3StreamState::getByteNumberFromPositionFraction(float fraction) {
+ if (fHasXingTOC) {
+ // The file is VBR, with a Xing TOC; use it to determine which byte to seek to:
+ float percent = fraction*100.0f;
+ unsigned a = (unsigned)percent;
+ if (a > 99) a = 99;
+
+ unsigned fa = fXingTOC[a];
+ unsigned fb;
+ if (a < 99) {
+ fb = fXingTOC[a+1];
+ } else {
+ fb = 256;
+ }
+ fraction = (fa + (fb-fa)*(percent-a))/256.0f;
+ }
+
+ return (unsigned)(fraction*fFileSize);
+}
+
+void MP3StreamState::seekWithinFile(unsigned seekByteNumber) {
+ if (fFidIsReallyASocket) return; // it's not seekable
+
+ SeekFile64(fFid, seekByteNumber, SEEK_SET);
+}
+
+unsigned MP3StreamState::findNextHeader(struct timeval& presentationTime) {
+ presentationTime = fNextFramePresentationTime;
+
+ if (!findNextFrame()) return 0;
+
+ // From this frame, figure out the *next* frame's presentation time:
+ struct timeval framePlayTime = currentFramePlayTime();
+ if (fPresentationTimeScale > 1) {
+ // Scale this value
+ unsigned secondsRem = framePlayTime.tv_sec % fPresentationTimeScale;
+ framePlayTime.tv_sec -= secondsRem;
+ framePlayTime.tv_usec += secondsRem*MILLION;
+ framePlayTime.tv_sec /= fPresentationTimeScale;
+ framePlayTime.tv_usec /= fPresentationTimeScale;
+ }
+ fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec;
+ fNextFramePresentationTime.tv_sec
+ += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION;
+ fNextFramePresentationTime.tv_usec %= MILLION;
+
+ return fr().hdr;
+}
+
+Boolean MP3StreamState::readFrame(unsigned char* outBuf, unsigned outBufSize,
+ unsigned& resultFrameSize,
+ unsigned& resultDurationInMicroseconds) {
+ /* We assume that "mp3FindNextHeader()" has already been called */
+
+ resultFrameSize = 4 + fr().frameSize;
+
+ if (outBufSize < resultFrameSize) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "Insufficient buffer size for reading input frame (%d, need %d)\n",
+ outBufSize, resultFrameSize);
+#endif
+ if (outBufSize < 4) outBufSize = 0;
+ resultFrameSize = outBufSize;
+
+ return False;
+ }
+
+ if (resultFrameSize >= 4) {
+ unsigned& hdr = fr().hdr;
+ *outBuf++ = (unsigned char)(hdr>>24);
+ *outBuf++ = (unsigned char)(hdr>>16);
+ *outBuf++ = (unsigned char)(hdr>>8);
+ *outBuf++ = (unsigned char)(hdr);
+
+ memmove(outBuf, fr().frameBytes, resultFrameSize-4);
+ }
+
+ struct timeval const pt = currentFramePlayTime();
+ resultDurationInMicroseconds = pt.tv_sec*MILLION + pt.tv_usec;
+
+ return True;
+}
+
+void MP3StreamState::getAttributes(char* buffer, unsigned bufferSize) const {
+ char const* formatStr
+ = "bandwidth %d MPEGnumber %d MPEGlayer %d samplingFrequency %d isStereo %d playTime %d isVBR %d";
+ unsigned fpt = (unsigned)(filePlayTime() + 0.5); // rounds to nearest integer
+#if defined(IRIX) || defined(ALPHA) || defined(_QNX4) || defined(IMN_PIM) || defined(CRIS)
+ /* snprintf() isn't defined, so just use sprintf() - ugh! */
+ sprintf(buffer, formatStr,
+ fr().bitrate, fr().isMPEG2 ? 2 : 1, fr().layer, fr().samplingFreq, fr().isStereo,
+ fpt, fIsVBR);
+#else
+ snprintf(buffer, bufferSize, formatStr,
+ fr().bitrate, fr().isMPEG2 ? 2 : 1, fr().layer, fr().samplingFreq, fr().isStereo,
+ fpt, fIsVBR);
+#endif
+}
+
+// This is crufty old code that needs to be cleaned up #####
+#define HDRCMPMASK 0xfffffd00
+
+// Scan the input stream for the next plausible MPEG audio frame header,
+// then read that frame's data into "fr().frameBytes".
+// Returns False on EOF/read error, or if no acceptable header is found
+// within 20,000 bytes.  (Crufty goto-based resync code, inherited from
+// mpg123-style parsing - see the note above this function.)
+Boolean MP3StreamState::findNextFrame() {
+  unsigned char hbuf[8];
+  unsigned l; int i;
+  int attempt = 0;
+
+ read_again:
+  if (readFromStream(hbuf, 4) != 4) return False;
+
+  // Assemble the 4 bytes just read into a 32-bit (big-endian) candidate header:
+  fr().hdr = ((unsigned long) hbuf[0] << 24)
+    | ((unsigned long) hbuf[1] << 16)
+    | ((unsigned long) hbuf[2] << 8)
+    | (unsigned long) hbuf[3];
+
+#ifdef DEBUG_PARSE
+  fprintf(stderr, "fr().hdr: 0x%08x\n", fr().hdr);
+#endif
+  if (fr().oldHdr != fr().hdr || !fr().oldHdr) {
+    i = 0;
+  init_resync:
+#ifdef DEBUG_PARSE
+    fprintf(stderr, "init_resync: fr().hdr: 0x%08x\n", fr().hdr);
+#endif
+    // Sanity-check the candidate header's fields:
+    if ( (fr().hdr & 0xffe00000) != 0xffe00000 // missing 11-bit sync word
+	 || (fr().hdr & 0x00060000) == 0 // undefined 'layer' field
+	 || (fr().hdr & 0x0000F000) == 0 // 'free format' bitrate index
+	 || (fr().hdr & 0x0000F000) == 0x0000F000 // undefined bitrate index
+	 || (fr().hdr & 0x00000C00) == 0x00000C00 // undefined frequency index
+	 || (fr().hdr & 0x00000003) != 0x00000000 // 'emphasis' field unexpectedly set
+	 ) {
+      /* RSF: Do the following test even if we're not at the
+	 start of the file, in case we have two or more
+	 separate MP3 files cat'ed together:
+      */
+      /* Check for RIFF hdr */
+      if (fr().hdr == ('R'<<24)+('I'<<16)+('F'<<8)+'F') {
+	unsigned char buf[70 /*was: 40*/];
+#ifdef DEBUG_ERRORS
+	fprintf(stderr,"Skipped RIFF header\n");
+#endif
+	readFromStream(buf, 66); /* already read 4 */
+	goto read_again;
+      }
+      /* Check for ID3 hdr */
+      if ((fr().hdr&0xFFFFFF00) == ('I'<<24)+('D'<<16)+('3'<<8)) {
+	unsigned tagSize, bytesToSkip;
+	unsigned char buf[1000];
+	readFromStream(buf, 6); /* already read 4 */
+	// The tag size is stored as four 7-bit bytes:
+	tagSize = ((buf[2]&0x7F)<<21) + ((buf[3]&0x7F)<<14) + ((buf[4]&0x7F)<<7) + (buf[5]&0x7F);
+	bytesToSkip = tagSize;
+	// Skip the tag body, "sizeof buf" bytes at a time:
+	while (bytesToSkip > 0) {
+	  unsigned bytesToRead = sizeof buf;
+	  if (bytesToRead > bytesToSkip) {
+	    bytesToRead = bytesToSkip;
+	  }
+	  readFromStream(buf, bytesToRead);
+	  bytesToSkip -= bytesToRead;
+	}
+#ifdef DEBUG_ERRORS
+	fprintf(stderr,"Skipped %d-byte ID3 header\n", tagSize);
+#endif
+	goto read_again;
+      }
+      /* give up after 20,000 bytes */
+      if (i++ < 20000/*4096*//*1024*/) {
+	// Slide the 4-byte search window forward one byte and retry:
+	memmove (&hbuf[0], &hbuf[1], 3);
+	if (readFromStream(hbuf+3,1) != 1) {
+	  return False;
+	}
+	fr().hdr <<= 8;
+	fr().hdr |= hbuf[3];
+	fr().hdr &= 0xffffffff;
+#ifdef DEBUG_PARSE
+	fprintf(stderr, "calling init_resync %d\n", i);
+#endif
+	goto init_resync;
+      }
+#ifdef DEBUG_ERRORS
+      fprintf(stderr,"Giving up searching valid MPEG header\n");
+#endif
+      return False;
+
+      // NOTE(review): everything from here to the end of this 'if' block is
+      // unreachable - it follows the unconditional "return False" above.
+      // Presumably dead code left over from the original implementation:
+#ifdef DEBUG_ERRORS
+      fprintf(stderr,"Illegal Audio-MPEG-Header 0x%08lx at offset 0x%lx.\n",
+	      fr().hdr,tell_stream(str)-4);
+#endif
+      /* Read more bytes until we find something that looks
+	 reasonably like a valid header.  This is not a
+	 perfect strategy, but it should get us back on the
+	 track within a short time (and hopefully without
+	 too much distortion in the audio output). */
+      do {
+	attempt++;
+	memmove (&hbuf[0], &hbuf[1], 7);
+	if (readFromStream(&hbuf[3],1) != 1) {
+	  return False;
+	}
+
+	/* This is faster than combining fr().hdr from scratch */
+	fr().hdr = ((fr().hdr << 8) | hbuf[3]) & 0xffffffff;
+
+	if (!fr().oldHdr)
+	  goto init_resync; /* "considered harmful", eh? */
+
+      } while ((fr().hdr & HDRCMPMASK) != (fr().oldHdr & HDRCMPMASK)
+	       && (fr().hdr & HDRCMPMASK) != (fr().firstHdr & HDRCMPMASK));
+#ifdef DEBUG_ERRORS
+      fprintf (stderr, "Skipped %d bytes in input.\n", attempt);
+#endif
+    }
+    // Remember the first valid header that we see:
+    if (!fr().firstHdr) {
+      fr().firstHdr = fr().hdr;
+    }
+
+    // Derive the frame's parameters (frameSize etc.) from its header:
+    fr().setParamsFromHeader();
+    fr().setBytePointer(fr().frameBytes, fr().frameSize);
+
+    fr().oldHdr = fr().hdr;
+
+    if (fr().isFreeFormat) {
+#ifdef DEBUG_ERRORS
+      fprintf(stderr,"Free format not supported.\n");
+#endif
+      return False;
+    }
+
+#ifdef MP3_ONLY
+    if (fr().layer != 3) {
+#ifdef DEBUG_ERRORS
+      fprintf(stderr, "MPEG layer %d is not supported!\n", fr().layer);
+#endif
+      return False;
+    }
+#endif
+  }
+
+  // Finally, read the frame's data (following the 4 header bytes already read):
+  if ((l = readFromStream(fr().frameBytes, fr().frameSize))
+      != fr().frameSize) {
+    if (l == 0) return False;
+    // Short read: zero-fill the rest of the frame.
+    // NOTE(review): this fills from offset 1, not from offset "l" -
+    // presumably it should be "frameBytes+l, frameSize-l"; confirm upstream.
+    memset(fr().frameBytes+1, 0, fr().frameSize-1);
+  }
+
+  return True;
+}
+
+static Boolean socketIsReadable(int socket) {
+ const unsigned numFds = socket+1;
+ fd_set rd_set;
+ FD_ZERO(&rd_set);
+ FD_SET((unsigned)socket, &rd_set);
+ struct timeval timeout;
+ timeout.tv_sec = timeout.tv_usec = 0;
+
+ int result = select(numFds, &rd_set, NULL, NULL, &timeout);
+ return result != 0; // not > 0, because windows can return -1 for file sockets
+}
+
+// Flag polled by the event loop below; nonzero tells "doEventLoop()" to return.
+static char watchVariable;
+
+static void checkFunc(void* /*clientData*/) {
+  // Delayed-task handler: signal the event loop to stop.
+  watchVariable = ~0;
+}
+
+// Block until "socket" has data to read, but do so via the event loop,
+// so that other scheduled tasks keep running in the meantime.
+static void waitUntilSocketIsReadable(UsageEnvironment& env, int socket) {
+  while (!socketIsReadable(socket)) {
+    // Not readable yet; wait ~1 ms (inside the event loop) and poll again:
+    unsigned const pollIntervalUsecs = 1000; // 1 ms
+    env.taskScheduler().scheduleDelayedTask(pollIntervalUsecs,
+					    (TaskFunc*)checkFunc, (void*)NULL);
+    watchVariable = 0;
+    env.taskScheduler().doEventLoop(&watchVariable);
+  }
+}
+
+// Read exactly "numChars" bytes into "buf", from either the underlying file
+// or - if "fFidIsReallyASocket" - from a network socket.
+// Returns the number of bytes actually read; 0 indicates an error.
+unsigned MP3StreamState::readFromStream(unsigned char* buf,
+					unsigned numChars) {
+  // Hack for doing socket I/O instead of file I/O (e.g., on Windows)
+  if (fFidIsReallyASocket) {
+    intptr_t fid_long = (intptr_t)fFid;
+    int sock = (int)fid_long;
+    unsigned totBytesRead = 0;
+    do {
+      waitUntilSocketIsReadable(fEnv, sock);
+      int bytesRead
+	= recv(sock, &((char*)buf)[totBytesRead], numChars-totBytesRead, 0);
+      if (bytesRead < 0) return 0; // read error
+      if (bytesRead == 0) {
+	// The peer closed the connection.  Return what we have (like an EOF
+	// on a file), rather than looping forever on a socket that will
+	// remain 'readable' but never deliver more data:
+	return totBytesRead;
+      }
+
+      totBytesRead += (unsigned)bytesRead;
+    } while (totBytesRead < numChars);
+
+    return totBytesRead;
+  } else {
+#ifndef _WIN32_WCE
+    // Wait (via the event loop) until the file has data, so other tasks run:
+    waitUntilSocketIsReadable(fEnv, (int)fileno(fFid));
+#endif
+    return fread(buf, 1, numChars, fFid);
+  }
+}
+
+#define XING_FRAMES_FLAG 0x0001
+#define XING_BYTES_FLAG 0x0002
+#define XING_TOC_FLAG 0x0004
+#define XING_VBR_SCALE_FLAG 0x0008
+
+// Look for a Xing VBR header in the current frame (immediately after the
+// 'side info'), and if present record the frame count, file size, and
+// seek table ('TOC') that it carries.
+void MP3StreamState::checkForXingHeader() {
+  // Look for 'Xing' in the first 4 bytes after the 'side info':
+  if (fr().frameSize < fr().sideInfoSize) return;
+  unsigned bytesAvailable = fr().frameSize - fr().sideInfoSize;
+  unsigned char* p = &(fr().frameBytes[fr().sideInfoSize]);
+
+  if (bytesAvailable < 8) return;
+  if (p[0] != 'X' || p[1] != 'i' || p[2] != 'n' || p[3] != 'g') return;
+
+  // We found it.
+  fIsVBR = True;
+
+  // The 4 bytes after "Xing" are flags saying which fields follow:
+  u_int32_t flags = (p[4]<<24) | (p[5]<<16) | (p[6]<<8) | p[7];
+  unsigned i = 8;
+  bytesAvailable -= 8;
+
+  if (flags&XING_FRAMES_FLAG) {
+    // The next 4 bytes are the number of frames:
+    if (bytesAvailable < 4) return;
+    fNumFramesInFile = (p[i]<<24)|(p[i+1]<<16)|(p[i+2]<<8)|(p[i+3]);
+    i += 4; bytesAvailable -= 4;
+  }
+
+  if (flags&XING_BYTES_FLAG) {
+    // The next 4 bytes is the file size:
+    if (bytesAvailable < 4) return;
+    fFileSize = (p[i]<<24)|(p[i+1]<<16)|(p[i+2]<<8)|(p[i+3]);
+    i += 4; bytesAvailable -= 4;
+  }
+
+  if (flags&XING_TOC_FLAG) {
+    // Fill in the Xing 'table of contents':
+    if (bytesAvailable < XING_TOC_LENGTH) return;
+    fHasXingTOC = True;
+    for (unsigned j = 0; j < XING_TOC_LENGTH; ++j) {
+      fXingTOC[j] = p[i+j];
+    }
+    // Fixed: advance by the TOC's length (XING_TOC_LENGTH == 100 bytes),
+    // not by the value of the XING_TOC_FLAG bit (4), which was a typo:
+    i += XING_TOC_LENGTH; bytesAvailable -= XING_TOC_LENGTH;
+  }
+}
diff --git a/liveMedia/MP3StreamState.hh b/liveMedia/MP3StreamState.hh
new file mode 100644
index 0000000..75f2b93
--- /dev/null
+++ b/liveMedia/MP3StreamState.hh
@@ -0,0 +1,90 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class encapsulating the state of a MP3 stream
+// C++ header
+
+#ifndef _MP3_STREAM_STATE_HH
+#define _MP3_STREAM_STATE_HH
+
+#ifndef _USAGE_ENVIRONMENT_HH
+#include "UsageEnvironment.hh"
+#endif
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+#ifndef _MP3_INTERNALS_HH
+#include "MP3Internals.hh"
+#endif
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+
+#include <stdio.h>
+
+#define XING_TOC_LENGTH 100
+
+// Encapsulates the state of one MP3 input stream (a file, or - as a hack -
+// a socket disguised as a FILE*): frame-by-frame reading, attribute
+// reporting, seeking, and Xing VBR header handling.
+class MP3StreamState {
+public:
+  MP3StreamState(UsageEnvironment& env);
+  virtual ~MP3StreamState();
+
+  // Take ownership of "fid" as our input, with total size "fileSize" bytes:
+  void assignStream(FILE* fid, unsigned fileSize);
+
+  unsigned findNextHeader(struct timeval& presentationTime);
+  Boolean readFrame(unsigned char* outBuf, unsigned outBufSize,
+		    unsigned& resultFrameSize,
+		    unsigned& resultDurationInMicroseconds);
+  // called after findNextHeader()
+
+  void getAttributes(char* buffer, unsigned bufferSize) const;
+
+  float filePlayTime() const; // in seconds
+  unsigned fileSize() const { return fFileSize; }
+  void setPresentationTimeScale(unsigned scale) { fPresentationTimeScale = scale; }
+  unsigned getByteNumberFromPositionFraction(float fraction); // 0.0 <= fraction <= 1.0
+  void seekWithinFile(unsigned seekByteNumber);
+
+  void checkForXingHeader(); // hack for Xing VBR files
+
+protected: // private->protected requested by Pierre l'Hussiez
+  // Read "numChars" bytes from the underlying file or socket:
+  unsigned readFromStream(unsigned char* buf, unsigned numChars);
+
+private:
+  // Shorthand accessors for the current frame's parameters:
+  MP3FrameParams& fr() {return fCurrentFrame;}
+  MP3FrameParams const& fr() const {return fCurrentFrame;}
+
+  struct timeval currentFramePlayTime() const;
+
+  Boolean findNextFrame();
+
+private:
+  UsageEnvironment& fEnv;
+  FILE* fFid; // may actually hold a socket - see "fFidIsReallyASocket"
+  Boolean fFidIsReallyASocket;
+  unsigned fFileSize; // in bytes
+  unsigned fNumFramesInFile;
+  unsigned fPresentationTimeScale;
+  // used if we're streaming at other than the normal rate
+  Boolean fIsVBR, fHasXingTOC;
+  u_int8_t fXingTOC[XING_TOC_LENGTH]; // set iff "fHasXingTOC" is True
+
+  MP3FrameParams fCurrentFrame;
+  struct timeval fNextFramePresentationTime;
+};
+
+#endif
diff --git a/liveMedia/MP3Transcoder.cpp b/liveMedia/MP3Transcoder.cpp
new file mode 100644
index 0000000..54f65d8
--- /dev/null
+++ b/liveMedia/MP3Transcoder.cpp
@@ -0,0 +1,52 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 Transcoder
+// Implementation
+
+#include "MP3Transcoder.hh"
+
+// Wrap "aduTranscoder" (which does the actual bitrate conversion, in the
+// ADU domain) back into an MP3 stream.
+// NOTE(review): the 'False' argument presumably disables ADU descriptors -
+// confirm against MP3FromADUSource's constructor.
+MP3Transcoder::MP3Transcoder(UsageEnvironment& env,
+			     MP3ADUTranscoder* aduTranscoder)
+  : MP3FromADUSource(env, aduTranscoder, False) {
+}
+
+MP3Transcoder::~MP3Transcoder() {
+}
+
+// Create a new MP3 transcoder (MP3 -> ADUs -> transcode -> MP3), reading
+// from "inputSource" and re-encoding at "outBitrate" kbps.
+// Returns NULL if any intermediate filter could not be created
+// (e.g., because "inputSource" is not an MP3 source).
+MP3Transcoder* MP3Transcoder::createNew(UsageEnvironment& env,
+					unsigned outBitrate /* in kbps */,
+					FramedSource* inputSource) {
+  // First, convert the incoming MP3 frames into ADUs.
+  // (This step also checks that "inputSource" is really an MP3 source.)
+  ADUFromMP3Source* aduFromMP3
+    = ADUFromMP3Source::createNew(env, inputSource, False);
+  if (aduFromMP3 == NULL) return NULL;
+
+  // Next, transcode the ADUs to the desired bitrate:
+  MP3ADUTranscoder* aduTranscoder
+    = MP3ADUTranscoder::createNew(env, outBitrate, aduFromMP3);
+  if (aduTranscoder == NULL) return NULL;
+
+  // Finally, wrap the result back up as an MP3 stream:
+  return new MP3Transcoder(env, aduTranscoder);
+}
diff --git a/liveMedia/MPEG1or2AudioRTPSink.cpp b/liveMedia/MPEG1or2AudioRTPSink.cpp
new file mode 100644
index 0000000..8486642
--- /dev/null
+++ b/liveMedia/MPEG1or2AudioRTPSink.cpp
@@ -0,0 +1,63 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG audio (RFC 2250)
+// Implementation
+
+#include "MPEG1or2AudioRTPSink.hh"
+
+// Payload format 14 with a 90000 Hz RTP timestamp frequency and MIME subtype
+// "MPA" - the static payload type assigned to MPEG audio (RFC 2250):
+MPEG1or2AudioRTPSink::MPEG1or2AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
+  : AudioRTPSink(env, RTPgs, 14, 90000, "MPA") {
+}
+
+MPEG1or2AudioRTPSink::~MPEG1or2AudioRTPSink() {
+}
+
+MPEG1or2AudioRTPSink*
+MPEG1or2AudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
+  return new MPEG1or2AudioRTPSink(env, RTPgs);
+}
+
+// Fill in the packet's 4-byte audio-specific header (RFC 2250), then let the
+// base class set the packet's RTP timestamp.
+void MPEG1or2AudioRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
+						  unsigned char* frameStart,
+						  unsigned numBytesInFrame,
+						  struct timeval framePresentationTime,
+						  unsigned numRemainingBytes) {
+  if (isFirstFrameInPacket()) {
+    // If this is also the very 1st packet, set the RTP 'M' (marker) bit
+    // (because this is considered the start of a talk spurt):
+    if (isFirstPacket()) setMarkerBit();
+
+    // Set the lower half of the audio-specific header to the
+    // "fragmentationOffset":
+    setSpecialHeaderWord(fragmentationOffset&0xFFFF);
+  }
+
+  // Important: Also call our base class's doSpecialFrameHandling(),
+  // to set the packet's timestamp:
+  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+					     frameStart, numBytesInFrame,
+					     framePresentationTime,
+					     numRemainingBytes);
+}
+
+unsigned MPEG1or2AudioRTPSink::specialHeaderSize() const {
+  // Each packet begins with a 4-byte audio-specific header:
+  unsigned const audioSpecialHeaderSize = 4;
+  return audioSpecialHeaderSize;
+}
diff --git a/liveMedia/MPEG1or2AudioRTPSource.cpp b/liveMedia/MPEG1or2AudioRTPSource.cpp
new file mode 100644
index 0000000..6d8e75a
--- /dev/null
+++ b/liveMedia/MPEG1or2AudioRTPSource.cpp
@@ -0,0 +1,62 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG-1 or MPEG-2 Audio RTP Sources
+// Implementation
+
+#include "MPEG1or2AudioRTPSource.hh"
+
+// Factory: create an MPEG audio RTP source reading from "RTPgs":
+MPEG1or2AudioRTPSource*
+MPEG1or2AudioRTPSource::createNew(UsageEnvironment& env,
+				  Groupsock* RTPgs,
+				  unsigned char rtpPayloadFormat,
+				  unsigned rtpTimestampFrequency) {
+  return new MPEG1or2AudioRTPSource(env, RTPgs, rtpPayloadFormat,
+				    rtpTimestampFrequency);
+}
+
+// All the real work is done by the MultiFramedRTPSource base class:
+MPEG1or2AudioRTPSource::MPEG1or2AudioRTPSource(UsageEnvironment& env,
+					       Groupsock* rtpGS,
+					       unsigned char rtpPayloadFormat,
+					       unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, rtpGS,
+			 rtpPayloadFormat, rtpTimestampFrequency) {
+}
+
+MPEG1or2AudioRTPSource::~MPEG1or2AudioRTPSource() {
+}
+
+// Consume the 4-byte MPEG audio header (RFC 2250) at the start of each packet.
+Boolean MPEG1or2AudioRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  // Each packet is prefixed by a 4-byte header indicating fragmentation.
+  unsigned const fragmentationHeaderSize = 4;
+  if (packet->dataSize() < fragmentationHeaderSize) return False;
+
+  // Note: This fragmentation header is actually useless to us, because
+  // it doesn't tell us whether or not this RTP packet *ends* a
+  // fragmented frame.  Thus, we can't use it to properly set
+  // "fCurrentPacketCompletesFrame".  Instead, we assume that even
+  // a partial audio frame will be usable to clients.
+  resultSpecialHeaderSize = fragmentationHeaderSize;
+  return True;
+}
+
+char const* MPEG1or2AudioRTPSource::MIMEtype() const {
+  static char const* const kOurMIMEtype = "audio/MPEG";
+  return kOurMIMEtype;
+}
+
diff --git a/liveMedia/MPEG1or2AudioStreamFramer.cpp b/liveMedia/MPEG1or2AudioStreamFramer.cpp
new file mode 100644
index 0000000..b5725dd
--- /dev/null
+++ b/liveMedia/MPEG1or2AudioStreamFramer.cpp
@@ -0,0 +1,210 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG (1,2) audio elementary stream into frames
+// Implementation
+
+#include "MPEG1or2AudioStreamFramer.hh"
+#include "StreamParser.hh"
+#include "MP3Internals.hh"
+#include <GroupsockHelper.hh>
+
+////////// MPEG1or2AudioStreamParser definition //////////
+
+// Parses raw input data into individual MPEG (1,2) audio frames, delivering
+// each frame into a caller-supplied buffer, on behalf of "usingSource".
+class MPEG1or2AudioStreamParser: public StreamParser {
+public:
+  MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource,
+			    FramedSource* inputSource);
+  virtual ~MPEG1or2AudioStreamParser();
+
+public:
+  unsigned parse(unsigned& numTruncatedBytes);
+  // returns the size of the frame that was acquired, or 0 if none was
+
+  // Record where (and how much) the next parsed frame should be delivered:
+  void registerReadInterest(unsigned char* to, unsigned maxSize);
+
+  MP3FrameParams const& currentFrame() const { return fCurrentFrame; }
+
+private:
+  unsigned char* fTo; // delivery destination for the next frame
+  unsigned fMaxSize;
+
+  // Parameters of the most recently read frame:
+  MP3FrameParams fCurrentFrame; // also works for layer I or II
+};
+
+
+////////// MPEG1or2AudioStreamFramer implementation //////////
+
+// Set up the framer, and create the stream parser that does the real work.
+// If "syncWithInputSource" is True, presentation times track those delivered
+// by the input source; otherwise they free-run from the wallclock.
+MPEG1or2AudioStreamFramer
+::MPEG1or2AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+			    Boolean syncWithInputSource)
+  : FramedFilter(env, inputSource),
+    fSyncWithInputSource(syncWithInputSource) {
+  reset();
+
+  fParser = new MPEG1or2AudioStreamParser(this, inputSource);
+}
+
+MPEG1or2AudioStreamFramer::~MPEG1or2AudioStreamFramer() {
+  delete fParser;
+}
+
+MPEG1or2AudioStreamFramer*
+MPEG1or2AudioStreamFramer::createNew(UsageEnvironment& env,
+				     FramedSource* inputSource,
+				     Boolean syncWithInputSource) {
+  // Need to add source type checking here??? #####
+  return new MPEG1or2AudioStreamFramer(env, inputSource, syncWithInputSource);
+}
+
+// Discard any buffered-but-unparsed input, and restart presentation timing:
+void MPEG1or2AudioStreamFramer::flushInput() {
+  reset();
+  fParser->flushInput();
+}
+
+void MPEG1or2AudioStreamFramer::reset() {
+  // Use the current wallclock time as the initial 'presentation time':
+  struct timeval timeNow;
+  gettimeofday(&timeNow, NULL);
+  resetPresentationTime(timeNow);
+}
+
+void MPEG1or2AudioStreamFramer
+::resetPresentationTime(struct timeval newPresentationTime) {
+  fNextFramePresentationTime = newPresentationTime;
+}
+
+void MPEG1or2AudioStreamFramer::doGetNextFrame() {
+  // Tell the parser where the next frame should go, then (try to) parse it:
+  fParser->registerReadInterest(fTo, fMaxSize);
+  continueReadProcessing();
+}
+
+#define MILLION 1000000
+
+// Samples per frame, indexed by MPEG audio 'layer' (1..3); index 0 unused:
+static unsigned const numSamplesByLayer[4] = {0, 384, 1152, 1152};
+
+// Compute the play time (duration) of the most recently parsed frame:
+// numSamples/samplingFrequency, rounded to the nearest microsecond.
+struct timeval MPEG1or2AudioStreamFramer::currentFramePlayTime() const {
+  MP3FrameParams const& frame = fParser->currentFrame();
+  unsigned const numSamples = numSamplesByLayer[frame.layer];
+  unsigned const samplingFrequency = frame.samplingFreq*(1 + frame.isMPEG2);
+
+  struct timeval result;
+  if (samplingFrequency == 0) {
+    // Guard against dividing by zero (e.g., from a bad header):
+    result.tv_sec = 0;
+    result.tv_usec = 0;
+  } else {
+    // "(x*2/freq + 1)/2" rounds x/freq to the nearest integer:
+    unsigned const uSeconds
+      = ((numSamples*2*MILLION)/samplingFrequency + 1)/2;
+
+    result.tv_sec = uSeconds/MILLION;
+    result.tv_usec = uSeconds%MILLION;
+  }
+  return result;
+}
+
+// Static callback, invoked by the parser when new input data has arrived:
+// optionally resynchronize our presentation time with the input source's,
+// then resume parsing.
+void MPEG1or2AudioStreamFramer
+::continueReadProcessing(void* clientData,
+			 unsigned char* /*ptr*/, unsigned /*size*/,
+			 struct timeval presentationTime) {
+  MPEG1or2AudioStreamFramer* ourFramer = (MPEG1or2AudioStreamFramer*)clientData;
+  if (ourFramer->fSyncWithInputSource) {
+    ourFramer->resetPresentationTime(presentationTime);
+  }
+
+  ourFramer->continueReadProcessing();
+}
+
+// Try to parse (and deliver) one frame; on success, update the presentation
+// time and duration, and notify our reader.
+void MPEG1or2AudioStreamFramer::continueReadProcessing() {
+  unsigned acquiredFrameSize = fParser->parse(fNumTruncatedBytes);
+  if (acquiredFrameSize > 0) {
+    // We were able to acquire a frame from the input.
+    // It has already been copied to the reader's space.
+    fFrameSize = acquiredFrameSize;
+
+    // Also set the presentation time, and increment it for next time,
+    // based on the length of this frame:
+    fPresentationTime = fNextFramePresentationTime;
+    struct timeval framePlayTime = currentFramePlayTime();
+    fDurationInMicroseconds = framePlayTime.tv_sec*MILLION + framePlayTime.tv_usec;
+    // Add "framePlayTime" to "fNextFramePresentationTime", carrying any
+    // microsecond overflow into the seconds field:
+    fNextFramePresentationTime.tv_usec += framePlayTime.tv_usec;
+    fNextFramePresentationTime.tv_sec
+      += framePlayTime.tv_sec + fNextFramePresentationTime.tv_usec/MILLION;
+    fNextFramePresentationTime.tv_usec %= MILLION;
+
+    // Call our own 'after getting' function.  Because we're not a 'leaf'
+    // source, we can call this directly, without risking infinite recursion.
+    afterGetting(this);
+  } else {
+    // We were unable to parse a complete frame from the input, because:
+    // - we had to read more data from the source stream, or
+    // - the source stream has ended.
+  }
+}
+
+
+////////// MPEG1or2AudioStreamParser implementation //////////
+
+// Hook the parser up to "inputSource"; when more data arrives, the framer's
+// "continueReadProcessing" callback is invoked.
+MPEG1or2AudioStreamParser
+::MPEG1or2AudioStreamParser(MPEG1or2AudioStreamFramer* usingSource,
+			    FramedSource* inputSource)
+  : StreamParser(inputSource, FramedSource::handleClosure, usingSource,
+		 &MPEG1or2AudioStreamFramer::continueReadProcessing, usingSource) {
+}
+
+MPEG1or2AudioStreamParser::~MPEG1or2AudioStreamParser() {
+}
+
+// Record where (and how much) the next parsed frame should be delivered:
+void MPEG1or2AudioStreamParser::registerReadInterest(unsigned char* to,
+						     unsigned maxSize) {
+  fTo = to;
+  fMaxSize = maxSize;
+}
+
+// Try to parse one complete MPEG audio frame from the input, copying it to
+// the registered destination.  Returns the delivered size (possibly
+// truncated to "fMaxSize"), or 0 if more input data is needed first.
+// (The base StreamParser signals 'need more data' by throwing an int.)
+unsigned MPEG1or2AudioStreamParser::parse(unsigned& numTruncatedBytes) {
+  try {
+    saveParserState();
+
+    // We expect a MPEG audio header (first 11 bits set to 1) at the start:
+    while (((fCurrentFrame.hdr = test4Bytes())&0xFFE00000) != 0xFFE00000) {
+      skipBytes(1);
+      saveParserState();
+    }
+
+    fCurrentFrame.setParamsFromHeader();
+
+    // Copy the frame to the requested destination:
+    unsigned frameSize = fCurrentFrame.frameSize + 4; // include header
+    if (frameSize > fMaxSize) {
+      numTruncatedBytes = frameSize - fMaxSize;
+      frameSize = fMaxSize;
+    } else {
+      numTruncatedBytes = 0;
+    }
+
+    getBytes(fTo, frameSize);
+    skipBytes(numTruncatedBytes);
+
+    return frameSize;
+  } catch (int /*e*/) {
+#ifdef DEBUG
+    fprintf(stderr, "MPEG1or2AudioStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+    return 0; // the parsing got interrupted
+  }
+}
diff --git a/liveMedia/MPEG1or2Demux.cpp b/liveMedia/MPEG1or2Demux.cpp
new file mode 100644
index 0000000..932582c
--- /dev/null
+++ b/liveMedia/MPEG1or2Demux.cpp
@@ -0,0 +1,756 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Demultiplexer for a MPEG 1 or 2 Program Stream
+// Implementation
+
+#include "MPEG1or2Demux.hh"
+#include "MPEG1or2DemuxedElementaryStream.hh"
+#include "StreamParser.hh"
+#include <stdlib.h>
+
+////////// MPEGProgramStreamParser definition //////////
+
+// An enum representing the current state of the parser:
+enum MPEGParseState {
+  PARSING_PACK_HEADER,
+  PARSING_SYSTEM_HEADER,
+  PARSING_PES_PACKET
+};
+
+// Parses a MPEG-1 or MPEG-2 Program Stream into its component PES packets,
+// delivering each acquired frame on behalf of "fUsingDemux":
+class MPEGProgramStreamParser: public StreamParser {
+public:
+  MPEGProgramStreamParser(MPEG1or2Demux* usingDemux, FramedSource* inputSource);
+  virtual ~MPEGProgramStreamParser();
+
+public:
+  unsigned char parse();
+  // returns the stream id of a stream for which a frame was acquired,
+  // or 0 if no such frame was acquired.
+
+private:
+  // Switch state (and checkpoint the parser position - see the impl):
+  void setParseState(MPEGParseState parseState);
+
+  void parsePackHeader();
+  void parseSystemHeader();
+  unsigned char parsePESPacket(); // returns as does parse()
+
+  Boolean isSpecialStreamId(unsigned char stream_id) const;
+  // for PES packet header parsing
+
+private:
+  MPEG1or2Demux* fUsingDemux;
+  MPEGParseState fCurrentParseState;
+};
+
+
+////////// MPEG1or2Demux::OutputDescriptor::SavedData definition/implementation //////////
+
+// A node in a singly-linked list of saved (not-yet-delivered) stream data.
+// Each node owns its data buffer; deleting a node also deletes the rest of
+// its chain.
+class MPEG1or2Demux::OutputDescriptor::SavedData {
+public:
+  SavedData(unsigned char* buf, unsigned size)
+    : next(NULL), data(buf), dataSize(size), numBytesUsed(0) {
+  }
+  virtual ~SavedData() {
+    delete[] data;
+    // Delete the rest of the chain iteratively rather than via a recursive
+    // "delete next;", to avoid overflowing the stack on very long chains:
+    SavedData* node = next;
+    while (node != NULL) {
+      SavedData* nextNode = node->next;
+      node->next = NULL; // so deleting "node" doesn't re-walk the chain
+      delete node;
+      node = nextNode;
+    }
+  }
+
+  SavedData* next;
+  unsigned char* data;
+  unsigned dataSize, numBytesUsed;
+};
+
+
+////////// MPEG1or2Demux implementation //////////
+
+// Set up the demux: create the Program Stream parser, and initialize the
+// per-stream-id output state (one slot per possible 8-bit stream id tag).
+MPEG1or2Demux
+::MPEG1or2Demux(UsageEnvironment& env,
+		FramedSource* inputSource, Boolean reclaimWhenLastESDies)
+  : Medium(env),
+    fInputSource(inputSource), fMPEGversion(0),
+    fNextAudioStreamNumber(0), fNextVideoStreamNumber(0),
+    fReclaimWhenLastESDies(reclaimWhenLastESDies), fNumOutstandingESs(0),
+    fNumPendingReads(0), fHaveUndeliveredData(False) {
+  fParser = new MPEGProgramStreamParser(this, inputSource);
+  for (unsigned i = 0; i < 256; ++i) {
+    fOutput[i].savedDataHead = fOutput[i].savedDataTail = NULL;
+    fOutput[i].isPotentiallyReadable = False;
+    fOutput[i].isCurrentlyActive = False;
+    fOutput[i].isCurrentlyAwaitingData = False;
+  }
+}
+
+MPEG1or2Demux::~MPEG1or2Demux() {
+  delete fParser;
+  // Discard any still-saved (undelivered) data for each stream:
+  for (unsigned i = 0; i < 256; ++i) delete fOutput[i].savedDataHead;
+  Medium::close(fInputSource);
+}
+
+MPEG1or2Demux* MPEG1or2Demux
+::createNew(UsageEnvironment& env,
+	    FramedSource* inputSource, Boolean reclaimWhenLastESDies) {
+  // Need to add source type checking here??? #####
+
+  return new MPEG1or2Demux(env, inputSource, reclaimWhenLastESDies);
+}
+
+// A SCR ('System Clock Reference') starts out invalid, with all fields zero:
+MPEG1or2Demux::SCR::SCR()
+  : highBit(0), remainingBits(0), extension(0), isValid(False) {
+}
+
+// Called when one of our demuxed elementary streams is deleted; if it was
+// the last one outstanding (and we were asked to), reclaim ourself too:
+void MPEG1or2Demux
+::noteElementaryStreamDeletion(MPEG1or2DemuxedElementaryStream* /*es*/) {
+  if (--fNumOutstandingESs == 0 && fReclaimWhenLastESDies) {
+    Medium::close(this);
+  }
+}
+
+// Discard any buffered, but not yet parsed, input data:
+void MPEG1or2Demux::flushInput() {
+  fParser->flushInput();
+}
+
+// Create a new demuxed output for the given stream id tag, and mark that
+// tag as one we may deliver data for:
+MPEG1or2DemuxedElementaryStream*
+MPEG1or2Demux::newElementaryStream(u_int8_t streamIdTag) {
+  ++fNumOutstandingESs;
+  fOutput[streamIdTag].isPotentiallyReadable = True;
+  return new MPEG1or2DemuxedElementaryStream(envir(), streamIdTag, *this);
+}
+
+// Allocate the next audio stream tag (the low 5 bits count successive calls):
+MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newAudioStream() {
+  unsigned char newAudioStreamTag = 0xC0 | (fNextAudioStreamNumber++&~0xE0);
+  // MPEG audio stream tags are 110x xxxx (binary)
+  return newElementaryStream(newAudioStreamTag);
+}
+
+// Allocate the next video stream tag (the low 4 bits count successive calls):
+MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newVideoStream() {
+  unsigned char newVideoStreamTag = 0xE0 | (fNextVideoStreamNumber++&~0xF0);
+  // MPEG video stream tags are 1110 xxxx (binary)
+  return newElementaryStream(newVideoStreamTag);
+}
+
+// Appropriate one of the reserved stream id tags to mean: return raw PES packets:
+#define RAW_PES 0xFC
+
+MPEG1or2DemuxedElementaryStream* MPEG1or2Demux::newRawPESStream() {
+  return newElementaryStream(RAW_PES);
+}
+
+// Record a pending read for stream "streamIdTag": where to deliver the data,
+// and which functions to call once it's delivered (or when the input closes).
+void MPEG1or2Demux::registerReadInterest(u_int8_t streamIdTag,
+					 unsigned char* to, unsigned maxSize,
+					 FramedSource::afterGettingFunc* afterGettingFunc,
+					 void* afterGettingClientData,
+					 FramedSource::onCloseFunc* onCloseFunc,
+					 void* onCloseClientData) {
+  struct OutputDescriptor& out = fOutput[streamIdTag];
+
+  // Make sure this stream is not already being read:
+  if (out.isCurrentlyAwaitingData) {
+    envir() << "MPEG1or2Demux::registerReadInterest(): attempt to read stream more than once!\n";
+    envir().internalError();
+  }
+
+  out.to = to; out.maxSize = maxSize;
+  out.fAfterGettingFunc = afterGettingFunc;
+  out.afterGettingClientData = afterGettingClientData;
+  out.fOnCloseFunc = onCloseFunc;
+  out.onCloseClientData = onCloseClientData;
+  out.isCurrentlyActive = True;
+  out.isCurrentlyAwaitingData = True;
+  // out.frameSize and out.presentationTime will be set when a frame's read
+
+  ++fNumPendingReads;
+}
+
+// If we have previously-saved (undelivered) data for this stream, deliver up
+// to "maxSize" bytes of it now, consuming the saved-data list as we go.
+// Returns True iff saved data was delivered this way.
+Boolean MPEG1or2Demux::useSavedData(u_int8_t streamIdTag,
+				    unsigned char* to, unsigned maxSize,
+				    FramedSource::afterGettingFunc* afterGettingFunc,
+				    void* afterGettingClientData) {
+  struct OutputDescriptor& out = fOutput[streamIdTag];
+  if (out.savedDataHead == NULL) return False; // common case
+
+  unsigned totNumBytesCopied = 0;
+  while (maxSize > 0 && out.savedDataHead != NULL) {
+    OutputDescriptor::SavedData& savedData = *(out.savedDataHead);
+    // Copy as much of this node's remaining data as fits:
+    unsigned char* from = &savedData.data[savedData.numBytesUsed];
+    unsigned numBytesToCopy = savedData.dataSize - savedData.numBytesUsed;
+    if (numBytesToCopy > maxSize) numBytesToCopy = maxSize;
+    memmove(to, from, numBytesToCopy);
+    to += numBytesToCopy;
+    maxSize -= numBytesToCopy;
+    out.savedDataTotalSize -= numBytesToCopy;
+    totNumBytesCopied += numBytesToCopy;
+    savedData.numBytesUsed += numBytesToCopy;
+    if (savedData.numBytesUsed == savedData.dataSize) {
+      // This node is fully consumed: unlink and delete it.
+      // (Clear "next" first so deleting the node doesn't delete the
+      //  rest of the chain.)
+      out.savedDataHead = savedData.next;
+      if (out.savedDataHead == NULL) out.savedDataTail = NULL;
+      savedData.next = NULL;
+      delete &savedData;
+    }
+  }
+
+  out.isCurrentlyActive = True;
+  if (afterGettingFunc != NULL) {
+    struct timeval presentationTime;
+    presentationTime.tv_sec = 0; presentationTime.tv_usec = 0; // should fix #####
+    (*afterGettingFunc)(afterGettingClientData, totNumBytesCopied,
+			0 /* numTruncatedBytes */, presentationTime,
+			0 /* durationInMicroseconds ?????#####*/);
+  }
+  return True;
+}
+
+// Static callback, invoked by the parser when more input data has arrived:
+// simply resume the demux's read processing.
+void MPEG1or2Demux
+::continueReadProcessing(void* clientData,
+			 unsigned char* /*ptr*/, unsigned /*size*/,
+			 struct timeval /*presentationTime*/) {
+  ((MPEG1or2Demux*)clientData)->continueReadProcessing();
+}
+
+// Keep parsing frames (and delivering them to their readers) until all
+// pending reads are satisfied, or until the parser needs more input:
+void MPEG1or2Demux::continueReadProcessing() {
+  while (fNumPendingReads > 0) {
+    unsigned char acquiredStreamIdTag = fParser->parse();
+
+    if (acquiredStreamIdTag != 0) {
+      // We were able to acquire a frame from the input.
+      struct OutputDescriptor& newOut = fOutput[acquiredStreamIdTag];
+      newOut.isCurrentlyAwaitingData = False;
+      // indicates that we can be read again
+      // (This needs to be set before the 'after getting' call below,
+      // in case it tries to read another frame)
+
+      // Call our own 'after getting' function.  Because we're not a 'leaf'
+      // source, we can call this directly, without risking infinite recursion.
+      // NOTE(review): "fNumPendingReads" is only decremented when
+      // "fAfterGettingFunc" is non-NULL - confirm that NULL-func reads
+      // can't occur, or this loop could fail to terminate.
+      if (newOut.fAfterGettingFunc != NULL) {
+	(*newOut.fAfterGettingFunc)(newOut.afterGettingClientData,
+				    newOut.frameSize, 0 /* numTruncatedBytes */,
+				    newOut.presentationTime,
+				    0 /* durationInMicroseconds ?????#####*/);
+	--fNumPendingReads;
+      }
+    } else {
+      // We were unable to parse a complete frame from the input, because:
+      // - we had to read more data from the source stream, or
+      // - we found a frame for a stream that was being read, but whose
+      //   reader is not ready to get the frame right now, or
+      // - the source stream has ended.
+      break;
+    }
+  }
+}
+
+// Public entry point: read the next frame for stream "streamIdTag".
+// Delivers previously-saved data if any; otherwise registers the read and
+// (if we're the only pending reader) drives the parser forward.
+void MPEG1or2Demux::getNextFrame(u_int8_t streamIdTag,
+				 unsigned char* to, unsigned maxSize,
+				 FramedSource::afterGettingFunc* afterGettingFunc,
+				 void* afterGettingClientData,
+				 FramedSource::onCloseFunc* onCloseFunc,
+				 void* onCloseClientData) {
+  // First, check whether we have saved data for this stream id:
+  if (useSavedData(streamIdTag, to, maxSize,
+		   afterGettingFunc, afterGettingClientData)) {
+    return;
+  }
+
+  // Then save the parameters of the specified stream id:
+  registerReadInterest(streamIdTag, to, maxSize,
+		       afterGettingFunc, afterGettingClientData,
+		       onCloseFunc, onCloseClientData);
+
+  // Next, if we're the only currently pending read, continue looking for data:
+  if (fNumPendingReads == 1 || fHaveUndeliveredData) {
+    fHaveUndeliveredData = 0;
+    continueReadProcessing();
+  } // otherwise the continued read processing has already been taken care of
+}
+
+// Cancel any pending read on stream "streamIdTag":
+void MPEG1or2Demux::stopGettingFrames(u_int8_t streamIdTag) {
+  struct OutputDescriptor& out = fOutput[streamIdTag];
+
+  if (out.isCurrentlyAwaitingData && fNumPendingReads > 0) --fNumPendingReads;
+
+  out.isCurrentlyActive = out.isCurrentlyAwaitingData = False;
+}
+
+// Called (as a close handler) when our underlying input source closes:
+// notify every reader that is currently awaiting data, and reset all
+// per-stream state.
+void MPEG1or2Demux::handleClosure(void* clientData) {
+  MPEG1or2Demux* demux = (MPEG1or2Demux*)clientData;
+
+  demux->fNumPendingReads = 0;
+
+  // Tell all pending readers that our source has closed.
+  // Note that we need to make a copy of our readers' close functions
+  // (etc.) before we start calling any of them, in case one of them
+  // ends up deleting this.
+  struct {
+    FramedSource::onCloseFunc* fOnCloseFunc;
+    void* onCloseClientData;
+  } savedPending[256]; // one slot per possible stream id tag
+  unsigned i, numPending = 0;
+  for (i = 0; i < 256; ++i) {
+    struct OutputDescriptor& out = demux->fOutput[i];
+    if (out.isCurrentlyAwaitingData) {
+      if (out.fOnCloseFunc != NULL) {
+        savedPending[numPending].fOnCloseFunc = out.fOnCloseFunc;
+        savedPending[numPending].onCloseClientData = out.onCloseClientData;
+        ++numPending;
+      }
+    }
+    // Discard any data that was saved for a not-yet-started reader:
+    delete out.savedDataHead; out.savedDataHead = out.savedDataTail = NULL;
+    out.savedDataTotalSize = 0;
+    out.isPotentiallyReadable = out.isCurrentlyActive = out.isCurrentlyAwaitingData
+      = False;
+  }
+  // Only now - with all of our state reset - is it safe to run the handlers:
+  for (i = 0; i < numPending; ++i) {
+    (*savedPending[i].fOnCloseFunc)(savedPending[i].onCloseClientData);
+  }
+}
+
+
+////////// MPEGProgramStreamParser implementation //////////
+
+#include <string.h>
+
+// Construct a parser that reads from "inputSource" on behalf of "usingDemux".
+// (The demux's handlers are registered for source closure and for resuming a
+//  parse when more input arrives.)  Parsing starts by looking for a pack header.
+MPEGProgramStreamParser::MPEGProgramStreamParser(MPEG1or2Demux* usingDemux,
+                                                 FramedSource* inputSource)
+  : StreamParser(inputSource, MPEG1or2Demux::handleClosure, usingDemux,
+                 &MPEG1or2Demux::continueReadProcessing, usingDemux),
+    fUsingDemux(usingDemux), fCurrentParseState(PARSING_PACK_HEADER) {
+}
+
+MPEGProgramStreamParser::~MPEGProgramStreamParser() {
+  // (Nothing of our own to release; the "StreamParser" base class cleans up.)
+}
+
+// Record the new parse state, and also save the parser's current position,
+// so that an interrupted parse later resumes from here (rather than from
+// the start of the current structure):
+void MPEGProgramStreamParser::setParseState(MPEGParseState parseState) {
+  fCurrentParseState = parseState;
+  saveParserState();
+}
+
+// Parse as much input as we can, until either a complete frame has been
+// delivered to a reader (return its stream id tag), or the parse gets
+// interrupted - e.g. because more input data is needed (return 0).
+unsigned char MPEGProgramStreamParser::parse() {
+  unsigned char deliveredStreamIdTag = 0;
+
+  try {
+    // Keep stepping the parse state machine until a frame is delivered:
+    while (deliveredStreamIdTag == 0) {
+      switch (fCurrentParseState) {
+        case PARSING_PACK_HEADER:
+          parsePackHeader();
+          break;
+        case PARSING_SYSTEM_HEADER:
+          parseSystemHeader();
+          break;
+        case PARSING_PES_PACKET:
+          deliveredStreamIdTag = parsePESPacket();
+          break;
+      }
+    }
+
+    return deliveredStreamIdTag;
+  } catch (int /*e*/) {
+#ifdef DEBUG
+    fprintf(stderr, "MPEGProgramStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+    fflush(stderr);
+#endif
+    return 0; // the parsing got interrupted
+  }
+}
+
+#define PACK_START_CODE 0x000001BA
+#define SYSTEM_HEADER_START_CODE 0x000001BB
+#define PACKET_START_CODE_PREFIX 0x00000100
+
+// True iff "code" is a PES packet start code: the 24-bit prefix 0x000001,
+// followed by a stream id greater than 0xBB (i.e. neither a pack header
+// nor a system header start code).
+static inline Boolean isPacketStartCode(unsigned code) {
+  if ((code&0xFFFFFF00) != PACKET_START_CODE_PREFIX) return False;
+  return code > SYSTEM_HEADER_START_CODE;
+}
+
+// Parse a pack header.  First resynchronize to a pack_start_code (skipping
+// over garbage, or redirecting if a system header or PES packet appears
+// instead); then extract the SCR timestamp and record it in the demux.
+void MPEGProgramStreamParser::parsePackHeader() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing pack header\n"); fflush(stderr);
+#endif
+  unsigned first4Bytes;
+  while (1) {
+    first4Bytes = test4Bytes();
+
+    // We're supposed to have a pack header here, but check also for
+    // a system header or a PES packet, just in case:
+    if (first4Bytes == PACK_START_CODE) {
+      skipBytes(4);
+      break;
+    } else if (first4Bytes == SYSTEM_HEADER_START_CODE) {
+#ifdef DEBUG
+      fprintf(stderr, "found system header instead of pack header\n");
+#endif
+      setParseState(PARSING_SYSTEM_HEADER);
+      return;
+    } else if (isPacketStartCode(first4Bytes)) {
+#ifdef DEBUG
+      fprintf(stderr, "found packet start code 0x%02x instead of pack header\n", first4Bytes);
+#endif
+      setParseState(PARSING_PES_PACKET);
+      return;
+    }
+
+    setParseState(PARSING_PACK_HEADER); // ensures we progress over bad data
+    if ((first4Bytes&0xFF) > 1) { // a system code definitely doesn't start here
+      skipBytes(4);
+    } else {
+      skipBytes(1);
+    }
+  }
+
+  // The size of the pack header differs depending on whether it's
+  // MPEG-1 or MPEG-2.  The next byte tells us this:
+  unsigned char nextByte = get1Byte();
+  MPEG1or2Demux::SCR& scr = fUsingDemux->fLastSeenSCR; // alias
+  if ((nextByte&0xF0) == 0x20) { // MPEG-1
+    fUsingDemux->fMPEGversion = 1;
+    // Assemble the 33-bit SCR base: the high bit is kept separately; the low
+    // 32 bits are pieced together from the marker-bit-delimited fields:
+    scr.highBit = (nextByte&0x08)>>3;
+    scr.remainingBits = (nextByte&0x06)<<29;
+    unsigned next4Bytes = get4Bytes();
+    scr.remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+    scr.remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+    scr.extension = 0; // MPEG-1 has no SCR extension field
+    scr.isValid = True;
+    skipBits(24); // the rest of the MPEG-1 pack header (marker bits + mux_rate)
+
+#if defined(DEBUG_TIMESTAMPS) || defined(DEBUG_SCR_TIMESTAMPS)
+    fprintf(stderr, "pack hdr system_clock_reference_base: 0x%x",
+            scr.highBit);
+    fprintf(stderr, "%08x\n", scr.remainingBits);
+#endif
+  } else if ((nextByte&0xC0) == 0x40) { // MPEG-2
+    fUsingDemux->fMPEGversion = 2;
+    // As above, but with the MPEG-2 field layout, and with a 9-bit SCR
+    // extension following the base:
+    scr.highBit = (nextByte&0x20)>>5;
+    scr.remainingBits = (nextByte&0x18)<<27;
+    scr.remainingBits |= (nextByte&0x03)<<28;
+    unsigned next4Bytes = get4Bytes();
+    scr.remainingBits |= (next4Bytes&0xFFF80000)>>4;
+    scr.remainingBits |= (next4Bytes&0x0003FFF8)>>3;
+    scr.extension = (next4Bytes&0x00000003)<<7;
+    next4Bytes = get4Bytes();
+    scr.extension |= (next4Bytes&0xFE000000)>>25;
+    scr.isValid = True;
+    skipBits(5); // 5 reserved bits precede pack_stuffing_length
+
+#if defined(DEBUG_TIMESTAMPS) || defined(DEBUG_SCR_TIMESTAMPS)
+    fprintf(stderr, "pack hdr system_clock_reference_base: 0x%x",
+            scr.highBit);
+    fprintf(stderr, "%08x\n", scr.remainingBits);
+    fprintf(stderr, "pack hdr system_clock_reference_extension: 0x%03x\n",
+            scr.extension);
+#endif
+    // MPEG-2 pack headers may end with stuffing bytes; skip them:
+    unsigned char pack_stuffing_length = getBits(3);
+    skipBytes(pack_stuffing_length);
+  } else { // unknown
+    fUsingDemux->envir() << "StreamParser::parsePack() saw strange byte following pack_start_code\n";
+  }
+
+  // Check for a System Header next:
+  setParseState(PARSING_SYSTEM_HEADER);
+}
+
+// Parse (and skip over) a system header, if one is present; then move on
+// to look for a PES packet.
+void MPEGProgramStreamParser::parseSystemHeader() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing system header\n"); fflush(stderr);
+#endif
+  unsigned next4Bytes = test4Bytes();
+  if (next4Bytes != SYSTEM_HEADER_START_CODE) {
+    // The system header was optional.  Look for a PES Packet instead:
+    setParseState(PARSING_PES_PACKET);
+    return;
+  }
+
+#ifdef DEBUG
+  fprintf(stderr, "saw system_header_start_code\n"); fflush(stderr);
+#endif
+  skipBytes(4); // we've already seen the system_header_start_code
+
+  unsigned short remaining_header_length = get2Bytes();
+
+  // According to the MPEG-1 and MPEG-2 specs, "remaining_header_length" should be
+  // at least 6 bytes.  Check this now:
+  if (remaining_header_length < 6) {
+    fUsingDemux->envir() << "StreamParser::parseSystemHeader(): saw strange header_length: "
+                         << remaining_header_length << " < 6\n";
+  }
+  // We don't use the header's contents; just skip over them:
+  skipBytes(remaining_header_length);
+
+  // Check for a PES Packet next:
+  setParseState(PARSING_PES_PACKET);
+}
+
+#define private_stream_1 0xBD
+#define private_stream_2 0xBF
+
+// A test for stream ids that are exempt from normal PES packet header parsing
+Boolean MPEGProgramStreamParser
+::isSpecialStreamId(unsigned char stream_id) const {
+  if (stream_id == RAW_PES) return True; // hack
+
+  if (fUsingDemux->fMPEGversion == 1) {
+    // In MPEG-1, only "private_stream_2" is 'special':
+    return stream_id == private_stream_2;
+  }
+
+  // Otherwise, assume MPEG-2:
+  if (stream_id <= private_stream_2) {
+    // Everything at or below 0xBF is 'special', except "private_stream_1":
+    return stream_id == private_stream_1 ? False : True;
+  }
+  if ((stream_id&0xF0) == 0xF0) {
+    // Of the 0xFx ids, 0xF0-0xF2, 0xF8, and 0xFF are 'special':
+    unsigned char const low4 = stream_id&0x0F;
+    return low4 <= 2 || low4 == 0x8 || low4 == 0xF;
+  }
+  return False;
+}
+
+#define READER_NOT_READY 2
+
+// Parse a single PES packet:
+// - If no packet_start_code is present here, revert to looking for a pack
+//   header (and return 0).
+// - Otherwise, parse over the packet header (MPEG-1 and MPEG-2 layouts
+//   differ), then deliver, save, or skip the packet's payload, depending on
+//   the state of this stream's reader (if any).
+// Returns the stream id tag of a delivered frame, or 0 if none was delivered.
+// Throws READER_NOT_READY if a reader exists but isn't currently asking for data.
+unsigned char MPEGProgramStreamParser::parsePESPacket() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing PES packet\n"); fflush(stderr);
+#endif
+  unsigned next4Bytes = test4Bytes();
+  if (!isPacketStartCode(next4Bytes)) {
+    // The PES Packet was optional.  Look for a Pack Header instead:
+    setParseState(PARSING_PACK_HEADER);
+    return 0;
+  }
+
+#ifdef DEBUG
+  fprintf(stderr, "saw packet_start_code_prefix\n"); fflush(stderr);
+#endif
+  skipBytes(3); // we've already seen the packet_start_code_prefix
+
+  unsigned char stream_id = get1Byte();
+#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
+  // (Debug builds only) Classify the stream id, for logging:
+  unsigned char streamNum = stream_id;
+  char const* streamTypeStr;
+  if ((stream_id&0xE0) == 0xC0) {
+    streamTypeStr = "audio";
+    streamNum = stream_id&~0xE0;
+  } else if ((stream_id&0xF0) == 0xE0) {
+    streamTypeStr = "video";
+    streamNum = stream_id&~0xF0;
+  } else if (stream_id == 0xbc) {
+    streamTypeStr = "reserved";
+  } else if (stream_id == 0xbd) {
+    streamTypeStr = "private_1";
+  } else if (stream_id == 0xbe) {
+    streamTypeStr = "padding";
+  } else if (stream_id == 0xbf) {
+    streamTypeStr = "private_2";
+  } else {
+    streamTypeStr = "unknown";
+  }
+#endif
+#ifdef DEBUG
+  static unsigned frameCount = 1;
+  fprintf(stderr, "%d, saw %s stream: 0x%02x\n", frameCount, streamTypeStr, streamNum); fflush(stderr);
+#endif
+
+  unsigned short PES_packet_length = get2Bytes();
+#ifdef DEBUG
+  fprintf(stderr, "PES_packet_length: %d\n", PES_packet_length); fflush(stderr);
+#endif
+
+  // Parse over the rest of the header, until we get to the packet data itself.
+  // This varies depending upon the MPEG version:
+  if (fUsingDemux->fOutput[RAW_PES].isPotentiallyReadable) {
+    // Hack: We've been asked to return raw PES packets, for every stream:
+    stream_id = RAW_PES;
+  }
+  unsigned savedParserOffset = curOffset();
+    // remembered, so we can compute (below) how many header bytes we consumed
+#ifdef DEBUG_TIMESTAMPS
+  unsigned char pts_highBit = 0;
+  unsigned pts_remainingBits = 0;
+  unsigned char dts_highBit = 0;
+  unsigned dts_remainingBits = 0;
+#endif
+  if (fUsingDemux->fMPEGversion == 1) {
+    if (!isSpecialStreamId(stream_id)) {
+      // MPEG-1 packet header: skip stuffing bytes and (optional) STD buffer
+      // fields, then parse (or skip) the PTS/DTS fields:
+      unsigned char nextByte;
+      while ((nextByte = get1Byte()) == 0xFF) { // stuffing_byte
+      }
+      if ((nextByte&0xC0) == 0x40) { // '01'
+        skipBytes(1);
+        nextByte = get1Byte();
+      }
+      if ((nextByte&0xF0) == 0x20) { // '0010' - PTS only
+#ifdef DEBUG_TIMESTAMPS
+        pts_highBit = (nextByte&0x08)>>3;
+        pts_remainingBits = (nextByte&0x06)<<29;
+        unsigned next4Bytes = get4Bytes();
+        pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+        pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+#else
+        skipBytes(4);
+#endif
+      } else if ((nextByte&0xF0) == 0x30) { // '0011' - PTS followed by DTS
+#ifdef DEBUG_TIMESTAMPS
+        pts_highBit = (nextByte&0x08)>>3;
+        pts_remainingBits = (nextByte&0x06)<<29;
+        unsigned next4Bytes = get4Bytes();
+        pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+        pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+
+        nextByte = get1Byte();
+        dts_highBit = (nextByte&0x08)>>3;
+        dts_remainingBits = (nextByte&0x06)<<29;
+        next4Bytes = get4Bytes();
+        dts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+        dts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+#else
+        skipBytes(9);
+#endif
+      }
+    }
+  } else { // assume MPEG-2
+    if (!isSpecialStreamId(stream_id)) {
+      // Fields in the next 3 bytes determine the size of the rest:
+      unsigned next3Bytes = getBits(24);
+#ifdef DEBUG_TIMESTAMPS
+      unsigned char PTS_DTS_flags = (next3Bytes&0x00C000)>>14;
+#endif
+#ifdef undef
+      unsigned char ESCR_flag = (next3Bytes&0x002000)>>13;
+      unsigned char ES_rate_flag = (next3Bytes&0x001000)>>12;
+      unsigned char DSM_trick_mode_flag = (next3Bytes&0x000800)>>11;
+#endif
+      unsigned char PES_header_data_length = (next3Bytes&0x0000FF);
+#ifdef DEBUG
+      fprintf(stderr, "PES_header_data_length: 0x%02x\n", PES_header_data_length); fflush(stderr);
+#endif
+#ifdef DEBUG_TIMESTAMPS
+      if (PTS_DTS_flags == 0x2 && PES_header_data_length >= 5) {
+        unsigned char nextByte = get1Byte();
+        pts_highBit = (nextByte&0x08)>>3;
+        pts_remainingBits = (nextByte&0x06)<<29;
+        unsigned next4Bytes = get4Bytes();
+        pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+        pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+
+        skipBytes(PES_header_data_length-5);
+      } else if (PTS_DTS_flags == 0x3 && PES_header_data_length >= 10) {
+        unsigned char nextByte = get1Byte();
+        pts_highBit = (nextByte&0x08)>>3;
+        pts_remainingBits = (nextByte&0x06)<<29;
+        unsigned next4Bytes = get4Bytes();
+        pts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+        pts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+
+        nextByte = get1Byte();
+        dts_highBit = (nextByte&0x08)>>3;
+        dts_remainingBits = (nextByte&0x06)<<29;
+        next4Bytes = get4Bytes();
+        dts_remainingBits |= (next4Bytes&0xFFFE0000)>>2;
+        dts_remainingBits |= (next4Bytes&0x0000FFFE)>>1;
+
+        skipBytes(PES_header_data_length-10);
+      }
+#else
+      skipBytes(PES_header_data_length);
+#endif
+    }
+  }
+#ifdef DEBUG_TIMESTAMPS
+  fprintf(stderr, "%s stream, ", streamTypeStr);
+  fprintf(stderr, "packet presentation_time_stamp: 0x%x", pts_highBit);
+  fprintf(stderr, "%08x\n", pts_remainingBits);
+  fprintf(stderr, "\t\tpacket decoding_time_stamp: 0x%x", dts_highBit);
+  fprintf(stderr, "%08x\n", dts_remainingBits);
+#endif
+
+  // The rest of the packet will be the "PES_packet_data_byte"s
+  // Make sure that "PES_packet_length" was consistent with where we are now:
+  unsigned char acquiredStreamIdTag = 0;
+  unsigned currentParserOffset = curOffset();
+  unsigned bytesSkipped = currentParserOffset - savedParserOffset;
+  if (stream_id == RAW_PES) {
+    restoreSavedParserState(); // so we deliver from the beginning of the PES packet
+    PES_packet_length += 6; // to include the whole of the PES packet
+      // NOTE(review): 16-bit addition; would wrap for PES_packet_length > 65529 -
+      // presumably never seen in practice, but worth confirming.
+    bytesSkipped = 0;
+  }
+  if (PES_packet_length < bytesSkipped) {
+    fUsingDemux->envir() << "StreamParser::parsePESPacket(): saw inconsistent PES_packet_length "
+                         << PES_packet_length << " < "
+                         << bytesSkipped << "\n";
+  } else {
+    PES_packet_length -= bytesSkipped; // the number of payload bytes remaining
+#ifdef DEBUG
+    unsigned next4Bytes = test4Bytes();
+#endif
+
+    // Check whether our using source is interested in this stream type.
+    // If so, deliver the frame to him:
+    MPEG1or2Demux::OutputDescriptor_t& out = fUsingDemux->fOutput[stream_id];
+    if (out.isCurrentlyAwaitingData) {
+      // A reader is waiting right now: copy (truncating if necessary) and deliver.
+      unsigned numBytesToCopy;
+      if (PES_packet_length > out.maxSize) {
+        fUsingDemux->envir() << "MPEGProgramStreamParser::parsePESPacket() error: PES_packet_length ("
+                             << PES_packet_length
+                             << ") exceeds max frame size asked for ("
+                             << out.maxSize << ")\n";
+        numBytesToCopy = out.maxSize;
+      } else {
+        numBytesToCopy = PES_packet_length;
+      }
+
+      getBytes(out.to, numBytesToCopy);
+      out.frameSize = numBytesToCopy;
+#ifdef DEBUG
+      fprintf(stderr, "%d, %d bytes of PES_packet_data (out.maxSize: %d); first 4 bytes: 0x%08x\n", frameCount, numBytesToCopy, out.maxSize, next4Bytes); fflush(stderr);
+#endif
+      // set out.presentationTime later #####
+      acquiredStreamIdTag = stream_id;
+      PES_packet_length -= numBytesToCopy;
+    } else if (out.isCurrentlyActive) {
+      // Someone has been reading this stream, but isn't right now.
+      // We can't deliver this frame until he asks for it, so punt for now.
+      // The next time he asks for a frame, he'll get it.
+#ifdef DEBUG
+      fprintf(stderr, "%d, currently undeliverable PES data; first 4 bytes: 0x%08x - currently undeliverable!\n", frameCount, next4Bytes); fflush(stderr);
+#endif
+      restoreSavedParserState(); // so we read from the beginning next time
+      fUsingDemux->fHaveUndeliveredData = True;
+      throw READER_NOT_READY; // caught in "parse()"
+    } else if (out.isPotentiallyReadable &&
+               out.savedDataTotalSize + PES_packet_length < 1000000 /*limit*/) {
+      // Someone is interested in this stream, but hasn't begun reading it yet.
+      // Save this data, so that the reader will get it when he later asks for it.
+      unsigned char* buf = new unsigned char[PES_packet_length];
+      getBytes(buf, PES_packet_length);
+      MPEG1or2Demux::OutputDescriptor::SavedData* savedData
+        = new MPEG1or2Demux::OutputDescriptor::SavedData(buf, PES_packet_length);
+      // Append to this stream's saved-data list:
+      if (out.savedDataHead == NULL) {
+        out.savedDataHead = out.savedDataTail = savedData;
+      } else {
+        out.savedDataTail->next = savedData;
+        out.savedDataTail = savedData;
+      }
+      out.savedDataTotalSize += PES_packet_length;
+      PES_packet_length = 0;
+    }
+    // Skip over any payload that wasn't consumed above:
+    skipBytes(PES_packet_length);
+  }
+
+  // Check for another PES Packet next:
+  setParseState(PARSING_PES_PACKET);
+#ifdef DEBUG
+  ++frameCount;
+#endif
+  return acquiredStreamIdTag;
+}
diff --git a/liveMedia/MPEG1or2DemuxedElementaryStream.cpp b/liveMedia/MPEG1or2DemuxedElementaryStream.cpp
new file mode 100644
index 0000000..7da5805
--- /dev/null
+++ b/liveMedia/MPEG1or2DemuxedElementaryStream.cpp
@@ -0,0 +1,88 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A MPEG 1 or 2 Elementary Stream, demultiplexed from a Program Stream
+// Implementation
+
+#include "MPEG1or2DemuxedElementaryStream.hh"
+
+////////// MPEG1or2DemuxedElementaryStream //////////
+
+// Construct an elementary stream with the given id tag, delivered by
+// "sourceDemux".
+MPEG1or2DemuxedElementaryStream::
+MPEG1or2DemuxedElementaryStream(UsageEnvironment& env, u_int8_t streamIdTag,
+                                MPEG1or2Demux& sourceDemux)
+  : FramedSource(env),
+    fOurStreamIdTag(streamIdTag), fOurSourceDemux(sourceDemux), fMPEGversion(0) {
+  // Set our MIME type string: start with the generic default, then override
+  // it for the media types that we recognize from the stream id tag:
+  fMIMEtype = MediaSource::MIMEtype();
+  if ((streamIdTag&0xE0) == 0xC0) {
+    fMIMEtype = "audio/MPEG"; // audio stream ids: 0xC0-0xDF
+  } else if ((streamIdTag&0xF0) == 0xE0) {
+    fMIMEtype = "video/MPEG"; // video stream ids: 0xE0-0xEF
+  }
+}
+
+MPEG1or2DemuxedElementaryStream::~MPEG1or2DemuxedElementaryStream() {
+  // Let our demux know that we're going away (it may delete itself as a
+  // result, if we were its last elementary stream):
+  fOurSourceDemux.noteElementaryStreamDeletion(this);
+}
+
+// "FramedSource" interface: forward the read request to our demux, which
+// delivers into the buffer ("fTo"/"fMaxSize") that our caller set up:
+void MPEG1or2DemuxedElementaryStream::doGetNextFrame() {
+  fOurSourceDemux.getNextFrame(fOurStreamIdTag, fTo, fMaxSize,
+                               afterGettingFrame, this,
+                               handleClosure, this);
+}
+
+// "FramedSource" interface: cancel any pending read on our stream:
+void MPEG1or2DemuxedElementaryStream::doStopGettingFrames() {
+  fOurSourceDemux.stopGettingFrames(fOurStreamIdTag);
+}
+
+// Return the MIME type that was determined (from our stream id tag) at
+// construction time:
+char const* MPEG1or2DemuxedElementaryStream::MIMEtype() const {
+  return fMIMEtype;
+}
+
+// The largest frame we can ever deliver: a maximal PES packet.
+unsigned MPEG1or2DemuxedElementaryStream::maxFrameSize() const {
+  // The MPEG spec allows for PES packets as large as (6 + 65535) bytes:
+  // a 6-byte packet header, plus up to 65535 bytes of data.
+  unsigned const pesHeaderSize = 6;
+  unsigned const maxPESPayloadSize = 65535;
+  return pesHeaderSize + maxPESPayloadSize;
+}
+
+// Static trampoline: recover the object pointer from "clientData", then
+// invoke the member function below:
+void MPEG1or2DemuxedElementaryStream
+::afterGettingFrame(void* clientData,
+                    unsigned frameSize, unsigned numTruncatedBytes,
+                    struct timeval presentationTime,
+                    unsigned durationInMicroseconds) {
+  MPEG1or2DemuxedElementaryStream* stream
+    = (MPEG1or2DemuxedElementaryStream*)clientData;
+  stream->afterGettingFrame1(frameSize, numTruncatedBytes,
+                             presentationTime, durationInMicroseconds);
+}
+
+// Record the delivered frame's parameters - plus the demux's most recently
+// seen SCR and MPEG version - then hand the frame to our downstream reader:
+void MPEG1or2DemuxedElementaryStream
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+                     struct timeval presentationTime,
+                     unsigned durationInMicroseconds) {
+  fFrameSize = frameSize;
+  fNumTruncatedBytes = numTruncatedBytes;
+  fPresentationTime = presentationTime;
+  fDurationInMicroseconds = durationInMicroseconds;
+
+  fLastSeenSCR = fOurSourceDemux.lastSeenSCR();
+  fMPEGversion = fOurSourceDemux.mpegVersion();
+
+  FramedSource::afterGetting(this);
+}
diff --git a/liveMedia/MPEG1or2DemuxedServerMediaSubsession.cpp b/liveMedia/MPEG1or2DemuxedServerMediaSubsession.cpp
new file mode 100644
index 0000000..6c5dd1a
--- /dev/null
+++ b/liveMedia/MPEG1or2DemuxedServerMediaSubsession.cpp
@@ -0,0 +1,134 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-1 or 2 demuxer.
+// Implementation
+
+#include "MPEG1or2DemuxedServerMediaSubsession.hh"
+#include "MPEG1or2AudioStreamFramer.hh"
+#include "MPEG1or2AudioRTPSink.hh"
+#include "MPEG1or2VideoStreamFramer.hh"
+#include "MPEG1or2VideoRTPSink.hh"
+#include "AC3AudioStreamFramer.hh"
+#include "AC3AudioRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+
+// Factory.  "streamIdTag" selects which elementary stream within "demux"
+// this subsession serves; "iFramesOnly" and "vshPeriod" apply to video only.
+MPEG1or2DemuxedServerMediaSubsession* MPEG1or2DemuxedServerMediaSubsession
+::createNew(MPEG1or2FileServerDemux& demux, u_int8_t streamIdTag,
+            Boolean reuseFirstSource, Boolean iFramesOnly, double vshPeriod) {
+  return new MPEG1or2DemuxedServerMediaSubsession(demux, streamIdTag,
+                                                  reuseFirstSource,
+                                                  iFramesOnly, vshPeriod);
+}
+
+// Constructor: record which demux and stream id we serve, plus the
+// video-only framing options:
+MPEG1or2DemuxedServerMediaSubsession
+::MPEG1or2DemuxedServerMediaSubsession(MPEG1or2FileServerDemux& demux,
+                                       u_int8_t streamIdTag, Boolean reuseFirstSource,
+                                       Boolean iFramesOnly, double vshPeriod)
+  : OnDemandServerMediaSubsession(demux.envir(), reuseFirstSource),
+    fOurDemux(demux), fStreamIdTag(streamIdTag),
+    fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) {
+}
+
+MPEG1or2DemuxedServerMediaSubsession::~MPEG1or2DemuxedServerMediaSubsession() {
+  // (Nothing to clean up here; we don't own "fOurDemux".)
+}
+
+// Create a new (framed) stream source for our stream, for the given client
+// session.  Also sets "estBitrate" (kbps) to a rough estimate.  Returns NULL
+// on failure.
+FramedSource* MPEG1or2DemuxedServerMediaSubsession
+::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) {
+  FramedSource* es = fOurDemux.newElementaryStream(clientSessionId, fStreamIdTag);
+
+  if (es != NULL) {
+    // Wrap the elementary stream in a framer appropriate to its type:
+    if ((fStreamIdTag&0xF0) == 0xC0 /*MPEG audio*/) {
+      estBitrate = 128; // kbps, estimate
+      return MPEG1or2AudioStreamFramer::createNew(envir(), es);
+    }
+    if ((fStreamIdTag&0xF0) == 0xE0 /*video*/) {
+      estBitrate = 500; // kbps, estimate
+      return MPEG1or2VideoStreamFramer::createNew(envir(), es,
+                                                  fIFramesOnly, fVSHPeriod);
+    }
+    if (fStreamIdTag == 0xBD /*AC-3 audio*/) {
+      estBitrate = 192; // kbps, estimate
+      return AC3AudioStreamFramer::createNew(envir(), es, 0x80);
+    }
+    // Unknown stream type: fall through to the error case below.
+  }
+
+  // An error occurred:
+  Medium::close(es);
+  return NULL;
+}
+
+// Create a new "RTPSink" appropriate for our stream type.  Returns NULL for
+// unknown stream types.
+RTPSink* MPEG1or2DemuxedServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic,
+                   FramedSource* inputSource) {
+  unsigned char const idHighNibble = fStreamIdTag&0xF0;
+
+  if (idHighNibble == 0xC0) { // MPEG audio
+    return MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock);
+  }
+  if (idHighNibble == 0xE0) { // video
+    return MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock);
+  }
+  if (fStreamIdTag == 0xBD) { // AC-3 audio
+    // Get the sampling frequency from the audio source; use it for the RTP frequency:
+    AC3AudioStreamFramer* audioSource
+      = (AC3AudioStreamFramer*)inputSource;
+    return AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+                                      audioSource->samplingRate());
+  }
+  return NULL;
+}
+
+// Seek the underlying input file to (approximately) normalized play time
+// "seekNPT".  The target byte position is estimated by linear interpolation
+// over the whole file - i.e. assuming a roughly constant bitrate.
+void MPEG1or2DemuxedServerMediaSubsession
+::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) {
+  float const dur = duration();
+  unsigned const size = fOurDemux.fileSize();
+  unsigned absBytePosition = dur == 0.0 ? 0 : (unsigned)((seekNPT/dur)*size);
+    // (guarding against division by zero when the duration is unknown)
+
+  // "inputSource" is a 'framer'
+  // Flush its data, to account for the seek that we're about to do:
+  if ((fStreamIdTag&0xF0) == 0xC0 /*MPEG audio*/) {
+    MPEG1or2AudioStreamFramer* framer = (MPEG1or2AudioStreamFramer*)inputSource;
+    framer->flushInput();
+  } else if ((fStreamIdTag&0xF0) == 0xE0 /*video*/) {
+    MPEG1or2VideoStreamFramer* framer = (MPEG1or2VideoStreamFramer*)inputSource;
+    framer->flushInput();
+  }
+
+  // "inputSource" is a filter; its input source is the original elem stream source:
+  MPEG1or2DemuxedElementaryStream* elemStreamSource
+    = (MPEG1or2DemuxedElementaryStream*)(((FramedFilter*)inputSource)->inputSource());
+
+  // Next, get the original source demux:
+  MPEG1or2Demux& sourceDemux = elemStreamSource->sourceDemux();
+
+  // and flush its input buffers:
+  sourceDemux.flushInput();
+
+  // Then, get the original input file stream from the source demux:
+  ByteStreamFileSource* inputFileSource
+    = (ByteStreamFileSource*)(sourceDemux.inputSource());
+  // Note: We can make that cast, because we know that the demux was originally
+  // created from a "ByteStreamFileSource".
+
+  // Do the appropriate seek within the input file stream:
+  inputFileSource->seekToByteAbsolute(absBytePosition);
+}
+
+// The file's play duration (in seconds), as estimated by our demux:
+float MPEG1or2DemuxedServerMediaSubsession::duration() const {
+  return fOurDemux.fileDuration();
+}
diff --git a/liveMedia/MPEG1or2FileServerDemux.cpp b/liveMedia/MPEG1or2FileServerDemux.cpp
new file mode 100644
index 0000000..b668e39
--- /dev/null
+++ b/liveMedia/MPEG1or2FileServerDemux.cpp
@@ -0,0 +1,264 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server demultiplexer for a MPEG 1 or 2 Program Stream
+// Implementation
+
+#include "MPEG1or2FileServerDemux.hh"
+#include "MPEG1or2DemuxedServerMediaSubsession.hh"
+#include "ByteStreamFileSource.hh"
+
+// Factory: create a server demux for the named MPEG-1 or 2 Program Stream file.
+MPEG1or2FileServerDemux*
+MPEG1or2FileServerDemux::createNew(UsageEnvironment& env, char const* fileName,
+                                   Boolean reuseFirstSource) {
+  return new MPEG1or2FileServerDemux(env, fileName, reuseFirstSource);
+}
+
+static float MPEG1or2ProgramStreamFileDuration(UsageEnvironment& env,
+ char const* fileName,
+ unsigned& fileSize); // forward
+// Constructor: copy the file name, then scan the file once up front to
+// estimate its duration (and learn its size), for later use in seeking.
+MPEG1or2FileServerDemux
+::MPEG1or2FileServerDemux(UsageEnvironment& env, char const* fileName,
+                          Boolean reuseFirstSource)
+  : Medium(env),
+    fReuseFirstSource(reuseFirstSource),
+    fSession0Demux(NULL), fLastCreatedDemux(NULL),
+    fLastClientSessionId(~0) { // ~0: sentinel, so the first real session id mismatches
+  fFileName = strDup(fileName);
+  fFileDuration = MPEG1or2ProgramStreamFileDuration(env, fileName, fFileSize);
+}
+
+MPEG1or2FileServerDemux::~MPEG1or2FileServerDemux() {
+  // Close the 'session 0' demux (if any); demuxes for other sessions delete
+  // themselves when their last elementary stream is deleted.
+  Medium::close(fSession0Demux);
+
+  // "fFileName" was heap-allocated (by "strDup()").  Use a named cast -
+  // rather than the previous C-style cast - to strip its constness for the
+  // deletion:
+  delete[] const_cast<char*>(fFileName);
+}
+
+ServerMediaSubsession*
+MPEG1or2FileServerDemux::newAudioServerMediaSubsession() {
+  // 0xC0 is the first MPEG audio stream id:
+  return MPEG1or2DemuxedServerMediaSubsession::createNew(*this, 0xC0, fReuseFirstSource);
+}
+
+ServerMediaSubsession*
+MPEG1or2FileServerDemux::newVideoServerMediaSubsession(Boolean iFramesOnly,
+                                                       double vshPeriod) {
+  // 0xE0 is the first MPEG video stream id:
+  return MPEG1or2DemuxedServerMediaSubsession::createNew(*this, 0xE0, fReuseFirstSource,
+                                                         iFramesOnly, vshPeriod);
+}
+
+ServerMediaSubsession*
+MPEG1or2FileServerDemux::newAC3AudioServerMediaSubsession() {
+  return MPEG1or2DemuxedServerMediaSubsession::createNew(*this, 0xBD, fReuseFirstSource);
+  // because, in a VOB file, the AC3 audio has stream id 0xBD
+}
+
+// Create (or reuse) a demux for the given client session, and return a new
+// elementary stream (with id "streamIdTag") from it.  Returns NULL on failure.
+MPEG1or2DemuxedElementaryStream*
+MPEG1or2FileServerDemux::newElementaryStream(unsigned clientSessionId,
+                                             u_int8_t streamIdTag) {
+  MPEG1or2Demux* demuxToUse;
+  if (clientSessionId == 0) {
+    // 'Session 0' is treated especially, because its audio & video streams
+    // are created and destroyed one-at-a-time, rather than both streams being
+    // created, and then (later) both streams being destroyed (as is the case
+    // for other ('real') session ids).  Because of this, a separate demux is
+    // used for session 0, and its deletion is managed by us, rather than
+    // happening automatically.
+    if (fSession0Demux == NULL) {
+      // Open our input file as a 'byte-stream file source':
+      ByteStreamFileSource* fileSource
+        = ByteStreamFileSource::createNew(envir(), fFileName);
+      if (fileSource == NULL) return NULL;
+      fSession0Demux = MPEG1or2Demux::createNew(envir(), fileSource, False/*note!*/);
+        // ("False": we - not the demux itself - manage its deletion)
+    }
+    demuxToUse = fSession0Demux;
+  } else {
+    // First, check whether this is a new client session.  If so, create a new
+    // demux for it:
+    if (clientSessionId != fLastClientSessionId) {
+      // Open our input file as a 'byte-stream file source':
+      ByteStreamFileSource* fileSource
+        = ByteStreamFileSource::createNew(envir(), fFileName);
+      if (fileSource == NULL) return NULL;
+
+      fLastCreatedDemux = MPEG1or2Demux::createNew(envir(), fileSource, True);
+      // Note: We tell the demux to delete itself when its last
+      // elementary stream is deleted.
+      fLastClientSessionId = clientSessionId;
+      // Note: This code relies upon the fact that the creation of streams for
+      // different client sessions do not overlap - so one "MPEG1or2Demux" is used
+      // at a time.
+    }
+    demuxToUse = fLastCreatedDemux;
+  }
+
+  if (demuxToUse == NULL) return NULL; // shouldn't happen
+
+  return demuxToUse->newElementaryStream(streamIdTag);
+}
+
+
+static Boolean getMPEG1or2TimeCode(FramedSource* dataSource,
+ MPEG1or2Demux& parentDemux,
+ Boolean returnFirstSeenCode,
+ float& timeCode); // forward
+
+// Estimate the file's play duration (in seconds), and report its size in
+// bytes, by reading an SCR time code near the start of the file and another
+// near its end, then taking their difference.  Returns 0.0 on any failure.
+static float MPEG1or2ProgramStreamFileDuration(UsageEnvironment& env,
+                                               char const* fileName,
+                                               unsigned& fileSize) {
+  FramedSource* dataSource = NULL;
+  float duration = 0.0; // until we learn otherwise
+  fileSize = 0; // ditto
+
+  do {
+    // Open the input file as a 'byte-stream file source':
+    ByteStreamFileSource* fileSource = ByteStreamFileSource::createNew(env, fileName);
+    if (fileSource == NULL) break;
+    dataSource = fileSource;
+
+    fileSize = (unsigned)(fileSource->fileSize());
+      // NOTE(review): truncated to 32 bits; files >= 4GB would misreport - confirm acceptable
+    if (fileSize == 0) break;
+
+    // Create a MPEG demultiplexor that reads from that source.
+    MPEG1or2Demux* baseDemux = MPEG1or2Demux::createNew(env, dataSource, True);
+    if (baseDemux == NULL) break;
+
+    // Create, from this, a source that returns raw PES packets:
+    dataSource = baseDemux->newRawPESStream();
+
+    // Read the first time code from the file:
+    float firstTimeCode;
+    if (!getMPEG1or2TimeCode(dataSource, *baseDemux, True, firstTimeCode)) break;
+
+    // Then, read the last time code from the file.
+    // (Before doing this, flush the demux's input buffers,
+    // and seek towards the end of the file, for efficiency.)
+    baseDemux->flushInput();
+    unsigned const startByteFromEnd = 100000;
+    unsigned newFilePosition
+      = fileSize < startByteFromEnd ? 0 : fileSize - startByteFromEnd;
+    if (newFilePosition > 0) fileSource->seekToByteAbsolute(newFilePosition);
+
+    float lastTimeCode;
+    if (!getMPEG1or2TimeCode(dataSource, *baseDemux, False, lastTimeCode)) break;
+
+    // Take the difference between these time codes as being the file duration:
+    float timeCodeDiff = lastTimeCode - firstTimeCode;
+    if (timeCodeDiff < 0) break; // the SCRs went backwards; don't trust them
+    duration = timeCodeDiff;
+  } while (0);
+
+  Medium::close(dataSource);
+    // (closing the raw PES stream presumably also tears down "baseDemux",
+    //  since it was created with delete-on-last-stream semantics - confirm)
+  return duration;
+}
+
+#define MFSD_DUMMY_SINK_BUFFER_SIZE (6+65535) /* large enough for a PES packet */
+
+// A dummy sink that pulls raw PES packets from a demuxed stream purely so
+// that the demux parses the stream (and thereby records SCR time codes);
+// the packet contents themselves are discarded.
+class MFSD_DummySink: public MediaSink {
+public:
+  MFSD_DummySink(MPEG1or2Demux& demux, Boolean returnFirstSeenCode);
+  virtual ~MFSD_DummySink();
+
+  char watchVariable; // set non-zero to break out of "doEventLoop()"
+
+private:
+  // redefined virtual function:
+  virtual Boolean continuePlaying();
+
+private:
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+                                unsigned numTruncatedBytes,
+                                struct timeval presentationTime,
+                                unsigned durationInMicroseconds);
+  void afterGettingFrame1();
+
+private:
+  MPEG1or2Demux& fOurDemux;
+  Boolean fReturnFirstSeenCode; // if True, stop after the first valid SCR
+  unsigned char fBuf[MFSD_DUMMY_SINK_BUFFER_SIZE]; // scratch space for one PES packet
+};
+
+static void afterPlayingMFSD_DummySink(MFSD_DummySink* sink); // forward
+static float computeSCRTimeCode(MPEG1or2Demux::SCR const& scr); // forward
+
+// Run the event loop until "dataSource" yields an SCR time code: the first
+// one seen if "returnFirstSeenCode", otherwise the last one before the
+// source ends.  Sets "timeCode" (seconds); returns True iff a valid SCR was seen.
+static Boolean getMPEG1or2TimeCode(FramedSource* dataSource,
+                                   MPEG1or2Demux& parentDemux,
+                                   Boolean returnFirstSeenCode,
+                                   float& timeCode) {
+  // Start reading through "dataSource", until we see a SCR time code:
+  parentDemux.lastSeenSCR().isValid = False;
+  UsageEnvironment& env = dataSource->envir(); // alias
+  MFSD_DummySink sink(parentDemux, returnFirstSeenCode);
+  sink.startPlaying(*dataSource,
+                    (MediaSink::afterPlayingFunc*)afterPlayingMFSD_DummySink, &sink);
+  env.taskScheduler().doEventLoop(&sink.watchVariable);
+    // blocks here until "sink.watchVariable" is set non-zero
+
+  timeCode = computeSCRTimeCode(parentDemux.lastSeenSCR());
+  return parentDemux.lastSeenSCR().isValid;
+}
+
+
+////////// MFSD_DummySink implementation //////////
+
+// Constructor.  "watchVariable" starts at 0; it is set non-zero (in
+// "afterPlayingMFSD_DummySink()") to terminate the event loop.
+MFSD_DummySink::MFSD_DummySink(MPEG1or2Demux& demux, Boolean returnFirstSeenCode)
+  : MediaSink(demux.envir()),
+    watchVariable(0), fOurDemux(demux), fReturnFirstSeenCode(returnFirstSeenCode) {
+}
+
+MFSD_DummySink::~MFSD_DummySink() {
+  // (No resources of our own to release.)
+}
+
+// Request the next frame (into our scratch buffer); "afterGettingFrame()"
+// decides whether to keep going:
+Boolean MFSD_DummySink::continuePlaying() {
+  if (fSource == NULL) return False; // sanity check
+
+  fSource->getNextFrame(fBuf, sizeof fBuf,
+                        afterGettingFrame, this,
+                        onSourceClosure, this);
+  return True;
+}
+
+// Static trampoline; the frame contents and metadata are deliberately ignored:
+void MFSD_DummySink::afterGettingFrame(void* clientData, unsigned /*frameSize*/,
+                                       unsigned /*numTruncatedBytes*/,
+                                       struct timeval /*presentationTime*/,
+                                       unsigned /*durationInMicroseconds*/) {
+  MFSD_DummySink* sink = (MFSD_DummySink*)clientData;
+  sink->afterGettingFrame1();
+}
+
+// One frame has been read (and discarded).  Decide whether to stop, or to
+// keep reading:
+void MFSD_DummySink::afterGettingFrame1() {
+  if (fReturnFirstSeenCode && fOurDemux.lastSeenSCR().isValid) {
+    // We were asked to return the first SCR that we saw, and we've seen one,
+    // so we're done.  (Handle this as if the input source had closed.)
+    onSourceClosure();
+    return;
+  }
+
+  continuePlaying();
+}
+
+// 'After playing' handler: the source has closed (or we forced closure).
+static void afterPlayingMFSD_DummySink(MFSD_DummySink* sink) {
+  // Return from the "doEventLoop()" call:
+  sink->watchVariable = ~0;
+}
+
+// Convert an SCR (System Clock Reference) to a time in seconds.  Per the
+// MPEG systems specs, the 33-bit SCR base counts ticks of a 90 kHz clock,
+// and "scr.extension" counts additional ticks of the 27 MHz system clock
+// (0..299 per base tick).
+static float computeSCRTimeCode(MPEG1or2Demux::SCR const& scr) {
+  // Note: the extension contributes extension/27000000 seconds.  (The
+  // previous code divided it by 300 only, overstating its contribution;
+  // since 0 <= extension <= 299, the resulting error was < 1 second.)
+  double result = scr.remainingBits/90000.0 + scr.extension/27000000.0;
+  if (scr.highBit) {
+    // The 33rd (high) bit of the base adds (2^32)/90000 == (2^28)/5625 seconds:
+    double const highBitValue = (256*1024*1024)/5625.0;
+    result += highBitValue;
+  }
+
+  return (float)result;
+}
diff --git a/liveMedia/MPEG1or2VideoFileServerMediaSubsession.cpp b/liveMedia/MPEG1or2VideoFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..4053b82
--- /dev/null
+++ b/liveMedia/MPEG1or2VideoFileServerMediaSubsession.cpp
@@ -0,0 +1,69 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-1 or 2 Elementary Stream video file.
+// Implementation
+
+#include "MPEG1or2VideoFileServerMediaSubsession.hh"
+#include "MPEG1or2VideoRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+#include "MPEG1or2VideoStreamFramer.hh"
+
+MPEG1or2VideoFileServerMediaSubsession*
+MPEG1or2VideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
+                                                  char const* fileName,
+                                                  Boolean reuseFirstSource,
+                                                  Boolean iFramesOnly,
+                                                  double vshPeriod) {
+  // Simple factory wrapper around the constructor:
+  MPEG1or2VideoFileServerMediaSubsession* subsession
+    = new MPEG1or2VideoFileServerMediaSubsession(env, fileName,
+                                                 reuseFirstSource,
+                                                 iFramesOnly, vshPeriod);
+  return subsession;
+}
+
+// Constructor.  "iFramesOnly" and "vshPeriod" are simply recorded here;
+// they are later passed through to the "MPEG1or2VideoStreamFramer" that
+// "createNewStreamSource()" builds.
+MPEG1or2VideoFileServerMediaSubsession
+::MPEG1or2VideoFileServerMediaSubsession(UsageEnvironment& env,
+					 char const* fileName,
+					 Boolean reuseFirstSource,
+					 Boolean iFramesOnly,
+					 double vshPeriod)
+  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
+    fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) {
+}
+
+MPEG1or2VideoFileServerMediaSubsession
+::~MPEG1or2VideoFileServerMediaSubsession() {
+  // Nothing of our own to clean up.
+}
+
+FramedSource* MPEG1or2VideoFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+  estBitrate = 500; // kbps, estimate
+
+  // Open the underlying file as a raw byte stream:
+  ByteStreamFileSource* fileSource
+    = ByteStreamFileSource::createNew(envir(), fFileName);
+  if (fileSource == NULL) return NULL;
+  fFileSize = fileSource->fileSize();
+
+  // Wrap the byte stream in a framer that delivers MPEG video 'frames':
+  MPEG1or2VideoStreamFramer* framer
+    = MPEG1or2VideoStreamFramer::createNew(envir(), fileSource,
+                                           fIFramesOnly, fVSHPeriod);
+  return framer;
+}
+
+RTPSink* MPEG1or2VideoFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+                   unsigned char /*rtpPayloadTypeIfDynamic*/,
+                   FramedSource* /*inputSource*/) {
+  // MPEG-1/2 video uses a static RTP payload type (see
+  // "MPEG1or2VideoRTPSink"'s constructor), so the dynamic payload-type
+  // and input-source parameters are not needed here.
+  MPEG1or2VideoRTPSink* videoSink
+    = MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock);
+  return videoSink;
+}
diff --git a/liveMedia/MPEG1or2VideoRTPSink.cpp b/liveMedia/MPEG1or2VideoRTPSink.cpp
new file mode 100644
index 0000000..173dd36
--- /dev/null
+++ b/liveMedia/MPEG1or2VideoRTPSink.cpp
@@ -0,0 +1,175 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG video (RFC 2250)
+// Implementation
+
+#include "MPEG1or2VideoRTPSink.hh"
+#include "MPEG1or2VideoStreamFramer.hh"
+
+MPEG1or2VideoRTPSink::MPEG1or2VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs)
+  : VideoRTPSink(env, RTPgs, 32, 90000, "MPV") {
+  // Start with an all-zero picture state; it gets filled in as picture
+  // headers are seen by "doSpecialFrameHandling()".
+  fPictureState.temporal_reference = 0;
+  fPictureState.picture_coding_type = 0;
+  fPictureState.vector_code_bits = 0;
+}
+
+MPEG1or2VideoRTPSink::~MPEG1or2VideoRTPSink() {
+}
+
+MPEG1or2VideoRTPSink*
+MPEG1or2VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs) {
+  // Simple factory wrapper around the constructor:
+  MPEG1or2VideoRTPSink* newSink = new MPEG1or2VideoRTPSink(env, RTPgs);
+  return newSink;
+}
+
+Boolean MPEG1or2VideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+  // We can only packetize data that comes from an appropriate framer:
+  if (source.isMPEG1or2VideoStreamFramer()) return True;
+  return False;
+}
+
+Boolean MPEG1or2VideoRTPSink::allowFragmentationAfterStart() const {
+  // A large 'frame' may be split across packets even after other data has
+  // already been packed into the current packet.
+  return True;
+}
+
+Boolean MPEG1or2VideoRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+                                 unsigned numBytesInFrame) const {
+  // A 'frame' (which in this context can mean a header or a slice as well as
+  // a complete picture) can appear at other than the first position in a
+  // packet in all situations, EXCEPT when it follows the end of (i.e., the
+  // last slice of) a picture.  I.e., the headers at the beginning of a
+  // picture must appear at the start of a RTP packet.
+  if (!fPreviousFrameWasSlice) return True;
+
+  // A slice is already packed into this packet.  We allow this new 'frame'
+  // to be packed after it, provided that it is also a slice - i.e., that it
+  // begins with a 0x000001NN start code whose last byte is in [0x01,0xAF]:
+  if (numBytesInFrame < 4) return False;
+  Boolean hasStartCodePrefix
+    = frameStart[0] == 0 && frameStart[1] == 0 && frameStart[2] == 1;
+  Boolean lastByteIsSliceCode
+    = frameStart[3] >= 1 && frameStart[3] <= 0xAF;
+  return hasStartCodePrefix && lastByteIsSliceCode;
+}
+
+#define VIDEO_SEQUENCE_HEADER_START_CODE 0x000001B3
+#define PICTURE_START_CODE 0x00000100
+
+// Examine each 'frame' (sequence header, GOP header, picture header, or
+// slice - possibly a fragment thereof) as it is packed, and set the 4-byte
+// MPEG video-specific header accordingly, along with the RTP timestamp and
+// (for the end of a picture) the RTP marker bit.
+void MPEG1or2VideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+			 unsigned char* frameStart,
+			 unsigned numBytesInFrame,
+			 struct timeval framePresentationTime,
+			 unsigned numRemainingBytes) {
+  Boolean thisFrameIsASlice = False; // until we learn otherwise
+  if (isFirstFrameInPacket()) {
+    // Reset the per-packet flags at the start of each outgoing packet:
+    fSequenceHeaderPresent = fPacketBeginsSlice = fPacketEndsSlice = False;
+  }
+
+  if (fragmentationOffset == 0) {
+    // Begin by inspecting the 4-byte code at the start of the frame:
+    if (numBytesInFrame < 4) return; // shouldn't happen
+    unsigned startCode = (frameStart[0]<<24) | (frameStart[1]<<16)
+      | (frameStart[2]<<8) | frameStart[3];
+
+    if (startCode == VIDEO_SEQUENCE_HEADER_START_CODE) {
+      // This is a video sequence header
+      fSequenceHeaderPresent = True;
+    } else if (startCode == PICTURE_START_CODE) {
+      // This is a picture header
+
+      // Record the parameters of this picture:
+      if (numBytesInFrame < 8) return; // shouldn't happen
+      unsigned next4Bytes = (frameStart[4]<<24) | (frameStart[5]<<16)
+	| (frameStart[6]<<8) | frameStart[7];
+      // "byte8" carries bits of the motion-vector fields used below; it may
+      // legitimately be absent (e.g., for an I picture), hence the 0 default:
+      unsigned char byte8 = numBytesInFrame == 8 ? 0 : frameStart[8];
+
+      // temporal_reference: the 10 bits after the start code;
+      // picture_coding_type: the following 3 bits:
+      fPictureState.temporal_reference = (next4Bytes&0xFFC00000)>>(32-10);
+      fPictureState.picture_coding_type = (next4Bytes&0x00380000)>>(32-(10+3));
+
+      // Extract the motion-vector code bits: FBV/BFC only for type 3 (B),
+      // FFV/FFC for types 2 (P) and 3 (B); all stay 0 otherwise:
+      unsigned char FBV, BFC, FFV, FFC;
+      FBV = BFC = FFV = FFC = 0;
+      switch (fPictureState.picture_coding_type) {
+      case 3:
+	FBV = (byte8&0x40)>>6;
+	BFC = (byte8&0x38)>>3;
+	// fall through to:
+      case 2:
+	FFV = (next4Bytes&0x00000004)>>2;
+	FFC = ((next4Bytes&0x00000003)<<1) | ((byte8&0x80)>>7);
+      }
+
+      fPictureState.vector_code_bits = (FBV<<7) | (BFC<<4) | (FFV<<3) | FFC;
+    } else if ((startCode&0xFFFFFF00) == 0x00000100) {
+      // Some other 0x000001NN start code; classify it by its last byte:
+      unsigned char lastCodeByte = startCode&0xFF;
+
+      if (lastCodeByte <= 0xAF) {
+	// This is (the start of) a slice
+	thisFrameIsASlice = True;
+      } else {
+	// This is probably a GOP header; we don't do anything with this
+      }
+    } else {
+      // The first 4 bytes aren't a code that we recognize.
+      envir() << "Warning: MPEG1or2VideoRTPSink::doSpecialFrameHandling saw strange first 4 bytes "
+	      << (void*)startCode << ", but we're not a fragment\n";
+    }
+  } else {
+    // We're a fragment (other than the first) of a slice.
+    thisFrameIsASlice = True;
+  }
+
+  if (thisFrameIsASlice) {
+    // This packet begins a slice iff there's no fragmentation offset:
+    fPacketBeginsSlice = (fragmentationOffset == 0);
+
+    // This packet also ends a slice iff there are no fragments remaining:
+    fPacketEndsSlice = (numRemainingBytes == 0);
+  }
+
+  // Set the video-specific header based on the parameters that we've seen.
+  // Note that this may get done more than once, if several frames appear
+  // in the packet.  That's OK, because this situation happens infrequently,
+  // and we want the video-specific header to reflect the most up-to-date
+  // information (in particular, from a Picture Header) anyway.
+  unsigned videoSpecificHeader =
+    // T == 0
+    (fPictureState.temporal_reference<<16) |
+    // AN == N == 0
+    (fSequenceHeaderPresent<<13) |
+    (fPacketBeginsSlice<<12) |
+    (fPacketEndsSlice<<11) |
+    (fPictureState.picture_coding_type<<8) |
+    fPictureState.vector_code_bits;
+  setSpecialHeaderWord(videoSpecificHeader);
+
+  // Also set the RTP timestamp.  (As above, we do this for each frame
+  // in the packet.)
+  setTimestamp(framePresentationTime);
+
+  // Set the RTP 'M' (marker) bit iff this frame ends (i.e., is the last
+  // slice of) a picture (and there are no fragments remaining).
+  // This relies on the source being a "MPEG1or2VideoStreamFramer".
+  MPEG1or2VideoStreamFramer* framerSource = (MPEG1or2VideoStreamFramer*)fSource;
+  if (framerSource != NULL && framerSource->pictureEndMarker()
+      && numRemainingBytes == 0) {
+    setMarkerBit();
+    framerSource->pictureEndMarker() = False;
+  }
+
+  fPreviousFrameWasSlice = thisFrameIsASlice;
+}
+
+unsigned MPEG1or2VideoRTPSink::specialHeaderSize() const {
+  // There's a 4 byte special video header - the one that
+  // "doSpecialFrameHandling()" fills in via "setSpecialHeaderWord()":
+  return 4;
+}
diff --git a/liveMedia/MPEG1or2VideoRTPSource.cpp b/liveMedia/MPEG1or2VideoRTPSource.cpp
new file mode 100644
index 0000000..b41466e
--- /dev/null
+++ b/liveMedia/MPEG1or2VideoRTPSource.cpp
@@ -0,0 +1,82 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG-1 or MPEG-2 Video RTP Sources
+// Implementation
+
+#include "MPEG1or2VideoRTPSource.hh"
+
+MPEG1or2VideoRTPSource*
+MPEG1or2VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+                                  unsigned char rtpPayloadFormat,
+                                  unsigned rtpTimestampFrequency) {
+  // Simple factory wrapper around the constructor:
+  MPEG1or2VideoRTPSource* newSource
+    = new MPEG1or2VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+                                 rtpTimestampFrequency);
+  return newSource;
+}
+
+// Constructor.  All of the real packet handling is done by the
+// "MultiFramedRTPSource" base class; this class only adds the
+// MPEG-video-specific header processing (see below).
+MPEG1or2VideoRTPSource::MPEG1or2VideoRTPSource(UsageEnvironment& env,
+					       Groupsock* RTPgs,
+					       unsigned char rtpPayloadFormat,
+					       unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs,
+			 rtpPayloadFormat, rtpTimestampFrequency){
+}
+
+MPEG1or2VideoRTPSource::~MPEG1or2VideoRTPSource() {
+}
+
+Boolean MPEG1or2VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+                       unsigned& resultSpecialHeaderSize) {
+  // Each packet begins with a 4-byte MPEG video-specific header:
+  unsigned const specialHeaderSize = 4;
+  if (packet->dataSize() < specialHeaderSize) return False;
+
+  u_int32_t header = ntohl(*(u_int32_t*)(packet->data()));
+
+  // Pull out the three flag bits that matter for frame delimiting:
+  Boolean sequenceHeaderPresent = (header&0x00002000) != 0;
+  Boolean beginningOfSlice = (header&0x00001000) != 0;
+  Boolean endOfSlice = (header&0x00000800) != 0;
+
+  fCurrentPacketBeginsFrame = sequenceHeaderPresent || beginningOfSlice;
+  fCurrentPacketCompletesFrame
+    = (sequenceHeaderPresent && !beginningOfSlice) || endOfSlice;
+
+  resultSpecialHeaderSize = specialHeaderSize;
+  return True;
+}
+
+Boolean MPEG1or2VideoRTPSource
+::packetIsUsableInJitterCalculation(unsigned char* packet,
+                                    unsigned packetSize) {
+  // There's a 4-byte video-specific header
+  if (packetSize < 4) return False;
+
+  // Extract the "Picture-Type" field from this header; only packets whose
+  // picture type is 1 (an I frame) are used in jitter calculations:
+  unsigned header = ntohl(*(u_int32_t*)packet);
+  unsigned short pictureType = (header>>8)&0x7;
+
+  return pictureType == 1;
+}
+
+char const* MPEG1or2VideoRTPSource::MIMEtype() const {
+  // The MIME type that we advertise for this payload:
+  return "video/MPEG";
+}
+
diff --git a/liveMedia/MPEG1or2VideoStreamDiscreteFramer.cpp b/liveMedia/MPEG1or2VideoStreamDiscreteFramer.cpp
new file mode 100644
index 0000000..df5e594
--- /dev/null
+++ b/liveMedia/MPEG1or2VideoStreamDiscreteFramer.cpp
@@ -0,0 +1,203 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "MPEG1or2VideoStreamFramer" that takes only
+// complete, discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "MPEG1or2VideoStreamFramer".
+// Implementation
+
+#include "MPEG1or2VideoStreamDiscreteFramer.hh"
+
+MPEG1or2VideoStreamDiscreteFramer*
+MPEG1or2VideoStreamDiscreteFramer::createNew(UsageEnvironment& env,
+                                             FramedSource* inputSource,
+                                             Boolean iFramesOnly,
+                                             double vshPeriod,
+                                             Boolean leavePresentationTimesUnmodified) {
+  // Need to add source type checking here??? #####
+  MPEG1or2VideoStreamDiscreteFramer* framer
+    = new MPEG1or2VideoStreamDiscreteFramer(env, inputSource, iFramesOnly,
+                                            vshPeriod,
+                                            leavePresentationTimesUnmodified);
+  return framer;
+}
+
+// Constructor.  Note that we ask our base class NOT to create a parser,
+// because our input is already a sequence of discrete, complete frames.
+MPEG1or2VideoStreamDiscreteFramer
+::MPEG1or2VideoStreamDiscreteFramer(UsageEnvironment& env,
+				    FramedSource* inputSource,
+				    Boolean iFramesOnly, double vshPeriod, Boolean leavePresentationTimesUnmodified)
+  : MPEG1or2VideoStreamFramer(env, inputSource, iFramesOnly, vshPeriod,
+			      False/*don't create a parser*/),
+  fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified),
+  fLastNonBFrameTemporal_reference(0),
+  fSavedVSHSize(0), fSavedVSHTimestamp(0.0),
+  fIFramesOnly(iFramesOnly), fVSHPeriod(vshPeriod) {
+  // An all-zero presentation time means 'no non-B frame seen yet':
+  fLastNonBFramePresentationTime.tv_sec = 0;
+  fLastNonBFramePresentationTime.tv_usec = 0;
+}
+
+MPEG1or2VideoStreamDiscreteFramer::~MPEG1or2VideoStreamDiscreteFramer() {
+}
+
+void MPEG1or2VideoStreamDiscreteFramer::doGetNextFrame() {
+  // Arrange to read data (which should be a complete MPEG-1 or 2 video frame)
+  // from our data source, directly into the client's input buffer.
+  // After reading this, we'll do some parsing on the frame.
+  // If the source closes instead, "handleClosure" propagates that to our
+  // own client.
+  fInputSource->getNextFrame(fTo, fMaxSize,
+			     afterGettingFrame, this,
+			     FramedSource::handleClosure, this);
+}
+
+void MPEG1or2VideoStreamDiscreteFramer
+::afterGettingFrame(void* clientData, unsigned frameSize,
+                    unsigned numTruncatedBytes,
+                    struct timeval presentationTime,
+                    unsigned durationInMicroseconds) {
+  // Static trampoline: recover our object, then continue in a member function.
+  ((MPEG1or2VideoStreamDiscreteFramer*)clientData)
+    ->afterGettingFrame1(frameSize, numTruncatedBytes,
+                         presentationTime, durationInMicroseconds);
+}
+
+// Map the 4-bit 'frame_rate_code' (from a video_sequence_header) to a
+// nominal frame rate in frames/second.  Codes 9-15 are reserved, and
+// code 0 is forbidden; all map to 0.0 here.
+static double const frameRateFromCode[] = {
+  0.0, // forbidden
+  24000/1001.0, // approx 23.976
+  24.0,
+  25.0,
+  30000/1001.0, // approx 29.97
+  30.0,
+  50.0,
+  60000/1001.0, // approx 59.94
+  60.0,
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0 // reserved
+};
+
+#define MILLION 1000000
+
+// Examine the complete frame that was just read: note the frame rate, save
+// (or re-insert) the Video Sequence Header, optionally skip non-I frames,
+// adjust "presentationTime" for B frames, then deliver to our client.
+void MPEG1or2VideoStreamDiscreteFramer
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+		     struct timeval presentationTime,
+		     unsigned durationInMicroseconds) {
+  // Check that the first 4 bytes are a system code:
+  if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && fTo[2] == 1) {
+    fPictureEndMarker = True; // Assume that we have a complete 'picture' here
+
+    u_int8_t nextCode = fTo[3];
+    if (nextCode == 0xB3) { // VIDEO_SEQUENCE_HEADER_START_CODE
+      // Note the following 'frame rate' code:
+      if (frameSize >= 8) {
+	u_int8_t frame_rate_code = fTo[7]&0x0F;
+	fFrameRate = frameRateFromCode[frame_rate_code];
+      }
+
+      // Also, save away this Video Sequence Header, in case we need it later:
+      // First, figure out how big it is, by scanning for the start code of
+      // the GOP header (0xB8) or picture header (0x00) that follows it:
+      unsigned vshSize;
+      for (vshSize = 4; vshSize < frameSize-3; ++vshSize) {
+	if (fTo[vshSize] == 0 && fTo[vshSize+1] == 0 && fTo[vshSize+2] == 1 &&
+	    (fTo[vshSize+3] == 0xB8 || fTo[vshSize+3] == 0x00)) break;
+      }
+      if (vshSize == frameSize-3) vshSize = frameSize; // There was nothing else following it
+      if (vshSize <= sizeof fSavedVSHBuffer) {
+	memmove(fSavedVSHBuffer, fTo, vshSize);
+	fSavedVSHSize = vshSize;
+	fSavedVSHTimestamp
+	  = presentationTime.tv_sec + presentationTime.tv_usec/(double)MILLION;
+      }
+    } else if (nextCode == 0xB8) { // GROUP_START_CODE
+      // If necessary (i.e., it's been more than "fVSHPeriod" seconds since
+      // one appeared), insert a saved Video Sequence Header in front of this:
+      double pts = presentationTime.tv_sec + presentationTime.tv_usec/(double)MILLION;
+      if (pts > fSavedVSHTimestamp + fVSHPeriod &&
+	  fSavedVSHSize + frameSize <= fMaxSize) {
+	memmove(&fTo[fSavedVSHSize], &fTo[0], frameSize); // make room for the header
+	memmove(&fTo[0], fSavedVSHBuffer, fSavedVSHSize); // insert it
+	frameSize += fSavedVSHSize;
+	fSavedVSHTimestamp = pts;
+      }
+    }
+
+    unsigned i = 3;
+    if (nextCode == 0xB3 /*VIDEO_SEQUENCE_HEADER_START_CODE*/ ||
+	nextCode == 0xB8 /*GROUP_START_CODE*/) {
+      // Skip to the following PICTURE_START_CODE (if any):
+      for (i += 4; i < frameSize; ++i) {
+	if (fTo[i] == 0x00 /*PICTURE_START_CODE*/
+	    && fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
+	  nextCode = fTo[i];
+	  break;
+	}
+      }
+    }
+
+    if (nextCode == 0x00 /*PICTURE_START_CODE*/ && i+2 < frameSize) {
+      // Get the 'temporal_reference' and 'picture_coding_type' from the
+      // following 2 bytes:
+      ++i;
+      unsigned short temporal_reference = (fTo[i]<<2)|(fTo[i+1]>>6);
+      unsigned char picture_coding_type = (fTo[i+1]&0x38)>>3;
+
+      // If this is not an "I" frame, but we were asked for "I" frames only, then try again:
+      if (fIFramesOnly && picture_coding_type != 1) {
+	doGetNextFrame();
+	return;
+      }
+
+      // If this is a "B" frame, then we have to tweak "presentationTime":
+      // step backwards from the last non-B frame's time by the
+      // temporal_reference difference, scaled by the frame rate.
+      if (!fLeavePresentationTimesUnmodified && picture_coding_type == 3/*B*/
+	  && (fLastNonBFramePresentationTime.tv_usec > 0 ||
+	      fLastNonBFramePresentationTime.tv_sec > 0)) {
+	int trIncrement
+	  = fLastNonBFrameTemporal_reference - temporal_reference;
+	if (trIncrement < 0) trIncrement += 1024; // field is 10 bits in size
+
+	unsigned usIncrement = fFrameRate == 0.0 ? 0
+	  : (unsigned)((trIncrement*MILLION)/fFrameRate);
+	unsigned secondsToSubtract = usIncrement/MILLION;
+	unsigned uSecondsToSubtract = usIncrement%MILLION;
+
+	presentationTime = fLastNonBFramePresentationTime;
+	// Borrow from the seconds field if the microseconds would underflow:
+	if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) {
+	  presentationTime.tv_usec += MILLION;
+	  if (presentationTime.tv_sec > 0) --presentationTime.tv_sec;
+	}
+	presentationTime.tv_usec -= uSecondsToSubtract;
+	if ((unsigned)presentationTime.tv_sec > secondsToSubtract) {
+	  presentationTime.tv_sec -= secondsToSubtract;
+	} else {
+	  presentationTime.tv_sec = presentationTime.tv_usec = 0; // clamp at 0
+	}
+      } else {
+	fLastNonBFramePresentationTime = presentationTime;
+	fLastNonBFrameTemporal_reference = temporal_reference;
+      }
+    }
+  }
+
+  // ##### Later:
+  // - do "iFramesOnly" if requested
+
+  // Complete delivery to the client:
+  fFrameSize = frameSize;
+  fNumTruncatedBytes = numTruncatedBytes;
+  fPresentationTime = presentationTime;
+  fDurationInMicroseconds = durationInMicroseconds;
+  afterGetting(this);
+}
diff --git a/liveMedia/MPEG1or2VideoStreamFramer.cpp b/liveMedia/MPEG1or2VideoStreamFramer.cpp
new file mode 100644
index 0000000..a09ab33
--- /dev/null
+++ b/liveMedia/MPEG1or2VideoStreamFramer.cpp
@@ -0,0 +1,478 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG 1 or 2 video elementary stream into
+// frames for: Video_Sequence_Header, GOP_Header, Picture_Header
+// Implementation
+
+#include "MPEG1or2VideoStreamFramer.hh"
+#include "MPEGVideoStreamParser.hh"
+#include <string.h>
+
+////////// MPEG1or2VideoStreamParser definition //////////
+
+// An enum representing the current state of the parser.  The "_SEEN_CODE"
+// variants mean that the relevant start code has already been consumed
+// from the input (by the previous parsing step).
+enum MPEGParseState {
+  PARSING_VIDEO_SEQUENCE_HEADER,
+  PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE,
+  PARSING_GOP_HEADER,
+  PARSING_GOP_HEADER_SEEN_CODE,
+  PARSING_PICTURE_HEADER,
+  PARSING_SLICE
+};
+
+#define VSH_MAX_SIZE 1000
+
+// The parser that "MPEG1or2VideoStreamFramer" uses to break an MPEG-1/2
+// video elementary byte stream into discrete 'frames' (sequence headers,
+// GOP headers, picture headers, slices).  Implemented as a state machine;
+// see "parse()" for the dispatch.
+class MPEG1or2VideoStreamParser: public MPEGVideoStreamParser {
+public:
+  MPEG1or2VideoStreamParser(MPEG1or2VideoStreamFramer* usingSource,
+			    FramedSource* inputSource,
+			    Boolean iFramesOnly, double vshPeriod);
+  virtual ~MPEG1or2VideoStreamParser();
+
+private: // redefined virtual functions:
+  virtual void flushInput();
+  virtual unsigned parse();
+
+private:
+  void reset();
+
+  // Downcast helper for the source pointer held by our base class:
+  MPEG1or2VideoStreamFramer* usingSource() {
+    return (MPEG1or2VideoStreamFramer*)fUsingSource;
+  }
+  void setParseState(MPEGParseState parseState);
+
+  // One parsing routine per state:
+  unsigned parseVideoSequenceHeader(Boolean haveSeenStartCode);
+  unsigned parseGOPHeader(Boolean haveSeenStartCode);
+  unsigned parsePictureHeader();
+  unsigned parseSlice();
+
+private:
+  MPEGParseState fCurrentParseState;
+  unsigned fPicturesSinceLastGOP;
+  // can be used to compute timestamp for a video_sequence_header
+  unsigned short fCurPicTemporalReference;
+  // used to compute slice timestamp
+  unsigned char fCurrentSliceNumber; // set when parsing a slice
+
+  // A saved copy of the most recently seen 'video_sequence_header',
+  // in case we need to insert it into the stream periodically:
+  unsigned char fSavedVSHBuffer[VSH_MAX_SIZE];
+  unsigned fSavedVSHSize; // 0 => nothing saved yet
+  double fSavedVSHTimestamp; // PTS at which the saved VSH last appeared
+  double fVSHPeriod; // seconds between forced VSH re-insertions
+  Boolean fIFramesOnly, fSkippingCurrentPicture;
+
+  void saveCurrentVSH();
+  Boolean needToUseSavedVSH();
+  unsigned useSavedVSH(); // returns the size of the saved VSH
+};
+
+
+////////// MPEG1or2VideoStreamFramer implementation //////////
+
+MPEG1or2VideoStreamFramer::MPEG1or2VideoStreamFramer(UsageEnvironment& env,
+                                                     FramedSource* inputSource,
+                                                     Boolean iFramesOnly,
+                                                     double vshPeriod,
+                                                     Boolean createParser)
+  : MPEGVideoStreamFramer(env, inputSource) {
+  // Build our parser only if asked to.  (The 'discrete framer' subclass
+  // passes False here, because its input is already framed.)
+  if (createParser) {
+    fParser = new MPEG1or2VideoStreamParser(this, inputSource,
+                                            iFramesOnly, vshPeriod);
+  } else {
+    fParser = NULL;
+  }
+}
+
+MPEG1or2VideoStreamFramer::~MPEG1or2VideoStreamFramer() {
+}
+
+MPEG1or2VideoStreamFramer*
+MPEG1or2VideoStreamFramer::createNew(UsageEnvironment& env,
+                                     FramedSource* inputSource,
+                                     Boolean iFramesOnly,
+                                     double vshPeriod) {
+  // Need to add source type checking here??? #####
+  MPEG1or2VideoStreamFramer* framer
+    = new MPEG1or2VideoStreamFramer(env, inputSource, iFramesOnly, vshPeriod);
+  return framer;
+}
+
+double MPEG1or2VideoStreamFramer::getCurrentPTS() const {
+  // Flatten our current presentation time (seconds + microseconds) into a
+  // single floating-point number of seconds:
+  double wholeSeconds = fPresentationTime.tv_sec;
+  double fractionalSeconds = fPresentationTime.tv_usec/1000000.0;
+  return wholeSeconds + fractionalSeconds;
+}
+
+Boolean MPEG1or2VideoStreamFramer::isMPEG1or2VideoStreamFramer() const {
+  // Run-time type identification, used e.g. by
+  // "MPEG1or2VideoRTPSink::sourceIsCompatibleWithUs()":
+  return True;
+}
+
+////////// MPEG1or2VideoStreamParser implementation //////////
+
+// Constructor.  We start out expecting a video_sequence_header at the head
+// of the stream; the remaining per-stream state is cleared by "reset()".
+MPEG1or2VideoStreamParser
+::MPEG1or2VideoStreamParser(MPEG1or2VideoStreamFramer* usingSource,
+			    FramedSource* inputSource,
+			    Boolean iFramesOnly, double vshPeriod)
+  : MPEGVideoStreamParser(usingSource, inputSource),
+    fCurrentParseState(PARSING_VIDEO_SEQUENCE_HEADER),
+    fVSHPeriod(vshPeriod), fIFramesOnly(iFramesOnly) {
+  reset();
+}
+
+MPEG1or2VideoStreamParser::~MPEG1or2VideoStreamParser() {
+}
+
+void MPEG1or2VideoStreamParser::setParseState(MPEGParseState parseState) {
+  // Record the new state, then notify our base class.  (NOTE(review): the
+  // base-class call presumably checkpoints the current parse position so an
+  // interrupted parse can resume here - confirm in "MPEGVideoStreamParser".)
+  fCurrentParseState = parseState;
+  MPEGVideoStreamParser::setParseState();
+}
+
+void MPEG1or2VideoStreamParser::reset() {
+  // Clear all per-stream parsing state (but not "fCurrentParseState" itself):
+  fPicturesSinceLastGOP = 0;
+  fCurPicTemporalReference = 0;
+  fCurrentSliceNumber = 0;
+  fSavedVSHSize = 0; // forget any saved video_sequence_header
+  fSkippingCurrentPicture = False;
+}
+
+void MPEG1or2VideoStreamParser::flushInput() {
+  reset();
+  StreamParser::flushInput();
+  // Unless we were still looking for the stream's initial
+  // video_sequence_header, resynchronize at the next GOP boundary:
+  if (fCurrentParseState != PARSING_VIDEO_SEQUENCE_HEADER) {
+    setParseState(PARSING_GOP_HEADER); // start from the next GOP
+  }
+}
+
+// Dispatch to the parsing routine for the current state.  Each routine
+// returns the number of bytes that it saved into the output frame; a
+// return of 0 means that parsing was interrupted.
+unsigned MPEG1or2VideoStreamParser::parse() {
+  try {
+    switch (fCurrentParseState) {
+    case PARSING_VIDEO_SEQUENCE_HEADER: {
+      return parseVideoSequenceHeader(False);
+    }
+    case PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE: {
+      return parseVideoSequenceHeader(True);
+    }
+    case PARSING_GOP_HEADER: {
+      return parseGOPHeader(False);
+    }
+    case PARSING_GOP_HEADER_SEEN_CODE: {
+      return parseGOPHeader(True);
+    }
+    case PARSING_PICTURE_HEADER: {
+      return parsePictureHeader();
+    }
+    case PARSING_SLICE: {
+      return parseSlice();
+    }
+    default: {
+      return 0; // shouldn't happen
+    }
+    }
+  } catch (int /*e*/) {
+    // NOTE(review): the underlying "StreamParser" apparently signals
+    // 'ran out of input data' by throwing an int - confirm in its header.
+#ifdef DEBUG
+    fprintf(stderr, "MPEG1or2VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+    return 0; // the parsing got interrupted
+  }
+}
+
+void MPEG1or2VideoStreamParser::saveCurrentVSH() {
+  // Keep a copy of the video_sequence_header that was just parsed, so that
+  // we can re-insert it into the output stream later (see "useSavedVSH()").
+  unsigned const vshSize = curFrameSize();
+  if (vshSize <= sizeof fSavedVSHBuffer) {
+    memmove(fSavedVSHBuffer, fStartOfFrame, vshSize);
+    fSavedVSHSize = vshSize;
+    fSavedVSHTimestamp = usingSource()->getCurrentPTS();
+  }
+  // (otherwise it's too big to save)
+}
+
+Boolean MPEG1or2VideoStreamParser::needToUseSavedVSH() {
+  // Re-insert the saved video_sequence_header iff we actually have one,
+  // and more than "fVSHPeriod" seconds have elapsed since it last appeared:
+  if (fSavedVSHSize == 0) return False;
+  return usingSource()->getCurrentPTS() > fSavedVSHTimestamp+fVSHPeriod;
+}
+
+unsigned MPEG1or2VideoStreamParser::useSavedVSH() {
+  // Copy the saved video_sequence_header into the output frame (clipped to
+  // the space remaining there), and restart its re-insertion timer.
+  unsigned const spaceAvailable = fLimit - fStartOfFrame;
+  unsigned bytesToUse = fSavedVSHSize;
+  if (bytesToUse > spaceAvailable) bytesToUse = spaceAvailable;
+
+  memmove(fStartOfFrame, fSavedVSHBuffer, bytesToUse);
+
+  // Also reset the saved timestamp:
+  fSavedVSHTimestamp = usingSource()->getCurrentPTS();
+
+#ifdef DEBUG
+  fprintf(stderr, "used saved video_sequence_header (%d bytes)\n", bytesToUse);
+#endif
+  return bytesToUse;
+}
+
+#define VIDEO_SEQUENCE_HEADER_START_CODE 0x000001B3
+#define GROUP_START_CODE 0x000001B8
+#define PICTURE_START_CODE 0x00000100
+#define SEQUENCE_END_CODE 0x000001B7
+
+// Map the 4-bit 'frame_rate_code' (from a video_sequence_header) to a
+// nominal frame rate in frames/second.  Codes 9-15 are reserved, and
+// code 0 is forbidden; all map to 0.0 here.
+static double const frameRateFromCode[] = {
+  0.0, // forbidden
+  24000/1001.0, // approx 23.976
+  24.0,
+  25.0,
+  30000/1001.0, // approx 29.97
+  30.0,
+  50.0,
+  60000/1001.0, // approx 59.94
+  60.0,
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0, // reserved
+  0.0 // reserved
+};
+
+// Parse a video_sequence_header - saving its bytes into the output frame -
+// together with everything that follows it, up to (but not including) the
+// next GOP or picture start code.  Returns the number of bytes saved.
+unsigned MPEG1or2VideoStreamParser
+::parseVideoSequenceHeader(Boolean haveSeenStartCode) {
+#ifdef DEBUG
+  fprintf(stderr, "parsing video sequence header\n");
+#endif
+  unsigned first4Bytes;
+  if (!haveSeenStartCode) {
+    // Scan forward, a byte at a time, until the start code appears:
+    while ((first4Bytes = test4Bytes()) != VIDEO_SEQUENCE_HEADER_START_CODE) {
+#ifdef DEBUG
+      fprintf(stderr, "ignoring non video sequence header: 0x%08x\n", first4Bytes);
+#endif
+      get1Byte(); setParseState(PARSING_VIDEO_SEQUENCE_HEADER);
+      // ensures we progress over bad data
+    }
+    first4Bytes = get4Bytes();
+  } else {
+    // We've already seen the start code
+    first4Bytes = VIDEO_SEQUENCE_HEADER_START_CODE;
+  }
+  save4Bytes(first4Bytes);
+
+  // Next, extract the size and rate parameters from the next 8 bytes
+  unsigned paramWord1 = get4Bytes();
+  save4Bytes(paramWord1);
+  unsigned next4Bytes = get4Bytes();
+#ifdef DEBUG
+  unsigned short horizontal_size_value = (paramWord1&0xFFF00000)>>(32-12);
+  unsigned short vertical_size_value = (paramWord1&0x000FFF00)>>8;
+  unsigned char aspect_ratio_information = (paramWord1&0x000000F0)>>4;
+#endif
+  // Note the stream's frame rate, for use in computing presentation times:
+  unsigned char frame_rate_code = (paramWord1&0x0000000F);
+  usingSource()->fFrameRate = frameRateFromCode[frame_rate_code];
+#ifdef DEBUG
+  unsigned bit_rate_value = (next4Bytes&0xFFFFC000)>>(32-18);
+  unsigned vbv_buffer_size_value = (next4Bytes&0x00001FF8)>>3;
+  fprintf(stderr, "horizontal_size_value: %d, vertical_size_value: %d, aspect_ratio_information: %d, frame_rate_code: %d (=>%f fps), bit_rate_value: %d (=>%d bps), vbv_buffer_size_value: %d\n", horizontal_size_value, vertical_size_value, aspect_ratio_information, frame_rate_code, usingSource()->fFrameRate, bit_rate_value, bit_rate_value*400, vbv_buffer_size_value);
+#endif
+
+  // Now, copy all bytes that we see, up until we reach a GROUP_START_CODE
+  // or a PICTURE_START_CODE:
+  do {
+    saveToNextCode(next4Bytes);
+  } while (next4Bytes != GROUP_START_CODE && next4Bytes != PICTURE_START_CODE);
+
+  setParseState((next4Bytes == GROUP_START_CODE)
+		? PARSING_GOP_HEADER_SEEN_CODE : PARSING_PICTURE_HEADER);
+
+  // Compute this frame's timestamp by noting how many pictures we've seen
+  // since the last GOP header:
+  usingSource()->computePresentationTime(fPicturesSinceLastGOP);
+
+  // Save this video_sequence_header, in case we need to insert a copy
+  // into the stream later:
+  saveCurrentVSH();
+
+  return curFrameSize();
+}
+
+// Parse a GOP header - saving its bytes into the output frame - together
+// with everything that follows it, up to (but not including) the next
+// picture start code.  Returns the number of bytes saved.  (May instead
+// emit a previously-saved video_sequence_header; see "useSavedVSH()".)
+unsigned MPEG1or2VideoStreamParser::parseGOPHeader(Boolean haveSeenStartCode) {
+  // First check whether we should insert a previously-saved
+  // 'video_sequence_header' here:
+  if (needToUseSavedVSH()) return useSavedVSH();
+
+#ifdef DEBUG
+  fprintf(stderr, "parsing GOP header\n");
+#endif
+  unsigned first4Bytes;
+  if (!haveSeenStartCode) {
+    // Scan forward, a byte at a time, until the start code appears:
+    while ((first4Bytes = test4Bytes()) != GROUP_START_CODE) {
+#ifdef DEBUG
+      fprintf(stderr, "ignoring non GOP start code: 0x%08x\n", first4Bytes);
+#endif
+      get1Byte(); setParseState(PARSING_GOP_HEADER);
+      // ensures we progress over bad data
+    }
+    first4Bytes = get4Bytes();
+  } else {
+    // We've already seen the GROUP_START_CODE
+    first4Bytes = GROUP_START_CODE;
+  }
+  save4Bytes(first4Bytes);
+
+  // Next, extract the (25-bit) time code from the next 4 bytes:
+  unsigned next4Bytes = get4Bytes();
+  unsigned time_code = (next4Bytes&0xFFFFFF80)>>(32-25);
+#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
+  Boolean drop_frame_flag = (time_code&0x01000000) != 0;
+#endif
+  unsigned time_code_hours = (time_code&0x00F80000)>>19;
+  unsigned time_code_minutes = (time_code&0x0007E000)>>13;
+  unsigned time_code_seconds = (time_code&0x00000FC0)>>6;
+  unsigned time_code_pictures = (time_code&0x0000003F);
+#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
+  fprintf(stderr, "time_code: 0x%07x, drop_frame %d, hours %d, minutes %d, seconds %d, pictures %d\n", time_code, drop_frame_flag, time_code_hours, time_code_minutes, time_code_seconds, time_code_pictures);
+#endif
+#ifdef DEBUG
+  Boolean closed_gop = (next4Bytes&0x00000040) != 0;
+  Boolean broken_link = (next4Bytes&0x00000020) != 0;
+  fprintf(stderr, "closed_gop: %d, broken_link: %d\n", closed_gop, broken_link);
+#endif
+
+  // Now, copy all bytes that we see, up until we reach a PICTURE_START_CODE:
+  do {
+    saveToNextCode(next4Bytes);
+  } while (next4Bytes != PICTURE_START_CODE);
+
+  // Record the time code:
+  usingSource()->setTimeCode(time_code_hours, time_code_minutes,
+			     time_code_seconds, time_code_pictures,
+			     fPicturesSinceLastGOP);
+
+  fPicturesSinceLastGOP = 0;
+
+  // Compute this frame's timestamp:
+  usingSource()->computePresentationTime(0);
+
+  setParseState(PARSING_PICTURE_HEADER);
+
+  return curFrameSize();
+}
+
+// Returns True iff "fourBytes" is an MPEG-1/2 slice_start_code:
+// i.e., lies in the range 0x00000101 through 0x000001AF, inclusive.
+inline Boolean isSliceStartCode(unsigned fourBytes) {
+ if ((fourBytes&0xFFFFFF00) != 0x00000100) return False;
+
+ unsigned char lastByte = fourBytes&0xFF;
+ return lastByte <= 0xAF && lastByte >= 1;
+}
+
+// Parses a picture header (whose PICTURE_START_CODE has already been read),
+// plus all following data up to the first slice_start_code.  If we're in
+// 'I-frames only' mode and this is not an I-frame, the data is skipped
+// rather than saved, and parsing continues with the next frame.
+// Returns the size (in bytes) of the frame data accumulated so far.
+unsigned MPEG1or2VideoStreamParser::parsePictureHeader() {
+#ifdef DEBUG
+ fprintf(stderr, "parsing picture header\n");
+#endif
+ // Note that we've already read the PICTURE_START_CODE
+ // Next, extract the temporal reference from the next 4 bytes:
+ unsigned next4Bytes = get4Bytes();
+ unsigned short temporal_reference = (next4Bytes&0xFFC00000)>>(32-10);
+ // picture_coding_type == 1 denotes an I-frame:
+ unsigned char picture_coding_type = (next4Bytes&0x00380000)>>19;
+#ifdef DEBUG
+ unsigned short vbv_delay = (next4Bytes&0x0007FFF8)>>3;
+ fprintf(stderr, "temporal_reference: %d, picture_coding_type: %d, vbv_delay: %d\n", temporal_reference, picture_coding_type, vbv_delay);
+#endif
+
+ fSkippingCurrentPicture = fIFramesOnly && picture_coding_type != 1;
+ if (fSkippingCurrentPicture) {
+ // Skip all bytes that we see, up until we reach a slice_start_code:
+ do {
+ skipToNextCode(next4Bytes);
+ } while (!isSliceStartCode(next4Bytes));
+ } else {
+ // Save the PICTURE_START_CODE that we've already read:
+ save4Bytes(PICTURE_START_CODE);
+
+ // Copy all bytes that we see, up until we reach a slice_start_code:
+ do {
+ saveToNextCode(next4Bytes);
+ } while (!isSliceStartCode(next4Bytes));
+ }
+
+ setParseState(PARSING_SLICE);
+
+ // The low byte of the slice_start_code is the slice's vertical position:
+ fCurrentSliceNumber = next4Bytes&0xFF;
+
+ // Record the temporal reference:
+ fCurPicTemporalReference = temporal_reference;
+
+ // Compute this frame's timestamp:
+ usingSource()->computePresentationTime(fCurPicTemporalReference);
+
+ if (fSkippingCurrentPicture) {
+ return parse(); // try again, until we get a non-skipped frame
+ } else {
+ return curFrameSize();
+ }
+}
+
+// Parses a single slice (whose slice_start_code has already been read),
+// saving (or skipping) its bytes up to the next start code of any kind.
+// The code that follows the slice determines the next parse state; a
+// non-slice code marks the end of the current picture.
+// Returns the size (in bytes) of the frame data accumulated so far.
+unsigned MPEG1or2VideoStreamParser::parseSlice() {
+ // Note that we've already read the slice_start_code:
+ // (reconstruct it from the saved slice number; PICTURE_START_CODE is 0x00000100)
+ unsigned next4Bytes = PICTURE_START_CODE|fCurrentSliceNumber;
+#ifdef DEBUG_SLICE
+ fprintf(stderr, "parsing slice: 0x%08x\n", next4Bytes);
+#endif
+
+ if (fSkippingCurrentPicture) {
+ // Skip all bytes that we see, up until we reach a code of some sort:
+ skipToNextCode(next4Bytes);
+ } else {
+ // Copy all bytes that we see, up until we reach a code of some sort:
+ saveToNextCode(next4Bytes);
+ }
+
+ // The next thing to parse depends on the code that we just saw:
+ if (isSliceStartCode(next4Bytes)) { // common case
+ setParseState(PARSING_SLICE);
+ fCurrentSliceNumber = next4Bytes&0xFF;
+ } else {
+ // Because we don't see any more slices, we are assumed to have ended
+ // the current picture:
+ ++fPicturesSinceLastGOP;
+ ++usingSource()->fPictureCount;
+ usingSource()->fPictureEndMarker = True; // HACK #####
+
+ switch (next4Bytes) {
+ case SEQUENCE_END_CODE: {
+ setParseState(PARSING_VIDEO_SEQUENCE_HEADER);
+ break;
+ }
+ case VIDEO_SEQUENCE_HEADER_START_CODE: {
+ setParseState(PARSING_VIDEO_SEQUENCE_HEADER_SEEN_CODE);
+ break;
+ }
+ case GROUP_START_CODE: {
+ setParseState(PARSING_GOP_HEADER_SEEN_CODE);
+ break;
+ }
+ case PICTURE_START_CODE: {
+ setParseState(PARSING_PICTURE_HEADER);
+ break;
+ }
+ default: {
+ usingSource()->envir() << "MPEG1or2VideoStreamParser::parseSlice(): Saw unexpected code "
+ << (void*)next4Bytes << "\n";
+ setParseState(PARSING_SLICE); // the safest way to recover...
+ break;
+ }
+ }
+ }
+
+ // Compute this frame's timestamp:
+ usingSource()->computePresentationTime(fCurPicTemporalReference);
+
+ if (fSkippingCurrentPicture) {
+ return parse(); // try again, until we get a non-skipped frame
+ } else {
+ return curFrameSize();
+ }
+}
diff --git a/liveMedia/MPEG2IndexFromTransportStream.cpp b/liveMedia/MPEG2IndexFromTransportStream.cpp
new file mode 100644
index 0000000..96c79a9
--- /dev/null
+++ b/liveMedia/MPEG2IndexFromTransportStream.cpp
@@ -0,0 +1,683 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that produces a sequence of I-frame indices from a MPEG-2 Transport Stream
+// Implementation
+
+#include "MPEG2IndexFromTransportStream.hh"
+
+////////// IndexRecord definition //////////
+
+// The parsed type of each index record.  Note: the high bit (0x80) of a
+// stored record type is later used as a 'first record of the frame' flag
+// (see IndexRecord::setFirstFlag()), so these values must fit in 7 bits.
+enum RecordType {
+ RECORD_UNPARSED = 0,
+ RECORD_VSH = 1, // a MPEG Video Sequence Header
+ RECORD_GOP = 2,
+ RECORD_PIC_NON_IFRAME = 3, // includes slices
+ RECORD_PIC_IFRAME = 4, // includes slices
+ RECORD_NAL_H264_SPS = 5, // H.264
+ RECORD_NAL_H264_PPS = 6, // H.264
+ RECORD_NAL_H264_SEI = 7, // H.264
+ RECORD_NAL_H264_NON_IFRAME = 8, // H.264
+ RECORD_NAL_H264_IFRAME = 9, // H.264
+ RECORD_NAL_H264_OTHER = 10, // H.264
+ RECORD_NAL_H265_VPS = 11, // H.265
+ RECORD_NAL_H265_SPS = 12, // H.265
+ RECORD_NAL_H265_PPS = 13, // H.265
+ RECORD_NAL_H265_NON_IFRAME = 14, // H.265
+ RECORD_NAL_H265_IFRAME = 15, // H.265
+ RECORD_NAL_H265_OTHER = 16, // H.265
+ RECORD_JUNK
+};
+
+// One index record: describes a span of Video Elementary Stream data
+// ("fSize" bytes starting at "fStartOffset") within a single Transport
+// Stream packet ("fTransportPacketNumber"), tagged with the record type
+// parsed for it and the PCR in effect when the packet was read.
+// Records form a circular doubly-linked list (a lone record points to itself).
+class IndexRecord {
+public:
+ IndexRecord(u_int8_t startOffset, u_int8_t size,
+ unsigned long transportPacketNumber, float pcr);
+ virtual ~IndexRecord();
+
+ // Returned by reference, so callers can also assign a new type:
+ RecordType& recordType() { return fRecordType; }
+ // Sets the high bit of the type, marking this as a frame's first record:
+ void setFirstFlag() { fRecordType = (RecordType)(((u_int8_t)fRecordType) | 0x80); }
+ u_int8_t startOffset() const { return fStartOffset; }
+ // Returned by reference, so callers can shrink a record in place:
+ u_int8_t& size() { return fSize; }
+ float pcr() const { return fPCR; }
+ unsigned long transportPacketNumber() const { return fTransportPacketNumber; }
+
+ IndexRecord* next() const { return fNext; }
+ void addAfter(IndexRecord* prev); // insert "this" immediately after "prev"
+ void unlink(); // remove "this" from its list (leaving it self-linked)
+
+private:
+ // Index records are maintained in a doubly-linked list:
+ IndexRecord* fNext;
+ IndexRecord* fPrev;
+
+ RecordType fRecordType;
+ u_int8_t fStartOffset; // within the Transport Stream packet
+ u_int8_t fSize; // in bytes, following "fStartOffset".
+ // Note: fStartOffset + fSize <= TRANSPORT_PACKET_SIZE
+ float fPCR;
+ unsigned long fTransportPacketNumber;
+};
+
+#ifdef DEBUG
+// Human-readable names for each RecordType value, indexed by the 7-bit
+// type (i.e., with the 'first record' flag bit masked off):
+static char const* recordTypeStr[] = {
+ "UNPARSED",
+ "VSH",
+ "GOP",
+ "PIC(non-I-frame)",
+ "PIC(I-frame)",
+ "SPS (H.264)",
+ "PPS (H.264)",
+ "SEI (H.264)",
+ "H.264 non-I-frame",
+ "H.264 I-frame",
+ "other NAL unit (H.264)",
+ "VPS (H.265)",
+ "SPS (H.265)",
+ "PPS (H.265)",
+ "H.265 non-I-frame",
+ "H.265 I-frame",
+ "other NAL unit (H.265)",
+ "JUNK"
+};
+
+// Debugging output of one index record; a leading "1" marks a record
+// whose 'first record of frame' flag (high bit of the type) is set:
+UsageEnvironment& operator<<(UsageEnvironment& env, IndexRecord& r) {
+ return env << "[" << ((r.recordType()&0x80) != 0 ? "1" : "")
+ << recordTypeStr[r.recordType()&0x7F] << ":"
+ << (unsigned)r.transportPacketNumber() << ":" << r.startOffset()
+ << "(" << r.size() << ")@" << r.pcr() << "]";
+}
+#endif
+
+
+////////// MPEG2IFrameIndexFromTransportStream implementation //////////
+
+// Factory function: creates a new index-generating filter that reads
+// Transport Stream packets from "inputSource".
+MPEG2IFrameIndexFromTransportStream*
+MPEG2IFrameIndexFromTransportStream::createNew(UsageEnvironment& env,
+ FramedSource* inputSource) {
+ return new MPEG2IFrameIndexFromTransportStream(env, inputSource);
+}
+
+// The largest expected frame size (in bytes):
+#define MAX_FRAME_SIZE 400000
+
+// Make our parse buffer twice as large as this, to ensure that at least one
+// complete frame will fit inside it:
+#define PARSE_BUFFER_SIZE (2*MAX_FRAME_SIZE)
+
+// The PID used for the PAT (as defined in the MPEG Transport Stream standard):
+#define PAT_PID 0
+
+// Constructor.  Note the sentinel initial values:
+// - fInputTransportPacketCounter starts at (unsigned)-1 so that the first
+//   pre-increment (in "afterGettingFrame1()") numbers the first packet 0;
+// - fLastContinuityCounter starts at ~0, which can never equal a real 4-bit
+//   continuity_counter, so the first video packet is never seen as a duplicate;
+// - fParseBufferParseEnd starts at 4, i.e. just past an assumed initial
+//   start code at the front of the parse buffer.
+MPEG2IFrameIndexFromTransportStream
+::MPEG2IFrameIndexFromTransportStream(UsageEnvironment& env,
+ FramedSource* inputSource)
+ : FramedFilter(env, inputSource),
+ fIsH264(False), fIsH265(False),
+ fInputTransportPacketCounter((unsigned)-1), fClosureNumber(0), fLastContinuityCounter(~0),
+ fFirstPCR(0.0), fLastPCR(0.0), fHaveSeenFirstPCR(False),
+ fPMT_PID(0x10), fVideo_PID(0xE0), // default values
+ fParseBufferSize(PARSE_BUFFER_SIZE),
+ fParseBufferFrameStart(0), fParseBufferParseEnd(4), fParseBufferDataEnd(0),
+ fHeadIndexRecord(NULL), fTailIndexRecord(NULL) {
+ fParseBuffer = new unsigned char[fParseBufferSize];
+}
+
+// Destructor.  Deleting the head index record deletes the whole list
+// (IndexRecord's destructor recursively deletes its successor).
+MPEG2IFrameIndexFromTransportStream::~MPEG2IFrameIndexFromTransportStream() {
+ delete fHeadIndexRecord;
+ delete[] fParseBuffer;
+}
+
+// Main delivery loop: first try to deliver an already-parsed index record;
+// failing that, try to parse another frame from buffered data; failing that,
+// read one more Transport Stream packet from the input source.
+void MPEG2IFrameIndexFromTransportStream::doGetNextFrame() {
+ // Begin by trying to deliver an index record (for an already-parsed frame)
+ // to the client:
+ if (deliverIndexRecord()) return;
+
+ // No more index records are left to deliver, so try to parse a new frame:
+ if (parseFrame()) { // success - try again
+ doGetNextFrame();
+ return;
+ }
+
+ // We need to read some more Transport Stream packets. Check whether we have room:
+ if (fParseBufferSize - fParseBufferDataEnd < TRANSPORT_PACKET_SIZE) {
+ // There's no room left. Compact the buffer, and check again:
+ compactParseBuffer();
+ if (fParseBufferSize - fParseBufferDataEnd < TRANSPORT_PACKET_SIZE) {
+ envir() << "ERROR: parse buffer full; increase MAX_FRAME_SIZE\n";
+ // Treat this as if the input source ended:
+ handleInputClosure1();
+ return;
+ }
+ }
+
+ // Arrange to read a new Transport Stream packet:
+ fInputSource->getNextFrame(fInputBuffer, sizeof fInputBuffer,
+ afterGettingFrame, this,
+ handleInputClosure, this);
+}
+
+// Static trampoline for "FramedSource::getNextFrame()": forwards to the
+// member function "afterGettingFrame1()" on the object in "clientData".
+void MPEG2IFrameIndexFromTransportStream
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ MPEG2IFrameIndexFromTransportStream* source
+ = (MPEG2IFrameIndexFromTransportStream*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
+#define TRANSPORT_SYNC_BYTE 0x47
+
+// Handles one newly-read Transport Stream packet: validates the sync byte,
+// parses the TS header (adaptation field, PCR, PID, continuity counter),
+// routes PAT/PMT packets to their analyzers, strips any PES header at the
+// start of a payload unit, then appends the remaining Video Elementary
+// Stream bytes to the parse buffer (with a new, as-yet-unparsed index
+// record), and loops back into "doGetNextFrame()".
+void MPEG2IFrameIndexFromTransportStream
+::afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ if (frameSize < TRANSPORT_PACKET_SIZE || fInputBuffer[0] != TRANSPORT_SYNC_BYTE) {
+ if (fInputBuffer[0] != TRANSPORT_SYNC_BYTE) {
+ envir() << "Bad TS sync byte: 0x" << fInputBuffer[0] << "\n";
+ }
+ // Handle this as if the source ended:
+ handleInputClosure1();
+ return;
+ }
+
+ ++fInputTransportPacketCounter;
+
+ // Figure out how much of this Transport Packet contains PES data:
+ // (adaptation_field_control: 1 => payload only; 2 => adaptation field only;
+ //  3 => adaptation field followed by payload; fInputBuffer[4] is the
+ //  "adaptation_field_length" when an adaptation field is present)
+ u_int8_t adaptation_field_control = (fInputBuffer[3]&0x30)>>4;
+ u_int8_t totalHeaderSize
+ = adaptation_field_control <= 1 ? 4 : 5 + fInputBuffer[4];
+ if ((adaptation_field_control == 2 && totalHeaderSize != TRANSPORT_PACKET_SIZE) ||
+ (adaptation_field_control == 3 && totalHeaderSize >= TRANSPORT_PACKET_SIZE)) {
+ envir() << "Bad \"adaptation_field_length\": " << fInputBuffer[4] << "\n";
+ doGetNextFrame();
+ return;
+ }
+
+ // Check for a PCR:
+ // (fInputBuffer[5] holds the adaptation-field flags; 0x10 is "PCR_flag")
+ if (totalHeaderSize > 5 && (fInputBuffer[5]&0x10) != 0) {
+ // There's a PCR:
+ // The top 32 bits of the 33-bit, 90 kHz "program_clock_reference_base":
+ u_int32_t pcrBaseHigh
+ = (fInputBuffer[6]<<24)|(fInputBuffer[7]<<16)
+ |(fInputBuffer[8]<<8)|fInputBuffer[9];
+ float pcr = pcrBaseHigh/45000.0f; // i.e., /(90000/2), as the low bit is separate
+ if ((fInputBuffer[10]&0x80) != 0) pcr += 1/90000.0f; // add in low-bit (if set)
+ // The 9-bit, 27 MHz "program_clock_reference_extension":
+ unsigned short pcrExt = ((fInputBuffer[10]&0x01)<<8) | fInputBuffer[11];
+ pcr += pcrExt/27000000.0f;
+
+ if (!fHaveSeenFirstPCR) {
+ fFirstPCR = pcr;
+ fHaveSeenFirstPCR = True;
+ } else if (pcr < fLastPCR) {
+ // The PCR timestamp has gone backwards. Display a warning about this
+ // (because it indicates buggy Transport Stream data), and compensate for it.
+ envir() << "\nWarning: At about " << fLastPCR-fFirstPCR
+ << " seconds into the file, the PCR timestamp decreased - from "
+ << fLastPCR << " to " << pcr << "\n";
+ fFirstPCR -= (fLastPCR - pcr);
+ }
+ fLastPCR = pcr;
+ }
+
+ // Get the PID from the packet, and check for special tables: the PAT and PMT:
+ u_int16_t PID = ((fInputBuffer[1]&0x1F)<<8) | fInputBuffer[2];
+ if (PID == PAT_PID) {
+ analyzePAT(&fInputBuffer[totalHeaderSize], TRANSPORT_PACKET_SIZE-totalHeaderSize);
+ } else if (PID == fPMT_PID) {
+ analyzePMT(&fInputBuffer[totalHeaderSize], TRANSPORT_PACKET_SIZE-totalHeaderSize);
+ }
+
+ // Ignore transport packets for non-video programs,
+ // or packets with no data, or packets that duplicate the previous packet:
+ u_int8_t continuity_counter = fInputBuffer[3]&0x0F;
+ if ((PID != fVideo_PID) ||
+ !(adaptation_field_control == 1 || adaptation_field_control == 3) ||
+ continuity_counter == fLastContinuityCounter) {
+ doGetNextFrame();
+ return;
+ }
+ fLastContinuityCounter = continuity_counter;
+
+ // Also, if this is the start of a PES packet, then skip over the PES header:
+ // (detected by the PES packet start code prefix 0x000001 at the payload start)
+ Boolean payload_unit_start_indicator = (fInputBuffer[1]&0x40) != 0;
+ if (payload_unit_start_indicator && totalHeaderSize < TRANSPORT_PACKET_SIZE - 8
+ && fInputBuffer[totalHeaderSize] == 0x00 && fInputBuffer[totalHeaderSize+1] == 0x00
+ && fInputBuffer[totalHeaderSize+2] == 0x01) {
+ u_int8_t PES_header_data_length = fInputBuffer[totalHeaderSize+8];
+ totalHeaderSize += 9 + PES_header_data_length;
+ if (totalHeaderSize >= TRANSPORT_PACKET_SIZE) {
+ envir() << "Unexpectedly large PES header size: " << PES_header_data_length << "\n";
+ // Handle this as if the source ended:
+ handleInputClosure1();
+ return;
+ }
+ }
+
+ // The remaining data is Video Elementary Stream data. Add it to our parse buffer:
+ unsigned vesSize = TRANSPORT_PACKET_SIZE - totalHeaderSize;
+ memmove(&fParseBuffer[fParseBufferDataEnd], &fInputBuffer[totalHeaderSize], vesSize);
+ fParseBufferDataEnd += vesSize;
+
+ // And add a new index record noting where it came from:
+ addToTail(new IndexRecord(totalHeaderSize, vesSize, fInputTransportPacketCounter,
+ fLastPCR - fFirstPCR));
+
+ // Try again:
+ doGetNextFrame();
+}
+
+// Static trampoline for input-closure callbacks: forwards to the member
+// function "handleInputClosure1()" on the object in "clientData".
+void MPEG2IFrameIndexFromTransportStream::handleInputClosure(void* clientData) {
+ MPEG2IFrameIndexFromTransportStream* source
+ = (MPEG2IFrameIndexFromTransportStream*)clientData;
+ source->handleInputClosure1();
+}
+
+#define VIDEO_SEQUENCE_START_CODE 0xB3 // MPEG-1 or 2
+#define VISUAL_OBJECT_SEQUENCE_START_CODE 0xB0 // MPEG-4
+#define GROUP_START_CODE 0xB8 // MPEG-1 or 2
+#define GROUP_VOP_START_CODE 0xB3 // MPEG-4
+#define PICTURE_START_CODE 0x00 // MPEG-1 or 2
+#define VOP_START_CODE 0xB6 // MPEG-4
+
+// Handles end-of-input.  The first time through (if unparsed data remains,
+// and there's room for 4 more bytes), a fake Picture Header start code is
+// appended to flush the remaining data through the parser; any subsequent
+// closure is handled normally.
+void MPEG2IFrameIndexFromTransportStream::handleInputClosure1() {
+ if (++fClosureNumber == 1 && fParseBufferDataEnd > fParseBufferFrameStart
+ && fParseBufferDataEnd <= fParseBufferSize - 4) {
+ // This is the first time we saw EOF, and there's still data remaining to be
+ // parsed. Hack: Append a Picture Header code to the end of the unparsed
+ // data, and try again. This should use up all of the unparsed data.
+ fParseBuffer[fParseBufferDataEnd++] = 0;
+ fParseBuffer[fParseBufferDataEnd++] = 0;
+ fParseBuffer[fParseBufferDataEnd++] = 1;
+ fParseBuffer[fParseBufferDataEnd++] = PICTURE_START_CODE;
+
+ // Try again:
+ doGetNextFrame();
+ } else {
+ // Handle closure in the regular way:
+ handleClosure();
+ }
+}
+
+// Scans a Program Association Table payload ("pkt", "size" bytes) for the
+// first non-zero program_number, and records its PMT PID in "fPMT_PID".
+// (program_number 0 denotes the network PID, which we skip.)
+void MPEG2IFrameIndexFromTransportStream
+::analyzePAT(unsigned char* pkt, unsigned size) {
+ // Get the PMT_PID:
+ while (size >= 17) { // The table is large enough
+ u_int16_t program_number = (pkt[9]<<8) | pkt[10];
+ if (program_number != 0) {
+ fPMT_PID = ((pkt[11]&0x1F)<<8) | pkt[12];
+ return;
+ }
+
+ pkt += 4; size -= 4; // advance to the next (program_number, PID) entry
+ }
+}
+
+// Scans a Program Map Table payload ("pkt", "size" bytes) for the first
+// video elementary stream, recording its PID in "fVideo_PID" and noting
+// (via "fIsH264"/"fIsH265") which codec the stream uses.
+void MPEG2IFrameIndexFromTransportStream
+::analyzePMT(unsigned char* pkt, unsigned size) {
+ // Scan the "elementary_PID"s in the map, until we see the first video stream.
+
+ // First, get the "section_length", to get the table's size:
+ u_int16_t section_length = ((pkt[2]&0x0F)<<8) | pkt[3];
+ if ((unsigned)(4+section_length) < size) size = (4+section_length);
+
+ // Then, skip any descriptors following the "program_info_length":
+ if (size < 22) return; // not enough data
+ unsigned program_info_length = ((pkt[11]&0x0F)<<8) | pkt[12];
+ pkt += 13; size -= 13;
+ if (size < program_info_length) return; // not enough data
+ pkt += program_info_length; size -= program_info_length;
+
+ // Look at each ("stream_type","elementary_PID") pair, looking for a video stream:
+ // (stream_type 1 and 2 are MPEG-1 and MPEG-2 video, respectively)
+ while (size >= 9) {
+ u_int8_t stream_type = pkt[0];
+ u_int16_t elementary_PID = ((pkt[1]&0x1F)<<8) | pkt[2];
+ if (stream_type == 1 || stream_type == 2 ||
+ stream_type == 0x1B/*H.264 video*/ || stream_type == 0x24/*H.265 video*/) {
+ if (stream_type == 0x1B) fIsH264 = True;
+ else if (stream_type == 0x24) fIsH265 = True;
+ fVideo_PID = elementary_PID;
+ return;
+ }
+
+ // Skip this (non-video) stream's entry, including its descriptors:
+ u_int16_t ES_info_length = ((pkt[3]&0x0F)<<8) | pkt[4];
+ pkt += 5; size -= 5;
+ if (size < ES_info_length) return; // not enough data
+ pkt += ES_info_length; size -= ES_info_length;
+ }
+}
+
+// If the head index record has been parsed, removes it from the list and
+// delivers it to the client as an 11-byte record: type(1), start offset(1),
+// size(1), PCR as 24-bit little-endian integer part + 8-bit fraction(4),
+// then the transport packet number, little-endian(4).  JUNK records are
+// discarded (recursing to try the next record instead).
+// Returns True iff a delivery (to "fTo") was completed.
+// NOTE(review): the JUNK case recurses; a very long run of JUNK records
+// would deepen the stack accordingly - presumed acceptable here.
+Boolean MPEG2IFrameIndexFromTransportStream::deliverIndexRecord() {
+ IndexRecord* head = fHeadIndexRecord;
+ if (head == NULL) return False;
+
+ // Check whether the head record has been parsed yet:
+ if (head->recordType() == RECORD_UNPARSED) return False;
+
+ // Remove the head record (the one whose data we'll be delivering):
+ IndexRecord* next = head->next();
+ head->unlink();
+ if (next == head) {
+ // "head" was the only record in the (circular) list:
+ fHeadIndexRecord = fTailIndexRecord = NULL;
+ } else {
+ fHeadIndexRecord = next;
+ }
+
+ if (head->recordType() == RECORD_JUNK) {
+ // Don't actually deliver the data to the client:
+ delete head;
+ // Try to deliver the next record instead:
+ return deliverIndexRecord();
+ }
+
+ // Deliver data from the head record:
+#ifdef DEBUG
+ envir() << "delivering: " << *head << "\n";
+#endif
+ if (fMaxSize < 11) {
+ // The client's buffer can't hold even one index record:
+ fFrameSize = 0;
+ } else {
+ fTo[0] = (u_int8_t)(head->recordType());
+ fTo[1] = head->startOffset();
+ fTo[2] = head->size();
+ // Deliver the PCR, as 24 bits (integer part; little endian) + 8 bits (fractional part)
+ float pcr = head->pcr();
+ unsigned pcr_int = (unsigned)pcr;
+ u_int8_t pcr_frac = (u_int8_t)(256*(pcr-pcr_int));
+ fTo[3] = (unsigned char)(pcr_int);
+ fTo[4] = (unsigned char)(pcr_int>>8);
+ fTo[5] = (unsigned char)(pcr_int>>16);
+ fTo[6] = (unsigned char)(pcr_frac);
+ // Deliver the transport packet number (in little-endian order):
+ unsigned long tpn = head->transportPacketNumber();
+ fTo[7] = (unsigned char)(tpn);
+ fTo[8] = (unsigned char)(tpn>>8);
+ fTo[9] = (unsigned char)(tpn>>16);
+ fTo[10] = (unsigned char)(tpn>>24);
+ fFrameSize = 11;
+ }
+
+ // Free the (former) head record (as we're now done with it):
+ delete head;
+
+ // Complete delivery to the client:
+ afterGetting(this);
+ return True;
+}
+
+// Tries to parse one complete 'frame' out of the buffered VES data, then
+// tags the queued (unparsed) index records that cover that frame with its
+// record type (splitting the record that straddles the frame's end, and
+// marking leading garbage as JUNK).  Returns True iff a frame was parsed.
+Boolean MPEG2IFrameIndexFromTransportStream::parseFrame() {
+ // At this point, we have a queue of >=0 (unparsed) index records, representing
+ // the data in the parse buffer from "fParseBufferFrameStart"
+ // to "fParseBufferDataEnd". We now parse through this data, looking for
+ // a complete 'frame', where a 'frame', in this case, means:
+ // for MPEG video: a Video Sequence Header, GOP Header, Picture Header, or Slice
+ // for H.264 or H.265 video: a NAL unit
+
+ // Inspect the frame's initial 4-byte code, to make sure it starts with a system code:
+ if (fParseBufferDataEnd-fParseBufferFrameStart < 4) return False; // not enough data
+ unsigned numInitialBadBytes = 0;
+ unsigned char const* p = &fParseBuffer[fParseBufferFrameStart];
+ if (!(p[0] == 0 && p[1] == 0 && p[2] == 1)) {
+ // There's no system code at the beginning. Parse until we find one:
+ if (fParseBufferParseEnd == fParseBufferFrameStart + 4) {
+ // Start parsing from the beginning of the frame data:
+ fParseBufferParseEnd = fParseBufferFrameStart;
+ }
+ unsigned char nextCode;
+ if (!parseToNextCode(nextCode)) return False;
+
+ // The bytes we scanned over are garbage preceding the frame:
+ numInitialBadBytes = fParseBufferParseEnd - fParseBufferFrameStart;
+ fParseBufferFrameStart = fParseBufferParseEnd;
+ fParseBufferParseEnd += 4; // skip over the code that we just saw
+ p = &fParseBuffer[fParseBufferFrameStart];
+ }
+
+ // The byte after the 0x000001 prefix identifies the frame's type:
+ unsigned char curCode = p[3];
+ if (fIsH264) curCode &= 0x1F; // nal_unit_type
+ else if (fIsH265) curCode = (curCode&0x7E)>>1; // H.265 nal_unit_type
+
+ RecordType curRecordType;
+ unsigned char nextCode;
+ if (fIsH264) {
+ switch (curCode) {
+ case 1: // Coded slice of a non-IDR picture
+ curRecordType = RECORD_NAL_H264_NON_IFRAME;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 5: // Coded slice of an IDR picture
+ curRecordType = RECORD_NAL_H264_IFRAME;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 6: // Supplemental enhancement information (SEI)
+ curRecordType = RECORD_NAL_H264_SEI;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 7: // Sequence parameter set (SPS)
+ curRecordType = RECORD_NAL_H264_SPS;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 8: // Picture parameter set (PPS)
+ curRecordType = RECORD_NAL_H264_PPS;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ default:
+ curRecordType = RECORD_NAL_H264_OTHER;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ }
+ } else if (fIsH265) {
+ switch (curCode) {
+ case 19: // Coded slice segment of an IDR picture
+ case 20: // Coded slice segment of an IDR picture
+ curRecordType = RECORD_NAL_H265_IFRAME;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 32: // Video parameter set (VPS)
+ curRecordType = RECORD_NAL_H265_VPS;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 33: // Sequence parameter set (SPS)
+ curRecordType = RECORD_NAL_H265_SPS;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ case 34: // Picture parameter set (PPS)
+ curRecordType = RECORD_NAL_H265_PPS;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ default:
+ // Types <= 31 are VCL (slice) NAL units; others are non-VCL:
+ curRecordType = (curCode <= 31) ? RECORD_NAL_H265_NON_IFRAME : RECORD_NAL_H265_OTHER;
+ if (!parseToNextCode(nextCode)) return False;
+ break;
+ }
+ } else { // MPEG-1, 2, or 4
+ switch (curCode) {
+ case VIDEO_SEQUENCE_START_CODE:
+ case VISUAL_OBJECT_SEQUENCE_START_CODE:
+ curRecordType = RECORD_VSH;
+ // Include everything up to the start of the next GOP or picture:
+ while (1) {
+ if (!parseToNextCode(nextCode)) return False;
+ if (nextCode == GROUP_START_CODE ||
+ nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break;
+ fParseBufferParseEnd += 4; // skip over the code that we just saw
+ }
+ break;
+ case GROUP_START_CODE:
+ curRecordType = RECORD_GOP;
+ // Include everything up to the start of the next picture:
+ while (1) {
+ if (!parseToNextCode(nextCode)) return False;
+ if (nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break;
+ fParseBufferParseEnd += 4; // skip over the code that we just saw
+ }
+ break;
+ default: // picture
+ curRecordType = RECORD_PIC_NON_IFRAME; // may get changed to IFRAME later
+ // Include everything (e.g. slices) up to the next VSH/GOP/picture:
+ while (1) {
+ if (!parseToNextCode(nextCode)) return False;
+ if (nextCode == VIDEO_SEQUENCE_START_CODE ||
+ nextCode == VISUAL_OBJECT_SEQUENCE_START_CODE ||
+ nextCode == GROUP_START_CODE || nextCode == GROUP_VOP_START_CODE ||
+ nextCode == PICTURE_START_CODE || nextCode == VOP_START_CODE) break;
+ fParseBufferParseEnd += 4; // skip over the code that we just saw
+ }
+ break;
+ }
+ }
+
+ if (curRecordType == RECORD_PIC_NON_IFRAME) {
+ if (curCode == VOP_START_CODE) { // MPEG-4
+ // "vop_coding_type" (top 2 bits of the byte after the code) == 0 => I-VOP:
+ if ((fParseBuffer[fParseBufferFrameStart+4]&0xC0) == 0) {
+ // This is actually an I-frame. Note it as such:
+ curRecordType = RECORD_PIC_IFRAME;
+ }
+ } else { // MPEG-1 or 2
+ // "picture_coding_type" (bits 5-3 of the 6th byte) == 1 => I-frame:
+ if ((fParseBuffer[fParseBufferFrameStart+5]&0x38) == 0x08) {
+ // This is actually an I-frame. Note it as such:
+ curRecordType = RECORD_PIC_IFRAME;
+ }
+ }
+ }
+
+ // There is now a parsed 'frame', from "fParseBufferFrameStart"
+ // to "fParseBufferParseEnd". Tag the corresponding index records to note this:
+ unsigned frameSize = fParseBufferParseEnd - fParseBufferFrameStart + numInitialBadBytes;
+#ifdef DEBUG
+ envir() << "parsed " << recordTypeStr[curRecordType] << "; length "
+ << frameSize << "\n";
+#endif
+ for (IndexRecord* r = fHeadIndexRecord; ; r = r->next()) {
+ // Records wholly inside the leading garbage become JUNK:
+ if (numInitialBadBytes >= r->size()) {
+ r->recordType() = RECORD_JUNK;
+ numInitialBadBytes -= r->size();
+ } else {
+ r->recordType() = curRecordType;
+ }
+ if (r == fHeadIndexRecord) r->setFirstFlag();
+ // indicates that this is the first record for this frame
+
+ if (r->size() > frameSize) {
+ // This record contains extra data that's not part of the frame.
+ // Shorten this record, and move the extra data to a new record
+ // that comes afterwards:
+ u_int8_t newOffset = r->startOffset() + frameSize;
+ u_int8_t newSize = r->size() - frameSize;
+ r->size() = frameSize;
+#ifdef DEBUG
+ envir() << "tagged record (modified): " << *r << "\n";
+#endif
+
+ IndexRecord* newRecord
+ = new IndexRecord(newOffset, newSize, r->transportPacketNumber(), r->pcr());
+ newRecord->addAfter(r);
+ if (fTailIndexRecord == r) fTailIndexRecord = newRecord;
+#ifdef DEBUG
+ envir() << "added extra record: " << *newRecord << "\n";
+#endif
+ } else {
+#ifdef DEBUG
+ envir() << "tagged record: " << *r << "\n";
+#endif
+ }
+ frameSize -= r->size();
+ if (frameSize == 0) break;
+ if (r == fTailIndexRecord) { // this shouldn't happen
+ envir() << "!!!!!Internal consistency error!!!!!\n";
+ return False;
+ }
+ }
+
+ // Finally, update our parse state (to skip over the now-parsed data):
+ fParseBufferFrameStart = fParseBufferParseEnd;
+ fParseBufferParseEnd += 4; // to skip over the next code (that we found)
+
+ return True;
+}
+
+// Scans the parse buffer from "fParseBufferParseEnd" for the next 0x000001
+// start-code prefix.  On success, sets "nextCode" to the byte following the
+// prefix, leaves "fParseBufferParseEnd" pointing at the prefix, and returns
+// True.  Otherwise leaves "fParseBufferParseEnd" at the resume position and
+// returns False (more data needed).
+Boolean MPEG2IFrameIndexFromTransportStream
+::parseToNextCode(unsigned char& nextCode) {
+ unsigned char const* p = &fParseBuffer[fParseBufferParseEnd];
+ unsigned char const* end = &fParseBuffer[fParseBufferDataEnd];
+ while (p <= end-4) {
+ // A prefix must have 0 or 1 at its 3rd byte, so p[2] > 1 rules out any
+ // prefix starting at p, p+1, or p+2:
+ if (p[2] > 1) p += 3; // common case (optimized)
+ else if (p[2] == 0) ++p;
+ else if (p[0] == 0 && p[1] == 0) { // && p[2] == 1
+ // We found a code here:
+ nextCode = p[3];
+ fParseBufferParseEnd = p - &fParseBuffer[0]; // where we've gotten to
+ return True;
+ } else p += 3;
+ }
+
+ fParseBufferParseEnd = p - &fParseBuffer[0]; // where we've gotten to
+ return False; // no luck this time
+}
+
+// Shifts the not-yet-consumed data (starting at "fParseBufferFrameStart")
+// to the front of the parse buffer, adjusting the parse pointers to match,
+// so that more Transport Stream packets can be appended.
+void MPEG2IFrameIndexFromTransportStream::compactParseBuffer() {
+#ifdef DEBUG
+ envir() << "Compacting parse buffer: [" << fParseBufferFrameStart
+ << "," << fParseBufferParseEnd << "," << fParseBufferDataEnd << "]";
+#endif
+ memmove(&fParseBuffer[0], &fParseBuffer[fParseBufferFrameStart],
+ fParseBufferDataEnd - fParseBufferFrameStart);
+ fParseBufferDataEnd -= fParseBufferFrameStart;
+ fParseBufferParseEnd -= fParseBufferFrameStart;
+ fParseBufferFrameStart = 0;
+#ifdef DEBUG
+ envir() << "-> [" << fParseBufferFrameStart
+ << "," << fParseBufferParseEnd << "," << fParseBufferDataEnd << "]\n";
+#endif
+}
+
+// Appends "newIndexRecord" to the tail of the index-record list
+// (initializing the list if it's currently empty).
+void MPEG2IFrameIndexFromTransportStream::addToTail(IndexRecord* newIndexRecord) {
+#ifdef DEBUG
+ envir() << "adding new: " << *newIndexRecord << "\n";
+#endif
+ if (fTailIndexRecord == NULL) {
+ fHeadIndexRecord = fTailIndexRecord = newIndexRecord;
+ } else {
+ newIndexRecord->addAfter(fTailIndexRecord);
+ fTailIndexRecord = newIndexRecord;
+ }
+}
+
+////////// IndexRecord implementation //////////
+
+// Constructor.  A new record is initially self-linked (a one-element
+// circular list) and typed RECORD_UNPARSED until "parseFrame()" tags it.
+IndexRecord::IndexRecord(u_int8_t startOffset, u_int8_t size,
+ unsigned long transportPacketNumber, float pcr)
+ : fNext(this), fPrev(this), fRecordType(RECORD_UNPARSED),
+ fStartOffset(startOffset), fSize(size),
+ fPCR(pcr), fTransportPacketNumber(transportPacketNumber) {
+}
+
+// Destructor.  Unlinks this record, then recursively deletes its (former)
+// successor - so deleting any record deletes the rest of its list.
+IndexRecord::~IndexRecord() {
+ IndexRecord* nextRecord = next();
+ unlink();
+ if (nextRecord != this) delete nextRecord;
+}
+
+// Inserts this record into a circular list, immediately after "prev".
+void IndexRecord::addAfter(IndexRecord* prev) {
+ fNext = prev->fNext;
+ fPrev = prev;
+ prev->fNext->fPrev = this;
+ prev->fNext = this;
+}
+
+// Removes this record from its circular list, leaving it self-linked.
+void IndexRecord::unlink() {
+ fNext->fPrev = fPrev;
+ fPrev->fNext = fNext;
+ fNext = fPrev = this;
+}
diff --git a/liveMedia/MPEG2TransportFileServerMediaSubsession.cpp b/liveMedia/MPEG2TransportFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..241e97a
--- /dev/null
+++ b/liveMedia/MPEG2TransportFileServerMediaSubsession.cpp
@@ -0,0 +1,352 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-2 Transport Stream file.
+// Implementation
+
+#include "MPEG2TransportFileServerMediaSubsession.hh"
+#include "SimpleRTPSink.hh"
+
+MPEG2TransportFileServerMediaSubsession*
+MPEG2TransportFileServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* fileName,
+ char const* indexFileName,
+ Boolean reuseFirstSource) {
+ MPEG2TransportStreamIndexFile* indexFile;
+ if (indexFileName != NULL && reuseFirstSource) {
+ // Trick play cannot be supported when all clients reuse a single source, so ignore the index file:
+ env << "MPEG2TransportFileServerMediaSubsession::createNew(): ignoring the index file name, because \"reuseFirstSource\" is set\n";
+ indexFile = NULL;
+ } else {
+ indexFile = MPEG2TransportStreamIndexFile::createNew(env, indexFileName);
+ }
+ return new MPEG2TransportFileServerMediaSubsession(env, fileName, indexFile,
+ reuseFirstSource);
+}
+
+MPEG2TransportFileServerMediaSubsession
+::MPEG2TransportFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName,
+ MPEG2TransportStreamIndexFile* indexFile,
+ Boolean reuseFirstSource)
+ : FileServerMediaSubsession(env, fileName, reuseFirstSource),
+ fIndexFile(indexFile), fDuration(0.0), fClientSessionHashTable(NULL) {
+ if (fIndexFile != NULL) { // we support 'trick play'
+ fDuration = fIndexFile->getPlayingDuration();
+ fClientSessionHashTable = HashTable::create(ONE_WORD_HASH_KEYS);
+ }
+}
+
+MPEG2TransportFileServerMediaSubsession
+::~MPEG2TransportFileServerMediaSubsession() {
+ if (fIndexFile != NULL) { // we support 'trick play'
+ Medium::close(fIndexFile);
+
+ // Clean out the client session hash table:
+ while (1) {
+ ClientTrickPlayState* client
+ = (ClientTrickPlayState*)(fClientSessionHashTable->RemoveNext());
+ if (client == NULL) break;
+ delete client;
+ }
+ delete fClientSessionHashTable;
+ }
+}
+
+#define TRANSPORT_PACKET_SIZE 188
+#define TRANSPORT_PACKETS_PER_NETWORK_PACKET 7
+// The product of these two numbers must be enough to fit within a network packet
+
+void MPEG2TransportFileServerMediaSubsession
+::startStream(unsigned clientSessionId, void* streamToken, TaskFunc* rtcpRRHandler,
+ void* rtcpRRHandlerClientData, unsigned short& rtpSeqNum,
+ unsigned& rtpTimestamp,
+ ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+ void* serverRequestAlternativeByteHandlerClientData) {
+ if (fIndexFile != NULL) { // we support 'trick play'
+ ClientTrickPlayState* client = lookupClient(clientSessionId);
+ if (client != NULL && client->areChangingScale()) {
+ // First, handle this like a "PAUSE", except that we back up to the previous VSH
+ client->updateStateOnPlayChange(True);
+ OnDemandServerMediaSubsession::pauseStream(clientSessionId, streamToken);
+
+ // Then, adjust for the change of scale:
+ client->updateStateOnScaleChange();
+ }
+ }
+
+ // Call the original, default version of this routine:
+ OnDemandServerMediaSubsession::startStream(clientSessionId, streamToken,
+ rtcpRRHandler, rtcpRRHandlerClientData,
+ rtpSeqNum, rtpTimestamp,
+ serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
+}
+
+void MPEG2TransportFileServerMediaSubsession
+::pauseStream(unsigned clientSessionId, void* streamToken) {
+ if (fIndexFile != NULL) { // we support 'trick play'
+ ClientTrickPlayState* client = lookupClient(clientSessionId);
+ if (client != NULL) {
+ client->updateStateOnPlayChange(False);
+ }
+ }
+
+ // Call the original, default version of this routine:
+ OnDemandServerMediaSubsession::pauseStream(clientSessionId, streamToken);
+}
+
+void MPEG2TransportFileServerMediaSubsession
+::seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes) {
+ // Begin by calling the original, default version of this routine:
+ OnDemandServerMediaSubsession::seekStream(clientSessionId, streamToken, seekNPT, streamDuration, numBytes);
+
+ // Then, special handling specific to indexed Transport Stream files:
+ if (fIndexFile != NULL) { // we support 'trick play'
+ ClientTrickPlayState* client = lookupClient(clientSessionId);
+ if (client != NULL) {
+ unsigned long numTSPacketsToStream = client->updateStateFromNPT(seekNPT, streamDuration);
+ numBytes = numTSPacketsToStream*TRANSPORT_PACKET_SIZE;
+ }
+ }
+}
+
+void MPEG2TransportFileServerMediaSubsession
+::setStreamScale(unsigned clientSessionId, void* streamToken, float scale) {
+ if (fIndexFile != NULL) { // we support 'trick play'
+ ClientTrickPlayState* client = lookupClient(clientSessionId);
+ if (client != NULL) {
+ client->setNextScale(scale); // scale won't take effect until the next "PLAY"
+ }
+ }
+
+ // Call the original, default version of this routine:
+ OnDemandServerMediaSubsession::setStreamScale(clientSessionId, streamToken, scale);
+}
+
+void MPEG2TransportFileServerMediaSubsession
+::deleteStream(unsigned clientSessionId, void*& streamToken) {
+ if (fIndexFile != NULL) { // we support 'trick play'
+ ClientTrickPlayState* client = lookupClient(clientSessionId);
+ if (client != NULL) {
+ client->updateStateOnPlayChange(False);
+ }
+ }
+
+ // Call the original, default version of this routine:
+ OnDemandServerMediaSubsession::deleteStream(clientSessionId, streamToken);
+}
+
+ClientTrickPlayState* MPEG2TransportFileServerMediaSubsession::newClientTrickPlayState() {
+ return new ClientTrickPlayState(fIndexFile);
+}
+
+FramedSource* MPEG2TransportFileServerMediaSubsession
+::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) {
+ // Create the video source:
+ unsigned const inputDataChunkSize
+ = TRANSPORT_PACKETS_PER_NETWORK_PACKET*TRANSPORT_PACKET_SIZE;
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(envir(), fFileName, inputDataChunkSize);
+ if (fileSource == NULL) return NULL;
+ fFileSize = fileSource->fileSize();
+
+ // Use the file size and the duration to estimate the stream's bitrate:
+ if (fFileSize > 0 && fDuration > 0.0) {
+ estBitrate = (unsigned)((int64_t)fFileSize/(125*fDuration) + 0.5); // kbps, rounded
+ } else {
+ estBitrate = 5000; // kbps, estimate
+ }
+
+
+ // Create a framer for the Transport Stream:
+ MPEG2TransportStreamFramer* framer
+ = MPEG2TransportStreamFramer::createNew(envir(), fileSource);
+
+ if (fIndexFile != NULL) { // we support 'trick play'
+ // Keep state for this client (if we don't already have it):
+ ClientTrickPlayState* client = lookupClient(clientSessionId);
+ if (client == NULL) {
+ client = newClientTrickPlayState();
+ fClientSessionHashTable->Add((char const*)clientSessionId, client);
+ }
+ client->setSource(framer);
+ }
+
+ return framer;
+}
+
+RTPSink* MPEG2TransportFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char /*rtpPayloadTypeIfDynamic*/,
+ FramedSource* /*inputSource*/) {
+ return SimpleRTPSink::createNew(envir(), rtpGroupsock,
+ 33, 90000, "video", "MP2T",
+ 1, True, False /*no 'M' bit*/);
+}
+
+void MPEG2TransportFileServerMediaSubsession::testScaleFactor(float& scale) {
+ if (fIndexFile != NULL && fDuration > 0.0) {
+ // We support any integral scale, other than 0
+ int iScale = scale < 0.0 ? (int)(scale - 0.5f) : (int)(scale + 0.5f); // round
+ if (iScale == 0) iScale = 1;
+ scale = (float)iScale;
+ } else {
+ scale = 1.0f;
+ }
+}
+
+float MPEG2TransportFileServerMediaSubsession::duration() const {
+ return fDuration;
+}
+
+ClientTrickPlayState* MPEG2TransportFileServerMediaSubsession
+::lookupClient(unsigned clientSessionId) {
+ return (ClientTrickPlayState*)(fClientSessionHashTable->Lookup((char const*)clientSessionId));
+}
+
+
+////////// ClientTrickPlayState implementation //////////
+
+ClientTrickPlayState::ClientTrickPlayState(MPEG2TransportStreamIndexFile* indexFile)
+ : fIndexFile(indexFile),
+ fOriginalTransportStreamSource(NULL),
+ fTrickModeFilter(NULL), fTrickPlaySource(NULL),
+ fFramer(NULL),
+ fScale(1.0f), fNextScale(1.0f), fNPT(0.0f),
+ fTSRecordNum(0), fIxRecordNum(0) {
+}
+
+unsigned long ClientTrickPlayState::updateStateFromNPT(double npt, double streamDuration) {
+ fNPT = (float)npt;
+ // Map "fNPT" to the corresponding Transport Stream and Index record numbers:
+ unsigned long tsRecordNum, ixRecordNum;
+ fIndexFile->lookupTSPacketNumFromNPT(fNPT, tsRecordNum, ixRecordNum);
+
+ updateTSRecordNum();
+ if (tsRecordNum != fTSRecordNum) {
+ fTSRecordNum = tsRecordNum;
+ fIxRecordNum = ixRecordNum;
+
+ // Seek the source to the new record number:
+ reseekOriginalTransportStreamSource();
+ // Note: We assume that we're asked to seek only in normal
+ // (i.e., non trick play) mode, so we don't seek within the trick
+ // play source (if any).
+
+ fFramer->clearPIDStatusTable();
+ }
+
+ unsigned long numTSRecordsToStream = 0;
+ float pcrLimit = 0.0;
+ if (streamDuration > 0.0) {
+ // fNPT might have changed when we looked it up in the index file. Adjust "streamDuration" accordingly:
+ streamDuration += npt - (double)fNPT;
+
+ if (streamDuration > 0.0) {
+ // Specify that we want to stream no more data than this.
+
+ if (fNextScale == 1.0f) {
+ // We'll be streaming from the original file.
+ // Use the index file to figure out how many Transport Packets we get to stream:
+ unsigned long toTSRecordNum, toIxRecordNum;
+ float toNPT = (float)(fNPT + streamDuration);
+ fIndexFile->lookupTSPacketNumFromNPT(toNPT, toTSRecordNum, toIxRecordNum);
+ if (toTSRecordNum > tsRecordNum) { // sanity check
+ numTSRecordsToStream = toTSRecordNum - tsRecordNum;
+ }
+ } else {
+ // We'll be streaming from the trick play stream.
+ // It'd be difficult to figure out how many Transport Packets we need to stream, so instead set a PCR
+ // limit in the trick play stream. (We rely upon the fact that PCRs in the trick play stream start at 0.0)
+ int direction = fNextScale < 0.0 ? -1 : 1;
+ pcrLimit = (float)(streamDuration/(fNextScale*direction));
+ }
+ }
+ }
+ fFramer->setNumTSPacketsToStream(numTSRecordsToStream);
+ fFramer->setPCRLimit(pcrLimit);
+
+ return numTSRecordsToStream;
+}
+
+void ClientTrickPlayState::updateStateOnScaleChange() {
+ fScale = fNextScale;
+
+ // Change our source objects to reflect the change in scale:
+ // First, close the existing trick play source (if any):
+ if (fTrickPlaySource != NULL) {
+ fTrickModeFilter->forgetInputSource();
+ // so that the underlying Transport Stream source doesn't get deleted by:
+ Medium::close(fTrickPlaySource);
+ fTrickPlaySource = NULL;
+ fTrickModeFilter = NULL;
+ }
+ if (fNextScale != 1.0f) {
+ // Create a new trick play filter from the original Transport Stream source:
+ UsageEnvironment& env = fIndexFile->envir(); // alias
+ fTrickModeFilter = MPEG2TransportStreamTrickModeFilter
+ ::createNew(env, fOriginalTransportStreamSource, fIndexFile, int(fNextScale));
+ fTrickModeFilter->seekTo(fTSRecordNum, fIxRecordNum);
+
+ // And generate a Transport Stream from this:
+ fTrickPlaySource = MPEG2TransportStreamFromESSource::createNew(env);
+ fTrickPlaySource->addNewVideoSource(fTrickModeFilter, fIndexFile->mpegVersion());
+
+ fFramer->changeInputSource(fTrickPlaySource);
+ } else {
+ // Switch back to the original Transport Stream source:
+ reseekOriginalTransportStreamSource();
+ fFramer->changeInputSource(fOriginalTransportStreamSource);
+ }
+}
+
+void ClientTrickPlayState::updateStateOnPlayChange(Boolean reverseToPreviousVSH) {
+ updateTSRecordNum();
+ if (fTrickPlaySource == NULL) {
+ // We were in regular (1x) play. Use the index file to look up the
+ // index record number and npt from the current transport number:
+ fIndexFile->lookupPCRFromTSPacketNum(fTSRecordNum, reverseToPreviousVSH, fNPT, fIxRecordNum);
+ } else {
+ // We were in trick mode, and so already have the index record number.
+ // Get the transport record number and npt from this:
+ fIxRecordNum = fTrickModeFilter->nextIndexRecordNum();
+ if ((long)fIxRecordNum < 0) fIxRecordNum = 0; // we were at the start of the file
+ unsigned long transportRecordNum;
+ float pcr;
+ u_int8_t offset, size, recordType; // all dummy
+ if (fIndexFile->readIndexRecordValues(fIxRecordNum, transportRecordNum,
+ offset, size, pcr, recordType)) {
+ fTSRecordNum = transportRecordNum;
+ fNPT = pcr;
+ }
+ }
+}
+
+void ClientTrickPlayState::setSource(MPEG2TransportStreamFramer* framer) {
+ fFramer = framer;
+ fOriginalTransportStreamSource = (ByteStreamFileSource*)(framer->inputSource());
+}
+
+void ClientTrickPlayState::updateTSRecordNum(){
+ if (fFramer != NULL) fTSRecordNum += (unsigned long)(fFramer->tsPacketCount());
+}
+
+void ClientTrickPlayState::reseekOriginalTransportStreamSource() {
+ u_int64_t tsRecordNum64 = (u_int64_t)fTSRecordNum;
+ fOriginalTransportStreamSource->seekToByteAbsolute(tsRecordNum64*TRANSPORT_PACKET_SIZE);
+}
diff --git a/liveMedia/MPEG2TransportStreamAccumulator.cpp b/liveMedia/MPEG2TransportStreamAccumulator.cpp
new file mode 100644
index 0000000..200358b
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamAccumulator.cpp
@@ -0,0 +1,85 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Collects a stream of incoming MPEG Transport Stream packets into
+// a chunk sufficiently large to send in a single outgoing (RTP or UDP) packet.
+// Implementation
+
+#include "MPEG2TransportStreamAccumulator.hh"
+
+MPEG2TransportStreamAccumulator*
+MPEG2TransportStreamAccumulator::createNew(UsageEnvironment& env,
+ FramedSource* inputSource, unsigned maxPacketSize) {
+ return new MPEG2TransportStreamAccumulator(env, inputSource, maxPacketSize);
+}
+
+#ifndef TRANSPORT_PACKET_SIZE
+#define TRANSPORT_PACKET_SIZE 188
+#endif
+
+MPEG2TransportStreamAccumulator
+::MPEG2TransportStreamAccumulator(UsageEnvironment& env,
+ FramedSource* inputSource, unsigned maxPacketSize)
+ : FramedFilter(env, inputSource),
+ fDesiredPacketSize(maxPacketSize < TRANSPORT_PACKET_SIZE ? TRANSPORT_PACKET_SIZE : (maxPacketSize/TRANSPORT_PACKET_SIZE)),
+ fNumBytesGathered(0) {
+}
+
+MPEG2TransportStreamAccumulator::~MPEG2TransportStreamAccumulator() {
+}
+
+void MPEG2TransportStreamAccumulator::doGetNextFrame() {
+ if (fNumBytesGathered >= fDesiredPacketSize) {
+ // Complete the delivery to the client:
+ fFrameSize = fNumBytesGathered;
+ fNumBytesGathered = 0;
+ afterGetting(this);
+ } else {
+ // Ask for more data (delivered directly to the client's buffer):
+ fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this,
+ FramedSource::handleClosure, this);
+ }
+}
+
+void MPEG2TransportStreamAccumulator
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ MPEG2TransportStreamAccumulator* accumulator
+ = (MPEG2TransportStreamAccumulator*)clientData;
+ accumulator->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
+void MPEG2TransportStreamAccumulator
+::afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ if (fNumBytesGathered == 0) { // this is the first frame of the new chunk
+ fPresentationTime = presentationTime;
+ fDurationInMicroseconds = 0;
+ }
+ fNumBytesGathered += frameSize;
+ fTo += frameSize;
+ fMaxSize -= frameSize;
+ fDurationInMicroseconds += durationInMicroseconds;
+
+ // Try again to complete delivery:
+ doGetNextFrame();
+}
diff --git a/liveMedia/MPEG2TransportStreamDemux.cpp b/liveMedia/MPEG2TransportStreamDemux.cpp
new file mode 100644
index 0000000..e789af7
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamDemux.cpp
@@ -0,0 +1,49 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Demultiplexer for a MPEG Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamDemux.hh"
+#include "MPEG2TransportStreamParser.hh"
+
+MPEG2TransportStreamDemux* MPEG2TransportStreamDemux
+::createNew(UsageEnvironment& env, FramedSource* inputSource,
+ FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData) {
+ return new MPEG2TransportStreamDemux(env, inputSource, onCloseFunc, onCloseClientData);
+}
+
+MPEG2TransportStreamDemux
+::MPEG2TransportStreamDemux(UsageEnvironment& env, FramedSource* inputSource,
+ FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData)
+ : Medium(env),
+ fOnCloseFunc(onCloseFunc), fOnCloseClientData(onCloseClientData) {
+ fParser = new MPEG2TransportStreamParser(inputSource, handleEndOfFile, this);
+}
+
+MPEG2TransportStreamDemux::~MPEG2TransportStreamDemux() {
+ delete fParser;
+}
+
+void MPEG2TransportStreamDemux::handleEndOfFile(void* clientData) {
+ ((MPEG2TransportStreamDemux*)clientData)->handleEndOfFile();
+}
+
+void MPEG2TransportStreamDemux::handleEndOfFile() {
+ if (fOnCloseFunc != NULL) (*fOnCloseFunc)(fOnCloseClientData);
+ delete this;
+}
diff --git a/liveMedia/MPEG2TransportStreamDemuxedTrack.cpp b/liveMedia/MPEG2TransportStreamDemuxedTrack.cpp
new file mode 100644
index 0000000..38bd784
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamDemuxedTrack.cpp
@@ -0,0 +1,34 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media track, demultiplexed from a MPEG Transport Stream file
+// Implementation
+
+#include "MPEG2TransportStreamParser.hh"
+
+MPEG2TransportStreamDemuxedTrack
+::MPEG2TransportStreamDemuxedTrack(MPEG2TransportStreamParser& ourParser, u_int16_t pid)
+ : FramedSource(ourParser.envir()),
+ fOurParser(ourParser), fPID(pid) {
+}
+
+MPEG2TransportStreamDemuxedTrack::~MPEG2TransportStreamDemuxedTrack() {
+}
+
+void MPEG2TransportStreamDemuxedTrack::doGetNextFrame() {
+ fOurParser.continueParsing();
+}
diff --git a/liveMedia/MPEG2TransportStreamDemuxedTrack.hh b/liveMedia/MPEG2TransportStreamDemuxedTrack.hh
new file mode 100644
index 0000000..886795e
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamDemuxedTrack.hh
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media track, demultiplexed from a MPEG Transport Stream file
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_DEMUXED_TRACK_HH
+#define _MPEG2_TRANSPORT_STREAM_DEMUXED_TRACK_HH
+
+#ifndef _MPEG2_TRANSPORT_STREAM_DEMUX_HH
+#include "MPEG2TransportStreamDemux.hh"
+#endif
+
+class MPEG2TransportStreamDemuxedTrack: public FramedSource {
+public:
+ MPEG2TransportStreamDemuxedTrack(class MPEG2TransportStreamParser& ourParser, u_int16_t pid);
+ virtual ~MPEG2TransportStreamDemuxedTrack();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private: // We are accessed only by "MPEG2TransportStreamParser" (a friend)
+ friend class MPEG2TransportStreamParser;
+ unsigned char* to() { return fTo; }
+ unsigned maxSize() { return fMaxSize; }
+ unsigned& frameSize() { return fFrameSize; }
+ unsigned& numTruncatedBytes() { return fNumTruncatedBytes; }
+ struct timeval& presentationTime() { return fPresentationTime; }
+
+private:
+ class MPEG2TransportStreamParser& fOurParser;
+ u_int16_t fPID;
+};
+
+#endif
diff --git a/liveMedia/MPEG2TransportStreamFramer.cpp b/liveMedia/MPEG2TransportStreamFramer.cpp
new file mode 100644
index 0000000..2da8081
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamFramer.cpp
@@ -0,0 +1,290 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that passes through (unchanged) chunks that contain an integral number
+// of MPEG-2 Transport Stream packets, but returning (in "fDurationInMicroseconds")
+// an updated estimate of the time gap between chunks.
+// Implementation
+
+#include "MPEG2TransportStreamFramer.hh"
+#include <GroupsockHelper.hh> // for "gettimeofday()"
+
+#define TRANSPORT_PACKET_SIZE 188
+
+////////// Definitions of constants that control the behavior of this code /////////
+
+#if !defined(NEW_DURATION_WEIGHT)
+#define NEW_DURATION_WEIGHT 0.5
+ // How much weight to give to the latest duration measurement (must be <= 1)
+#endif
+
+#if !defined(TIME_ADJUSTMENT_FACTOR)
+#define TIME_ADJUSTMENT_FACTOR 0.8
+ // A factor by which to adjust the duration estimate to ensure that the overall
+ // packet transmission times remains matched with the PCR times (which will be the
+ // times that we expect receivers to play the incoming packets).
+ // (must be <= 1)
+#endif
+
+#if !defined(MAX_PLAYOUT_BUFFER_DURATION)
+#define MAX_PLAYOUT_BUFFER_DURATION 0.1 // (seconds)
+#endif
+
+#if !defined(PCR_PERIOD_VARIATION_RATIO)
+#define PCR_PERIOD_VARIATION_RATIO 0.5
+#endif
+
+////////// PIDStatus //////////
+
+class PIDStatus {
+public:
+ PIDStatus(double _firstClock, double _firstRealTime)
+ : firstClock(_firstClock), lastClock(_firstClock),
+ firstRealTime(_firstRealTime), lastRealTime(_firstRealTime),
+ lastPacketNum(0) {
+ }
+
+ double firstClock, lastClock, firstRealTime, lastRealTime;
+ u_int64_t lastPacketNum;
+};
+
+
+////////// MPEG2TransportStreamFramer //////////
+
+MPEG2TransportStreamFramer* MPEG2TransportStreamFramer
+::createNew(UsageEnvironment& env, FramedSource* inputSource) {
+ return new MPEG2TransportStreamFramer(env, inputSource);
+}
+
+MPEG2TransportStreamFramer
+::MPEG2TransportStreamFramer(UsageEnvironment& env, FramedSource* inputSource)
+ : FramedFilter(env, inputSource),
+ fTSPacketCount(0), fTSPacketDurationEstimate(0.0), fTSPCRCount(0),
+ fLimitNumTSPacketsToStream(False), fNumTSPacketsToStream(0),
+ fLimitTSPacketsToStreamByPCR(False), fPCRLimit(0.0) {
+ fPIDStatusTable = HashTable::create(ONE_WORD_HASH_KEYS);
+}
+
+MPEG2TransportStreamFramer::~MPEG2TransportStreamFramer() {
+ clearPIDStatusTable();
+ delete fPIDStatusTable;
+}
+
+void MPEG2TransportStreamFramer::clearPIDStatusTable() {
+ PIDStatus* pidStatus;
+ while ((pidStatus = (PIDStatus*)fPIDStatusTable->RemoveNext()) != NULL) {
+ delete pidStatus;
+ }
+}
+
+void MPEG2TransportStreamFramer::setNumTSPacketsToStream(unsigned long numTSRecordsToStream) {
+ fNumTSPacketsToStream = numTSRecordsToStream;
+ fLimitNumTSPacketsToStream = numTSRecordsToStream > 0;
+}
+
+void MPEG2TransportStreamFramer::setPCRLimit(float pcrLimit) {
+ fPCRLimit = pcrLimit;
+ fLimitTSPacketsToStreamByPCR = pcrLimit != 0.0;
+}
+
+void MPEG2TransportStreamFramer::doGetNextFrame() {
+ if (fLimitNumTSPacketsToStream) {
+ if (fNumTSPacketsToStream == 0) {
+ handleClosure();
+ return;
+ }
+ if (fNumTSPacketsToStream*TRANSPORT_PACKET_SIZE < fMaxSize) {
+ fMaxSize = fNumTSPacketsToStream*TRANSPORT_PACKET_SIZE;
+ }
+ }
+
+ // Read directly from our input source into our client's buffer:
+ fFrameSize = 0;
+ fInputSource->getNextFrame(fTo, fMaxSize,
+ afterGettingFrame, this,
+ FramedSource::handleClosure, this);
+}
+
+void MPEG2TransportStreamFramer::doStopGettingFrames() {
+ FramedFilter::doStopGettingFrames();
+ fTSPacketCount = 0;
+ fTSPCRCount = 0;
+
+ clearPIDStatusTable();
+}
+
+void MPEG2TransportStreamFramer
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned /*numTruncatedBytes*/,
+ struct timeval presentationTime,
+ unsigned /*durationInMicroseconds*/) {
+ MPEG2TransportStreamFramer* framer = (MPEG2TransportStreamFramer*)clientData;
+ framer->afterGettingFrame1(frameSize, presentationTime);
+}
+
+#define TRANSPORT_SYNC_BYTE 0x47
+
+void MPEG2TransportStreamFramer::afterGettingFrame1(unsigned frameSize,
+ struct timeval presentationTime) {
+ fFrameSize += frameSize;
+ unsigned const numTSPackets = fFrameSize/TRANSPORT_PACKET_SIZE;
+ fNumTSPacketsToStream -= numTSPackets;
+ fFrameSize = numTSPackets*TRANSPORT_PACKET_SIZE; // an integral # of TS packets
+ if (fFrameSize == 0) {
+ // We didn't read a complete TS packet; assume that the input source has closed.
+ handleClosure();
+ return;
+ }
+
+ // Make sure the data begins with a sync byte:
+ unsigned syncBytePosition;
+ for (syncBytePosition = 0; syncBytePosition < fFrameSize; ++syncBytePosition) {
+ if (fTo[syncBytePosition] == TRANSPORT_SYNC_BYTE) break;
+ }
+ if (syncBytePosition == fFrameSize) {
+ envir() << "No Transport Stream sync byte in data.";
+ handleClosure();
+ return;
+ } else if (syncBytePosition > 0) {
+ // There's a sync byte, but not at the start of the data. Move the good data
+ // to the start of the buffer, then read more to fill it up again:
+ memmove(fTo, &fTo[syncBytePosition], fFrameSize - syncBytePosition);
+ fFrameSize -= syncBytePosition;
+ fInputSource->getNextFrame(&fTo[fFrameSize], syncBytePosition,
+ afterGettingFrame, this,
+ FramedSource::handleClosure, this);
+ return;
+ } // else normal case: the data begins with a sync byte
+
+ fPresentationTime = presentationTime;
+
+ // Scan through the TS packets that we read, and update our estimate of
+ // the duration of each packet:
+ struct timeval tvNow;
+ gettimeofday(&tvNow, NULL);
+ double timeNow = tvNow.tv_sec + tvNow.tv_usec/1000000.0;
+ for (unsigned i = 0; i < numTSPackets; ++i) {
+ if (!updateTSPacketDurationEstimate(&fTo[i*TRANSPORT_PACKET_SIZE], timeNow)) {
+ // We hit a preset limit (based on PCR) within the stream. Handle this as if the input source has closed:
+ handleClosure();
+ return;
+ }
+ }
+
+ fDurationInMicroseconds
+ = numTSPackets * (unsigned)(fTSPacketDurationEstimate*1000000);
+
+ // Complete the delivery to our client:
+ afterGetting(this);
+}
+
+Boolean MPEG2TransportStreamFramer::updateTSPacketDurationEstimate(unsigned char* pkt, double timeNow) {
+ // Sanity check: Make sure we start with the sync byte:
+ if (pkt[0] != TRANSPORT_SYNC_BYTE) {
+ envir() << "Missing sync byte!\n";
+ return True;
+ }
+
+ ++fTSPacketCount;
+
+ // If this packet doesn't contain a PCR, then we're not interested in it:
+ u_int8_t const adaptation_field_control = (pkt[3]&0x30)>>4;
+ if (adaptation_field_control != 2 && adaptation_field_control != 3) return True;
+ // (values other than 2 or 3 mean there's no adaptation_field, hence no PCR)
+
+ u_int8_t const adaptation_field_length = pkt[4];
+ if (adaptation_field_length == 0) return True;
+
+ u_int8_t const discontinuity_indicator = pkt[5]&0x80;
+ u_int8_t const pcrFlag = pkt[5]&0x10;
+ if (pcrFlag == 0) return True; // no PCR
+
+ // There's a PCR. Get it, and the PID:
+ ++fTSPCRCount;
+ u_int32_t pcrBaseHigh = (pkt[6]<<24)|(pkt[7]<<16)|(pkt[8]<<8)|pkt[9];
+ double clock = pcrBaseHigh/45000.0;
+ if ((pkt[10]&0x80) != 0) clock += 1/90000.0; // add in low-bit (if set)
+ unsigned short pcrExt = ((pkt[10]&0x01)<<8) | pkt[11];
+ clock += pcrExt/27000000.0;
+ if (fLimitTSPacketsToStreamByPCR) {
+ if (clock > fPCRLimit) {
+ // We've hit a preset limit within the stream:
+ return False;
+ }
+ }
+
+ unsigned pid = ((pkt[1]&0x1F)<<8) | pkt[2];
+
+ // Check whether we already have a record of a PCR for this PID:
+ PIDStatus* pidStatus = (PIDStatus*)(fPIDStatusTable->Lookup((char*)pid));
+
+ if (pidStatus == NULL) {
+ // We're seeing this PID's PCR for the first time:
+ pidStatus = new PIDStatus(clock, timeNow);
+ fPIDStatusTable->Add((char*)pid, pidStatus);
+#ifdef DEBUG_PCR
+ fprintf(stderr, "PID 0x%x, FIRST PCR 0x%08x+%d:%03x == %f @ %f, pkt #%lu\n", pid, pcrBaseHigh, pkt[10]>>7, pcrExt, clock, timeNow, fTSPacketCount);
+#endif
+ } else {
+ // We've seen this PID's PCR before; update our per-packet duration estimate:
+ int64_t packetsSinceLast = (int64_t)(fTSPacketCount - pidStatus->lastPacketNum);
+ // it's "int64_t" because some compilers can't convert "u_int64_t" -> "double"
+ double durationPerPacket = (clock - pidStatus->lastClock)/packetsSinceLast;
+
+ // Hack (suggested by "Romain"): Don't update our estimate if this PCR appeared unusually quickly.
+ // (This can produce more accurate estimates for wildly VBR streams.)
+ double meanPCRPeriod = 0.0;
+ if (fTSPCRCount > 0) {
+ double tsPacketCount = (double)(int64_t)fTSPacketCount;
+ double tsPCRCount = (double)(int64_t)fTSPCRCount;
+ meanPCRPeriod = tsPacketCount/tsPCRCount;
+ if (packetsSinceLast < meanPCRPeriod*PCR_PERIOD_VARIATION_RATIO) return True;
+ }
+
+ if (fTSPacketDurationEstimate == 0.0) { // we've just started
+ fTSPacketDurationEstimate = durationPerPacket;
+ } else if (discontinuity_indicator == 0 && durationPerPacket >= 0.0) {
+ fTSPacketDurationEstimate
+ = durationPerPacket*NEW_DURATION_WEIGHT
+ + fTSPacketDurationEstimate*(1-NEW_DURATION_WEIGHT);
+
+ // Also adjust the duration estimate to try to ensure that the transmission
+ // rate matches the playout rate:
+ double transmitDuration = timeNow - pidStatus->firstRealTime;
+ double playoutDuration = clock - pidStatus->firstClock;
+ if (transmitDuration > playoutDuration) {
+ fTSPacketDurationEstimate *= TIME_ADJUSTMENT_FACTOR; // reduce estimate
+ } else if (transmitDuration + MAX_PLAYOUT_BUFFER_DURATION < playoutDuration) {
+ fTSPacketDurationEstimate /= TIME_ADJUSTMENT_FACTOR; // increase estimate
+ }
+ } else {
+ // the PCR has a discontinuity from its previous value; don't use it now,
+ // but reset our PCR and real-time values to compensate:
+ pidStatus->firstClock = clock;
+ pidStatus->firstRealTime = timeNow;
+ }
+#ifdef DEBUG_PCR
+ fprintf(stderr, "PID 0x%x, PCR 0x%08x+%d:%03x == %f @ %f (diffs %f @ %f), pkt #%lu, discon %d => this duration %f, new estimate %f, mean PCR period=%f\n", pid, pcrBaseHigh, pkt[10]>>7, pcrExt, clock, timeNow, clock - pidStatus->firstClock, timeNow - pidStatus->firstRealTime, fTSPacketCount, discontinuity_indicator != 0, durationPerPacket, fTSPacketDurationEstimate, meanPCRPeriod );
+#endif
+ }
+
+ pidStatus->lastClock = clock;
+ pidStatus->lastRealTime = timeNow;
+ pidStatus->lastPacketNum = fTSPacketCount;
+
+ return True;
+}
diff --git a/liveMedia/MPEG2TransportStreamFromESSource.cpp b/liveMedia/MPEG2TransportStreamFromESSource.cpp
new file mode 100644
index 0000000..2586e16
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamFromESSource.cpp
@@ -0,0 +1,266 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter for converting one or more MPEG Elementary Streams
+// to a MPEG-2 Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamFromESSource.hh"
+
+#define SIMPLE_PES_HEADER_SIZE 14
+#define INPUT_BUFFER_SIZE (SIMPLE_PES_HEADER_SIZE + 2*MPEG2TransportStreamFromESSource::maxInputESFrameSize)
+#define LOW_WATER_MARK 1000 // <= MPEG2TransportStreamFromESSource::maxInputESFrameSize
+
+////////// InputESSourceRecord definition //////////
+
+// One per input Elementary Stream. Each record owns a buffer into which the
+// input source's frames are packed (preceded by a simple PES header), and the
+// records form a singly-linked list headed by the parent multiplexor.
+class InputESSourceRecord {
+public:
+ InputESSourceRecord(MPEG2TransportStreamFromESSource& parent,
+ FramedSource* inputSource,
+ u_int8_t streamId, int mpegVersion,
+ InputESSourceRecord* next, int16_t PID = -1);
+ virtual ~InputESSourceRecord();
+
+ InputESSourceRecord* next() const { return fNext; }
+ FramedSource* inputSource() const { return fInputSource; }
+
+ // Arrange to read more data from our input source, if the buffer is low:
+ void askForNewData();
+ // If our buffer is sufficiently full (and not already handed to the parent),
+ // complete its PES header and deliver it; returns True iff a delivery was made.
+ Boolean deliverBufferToClient();
+
+ unsigned char* buffer() const { return fInputBuffer; }
+ void reset() {
+ // Reset the buffer for future use:
+ fInputBufferBytesAvailable = 0;
+ fInputBufferInUse = False;
+ }
+
+private:
+ // Callback (and its non-static continuation) invoked when our input source
+ // delivers a frame into "fInputBuffer":
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime);
+
+private:
+ InputESSourceRecord* fNext; // next record in the list (deleted by our destructor)
+ MPEG2TransportStreamFromESSource& fParent;
+ FramedSource* fInputSource;
+ u_int8_t fStreamId; // PES "stream_id" (0xE0.. for video, 0xC0.. for audio)
+ int fMPEGVersion;
+ unsigned char* fInputBuffer; // INPUT_BUFFER_SIZE bytes; begins with a SIMPLE_PES_HEADER_SIZE header
+ unsigned fInputBufferBytesAvailable;
+ Boolean fInputBufferInUse; // True while the parent is consuming this buffer
+ MPEG1or2Demux::SCR fSCR; // SCR/PTS derived from the first frame's presentation time
+ int16_t fPID; // output TS PID, or -1 => use "fStreamId" as the PID
+};
+
+
+////////// MPEG2TransportStreamFromESSource implementation //////////
+
+// Maximum size (in bytes) of a single input Elementary Stream frame.
+// This value sizes each record's input buffer (see INPUT_BUFFER_SIZE above).
+unsigned MPEG2TransportStreamFromESSource::maxInputESFrameSize = 100000; // bytes
+
+// Factory function: returns a new multiplexor with no input sources yet
+// (sources are added later via "addNewVideoSource()"/"addNewAudioSource()").
+MPEG2TransportStreamFromESSource* MPEG2TransportStreamFromESSource
+::createNew(UsageEnvironment& env) {
+ return new MPEG2TransportStreamFromESSource(env);
+}
+
+// Adds a video ES input. The PES stream_id is 0xE0|n, where n counts the
+// video sources added so far (mod 16). Also records that we have video,
+// which affects PCR stream selection in the base class.
+void MPEG2TransportStreamFromESSource
+::addNewVideoSource(FramedSource* inputSource, int mpegVersion, int16_t PID) {
+ u_int8_t streamId = 0xE0 | (fVideoSourceCounter++&0x0F);
+ addNewInputSource(inputSource, streamId, mpegVersion, PID);
+ fHaveVideoStreams = True;
+}
+
+// Adds an audio ES input. The PES stream_id is 0xC0|n, where n counts the
+// audio sources added so far (mod 16).
+void MPEG2TransportStreamFromESSource
+::addNewAudioSource(FramedSource* inputSource, int mpegVersion, int16_t PID) {
+ u_int8_t streamId = 0xC0 | (fAudioSourceCounter++&0x0F);
+ addNewInputSource(inputSource, streamId, mpegVersion, PID);
+}
+
+// Constructor: starts with an empty source list; "fHaveVideoStreams"
+// (a base-class member) is cleared until a video source is added.
+MPEG2TransportStreamFromESSource
+::MPEG2TransportStreamFromESSource(UsageEnvironment& env)
+ : MPEG2TransportStreamMultiplexor(env),
+ fInputSources(NULL), fVideoSourceCounter(0), fAudioSourceCounter(0),
+ fAwaitingBackgroundDelivery(False) {
+ fHaveVideoStreams = False; // unless we add a video source
+}
+
+MPEG2TransportStreamFromESSource::~MPEG2TransportStreamFromESSource() {
+ doStopGettingFrames();
+ // Deleting the head record recursively deletes the rest of the list
+ // (see "InputESSourceRecord::~InputESSourceRecord()"):
+ delete fInputSources;
+}
+
+// Propagates a stop request from the client to each of our input sources:
+void MPEG2TransportStreamFromESSource::doStopGettingFrames() {
+ // Stop each input source:
+ for (InputESSourceRecord* sourceRec = fInputSources; sourceRec != NULL;
+ sourceRec = sourceRec->next()) {
+ sourceRec->inputSource()->stopGettingFrames();
+ }
+}
+
+// Called by the base class when it needs a new PES buffer to multiplex.
+// "oldBuffer" (if non-NULL) is a buffer that the base class has finished with;
+// we find its owning record and mark it reusable. We then try to deliver an
+// already-filled buffer; failing that, we set "fAwaitingBackgroundDelivery"
+// (so a later "afterGettingFrame1()" retries us) and ask every input for data.
+void MPEG2TransportStreamFromESSource
+::awaitNewBuffer(unsigned char* oldBuffer) {
+ InputESSourceRecord* sourceRec;
+ // Begin by resetting the old buffer:
+ if (oldBuffer != NULL) {
+ for (sourceRec = fInputSources; sourceRec != NULL;
+ sourceRec = sourceRec->next()) {
+ if (sourceRec->buffer() == oldBuffer) {
+ sourceRec->reset();
+ break;
+ }
+ }
+ fAwaitingBackgroundDelivery = False;
+ }
+
+ if (isCurrentlyAwaitingData()) {
+ // Try to deliver one filled-in buffer to the client:
+ for (sourceRec = fInputSources; sourceRec != NULL;
+ sourceRec = sourceRec->next()) {
+ if (sourceRec->deliverBufferToClient()) return;
+ }
+ fAwaitingBackgroundDelivery = True;
+ }
+
+ // No filled-in buffers are available. Ask each of our inputs for data:
+ for (sourceRec = fInputSources; sourceRec != NULL;
+ sourceRec = sourceRec->next()) {
+ sourceRec->askForNewData();
+ }
+}
+
+// Common helper for the two public add*Source() routines: prepends a new
+// record for "inputSource" to our singly-linked source list.
+void MPEG2TransportStreamFromESSource
+::addNewInputSource(FramedSource* inputSource,
+ u_int8_t streamId, int mpegVersion, int16_t PID) {
+ if (inputSource == NULL) return;
+ fInputSources = new InputESSourceRecord(*this, inputSource, streamId,
+ mpegVersion, fInputSources, PID);
+}
+
+
+////////// InputESSourceRecord implementation //////////
+
+// Constructor: allocates the (PES-header + ES data) input buffer and marks it
+// empty via "reset()". "next" is the current list head; we become the new head.
+InputESSourceRecord
+::InputESSourceRecord(MPEG2TransportStreamFromESSource& parent,
+ FramedSource* inputSource,
+ u_int8_t streamId, int mpegVersion,
+ InputESSourceRecord* next, int16_t PID)
+ : fNext(next), fParent(parent), fInputSource(inputSource),
+ fStreamId(streamId), fMPEGVersion(mpegVersion), fPID(PID) {
+ fInputBuffer = new unsigned char[INPUT_BUFFER_SIZE];
+ reset();
+}
+
+// Destructor: closes our input source, frees our buffer, and then deletes the
+// next record - so deleting the list head tears down the whole list.
+InputESSourceRecord::~InputESSourceRecord() {
+ Medium::close(fInputSource);
+ delete[] fInputBuffer;
+ delete fNext;
+}
+
+// If our buffer is idle and not yet sufficiently full, arrange to read the
+// next ES frame into it (after first writing a minimal PES header when the
+// buffer is empty). No-op while the parent is still using the buffer.
+void InputESSourceRecord::askForNewData() {
+ if (fInputBufferInUse) return;
+
+ if (fInputBufferBytesAvailable == 0) {
+ // Reset our buffer, by adding a simple PES header at the start:
+ fInputBuffer[0] = 0; fInputBuffer[1] = 0; fInputBuffer[2] = 1;
+ fInputBuffer[3] = fStreamId;
+ fInputBuffer[4] = 0; fInputBuffer[5] = 0; // fill in later with the length
+ fInputBuffer[6] = 0x80;
+ fInputBuffer[7] = 0x80; // include a PTS
+ fInputBuffer[8] = 5; // PES_header_data_length (enough for a PTS)
+ // fInputBuffer[9..13] will be the PTS; fill this in later
+ fInputBufferBytesAvailable = SIMPLE_PES_HEADER_SIZE;
+ }
+ if (fInputBufferBytesAvailable < LOW_WATER_MARK &&
+ !fInputSource->isCurrentlyAwaitingData()) {
+ // We don't yet have enough data in our buffer. Arrange to read more:
+ // (On input-source closure, notify the parent, not this record.)
+ fInputSource->getNextFrame(&fInputBuffer[fInputBufferBytesAvailable],
+ INPUT_BUFFER_SIZE-fInputBufferBytesAvailable,
+ afterGettingFrame, this,
+ FramedSource::handleClosure, &fParent);
+ }
+}
+
+// Completes the PES header (length and PTS fields) and hands the buffer to the
+// parent multiplexor. Returns False (without delivering) if the buffer is
+// already in use, or does not yet hold at least LOW_WATER_MARK bytes.
+Boolean InputESSourceRecord::deliverBufferToClient() {
+ if (fInputBufferInUse || fInputBufferBytesAvailable < LOW_WATER_MARK) return False;
+
+ // Fill in the PES_packet_length field that we left unset before:
+ unsigned PES_packet_length = fInputBufferBytesAvailable - 6;
+ if (PES_packet_length > 0xFFFF) {
+ // Set the PES_packet_length field to 0. This indicates an unbounded length (see ISO 13818-1, 2.4.3.7)
+ PES_packet_length = 0;
+ }
+ fInputBuffer[4] = PES_packet_length>>8;
+ fInputBuffer[5] = PES_packet_length;
+
+ // Fill in the PES PTS (from our SCR):
+ // The 33-bit PTS is spread over bytes 9-13, interleaved with marker bits
+ // (the |0x01 terms) and the 0x20 "PTS only" prefix, per ISO 13818-1:
+ fInputBuffer[9] = 0x20|(fSCR.highBit<<3)|(fSCR.remainingBits>>29)|0x01;
+ fInputBuffer[10] = fSCR.remainingBits>>22;
+ fInputBuffer[11] = (fSCR.remainingBits>>14)|0x01;
+ fInputBuffer[12] = fSCR.remainingBits>>7;
+ fInputBuffer[13] = (fSCR.remainingBits<<1)|0x01;
+
+ fInputBufferInUse = True;
+
+ // Do the delivery:
+ fParent.handleNewBuffer(fInputBuffer, fInputBufferBytesAvailable,
+ fMPEGVersion, fSCR, fPID);
+
+ return True;
+}
+
+// Static trampoline for "FramedSource::getNextFrame()": forwards to the
+// member-function continuation on the record passed as "clientData".
+void InputESSourceRecord
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned /*durationInMicroseconds*/) {
+ InputESSourceRecord* source = (InputESSourceRecord*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime);
+}
+// Continuation after a frame has been read into our buffer: warns about
+// truncation, derives our SCR from the first frame's presentation time,
+// accounts for the new bytes, and retries delivery if the parent is waiting.
+void InputESSourceRecord
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime) {
+ if (numTruncatedBytes > 0) {
+ fParent.envir() << "MPEG2TransportStreamFromESSource: input buffer too small; increase \"MPEG2TransportStreamFromESSource::maxInputESFrameSize\" by at least "
+ << numTruncatedBytes << " bytes!\n";
+ }
+
+ if (fInputBufferBytesAvailable == SIMPLE_PES_HEADER_SIZE) {
+ // The buffer holds only the PES header, so this is the first frame since
+ // the last reset. Use this presentationTime for our SCR:
+ // (90000 = the 90 kHz MPEG system clock; 45000 and the usec scalings are
+ // the same conversion, halved for the 33rd ("high") bit.)
+ fSCR.highBit
+ = ((presentationTime.tv_sec*45000 + (presentationTime.tv_usec*9)/200)&
+ 0x80000000) != 0;
+ fSCR.remainingBits
+ = presentationTime.tv_sec*90000 + (presentationTime.tv_usec*9)/100;
+ fSCR.extension = (presentationTime.tv_usec*9)%100;
+#ifdef DEBUG_SCR
+ fprintf(stderr, "PES header: stream_id 0x%02x, pts: %u.%06u => SCR 0x%x%08x:%03x\n", fStreamId, (unsigned)presentationTime.tv_sec, (unsigned)presentationTime.tv_usec, fSCR.highBit, fSCR.remainingBits, fSCR.extension);
+#endif
+ }
+
+ fInputBufferBytesAvailable += frameSize;
+
+ fParent.fPresentationTime = presentationTime;
+
+ // Now that we have new input data, check if we can deliver to the client:
+ if (fParent.fAwaitingBackgroundDelivery) {
+ fParent.fAwaitingBackgroundDelivery = False;
+ fParent.awaitNewBuffer(NULL);
+ }
+}
diff --git a/liveMedia/MPEG2TransportStreamFromPESSource.cpp b/liveMedia/MPEG2TransportStreamFromPESSource.cpp
new file mode 100644
index 0000000..45d8cbc
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamFromPESSource.cpp
@@ -0,0 +1,74 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter for converting a stream of MPEG PES packets to a MPEG-2 Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamFromPESSource.hh"
+
+#define MAX_PES_PACKET_SIZE (6+65535)
+
+// Factory function: wraps "inputSource" (a demuxed PES stream) in a new filter.
+MPEG2TransportStreamFromPESSource* MPEG2TransportStreamFromPESSource
+::createNew(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource) {
+ return new MPEG2TransportStreamFromPESSource(env, inputSource);
+}
+
+// Constructor: allocates a buffer big enough for one maximum-size PES packet
+// (6-byte header + 16-bit-length payload; see MAX_PES_PACKET_SIZE).
+MPEG2TransportStreamFromPESSource
+::MPEG2TransportStreamFromPESSource(UsageEnvironment& env,
+ MPEG1or2DemuxedElementaryStream* inputSource)
+ : MPEG2TransportStreamMultiplexor(env),
+ fInputSource(inputSource) {
+ fInputBuffer = new unsigned char[MAX_PES_PACKET_SIZE];
+}
+
+// Destructor: closes our input source and frees the PES packet buffer.
+MPEG2TransportStreamFromPESSource::~MPEG2TransportStreamFromPESSource() {
+ Medium::close(fInputSource);
+ delete[] fInputBuffer;
+}
+
+// Propagates a stop request from the client to our (single) input source:
+void MPEG2TransportStreamFromPESSource::doStopGettingFrames() {
+ fInputSource->stopGettingFrames();
+}
+
+// Called by the base class when it needs more data: read the next PES packet
+// into "fInputBuffer". (The old buffer needs no bookkeeping here, because we
+// reuse the single buffer for every read.)
+void MPEG2TransportStreamFromPESSource
+::awaitNewBuffer(unsigned char* /*oldBuffer*/) {
+ fInputSource->getNextFrame(fInputBuffer, MAX_PES_PACKET_SIZE,
+ afterGettingFrame, this,
+ FramedSource::handleClosure, this);
+}
+
+// Static trampoline for "FramedSource::getNextFrame()": forwards to the
+// member-function continuation on the filter passed as "clientData".
+void MPEG2TransportStreamFromPESSource
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ MPEG2TransportStreamFromPESSource* source
+ = (MPEG2TransportStreamFromPESSource*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
+// Continuation after a PES packet has been read: hand it to the base-class
+// multiplexor. Packets shorter than 4 bytes (too short to even hold the
+// start-code prefix + stream_id that "handleNewBuffer()" inspects) are dropped.
+void MPEG2TransportStreamFromPESSource
+::afterGettingFrame1(unsigned frameSize,
+ unsigned /*numTruncatedBytes*/,
+ struct timeval /*presentationTime*/,
+ unsigned /*durationInMicroseconds*/) {
+ if (frameSize < 4) return;
+
+ handleNewBuffer(fInputBuffer, frameSize,
+ fInputSource->mpegVersion(), fInputSource->lastSeenSCR());
+}
diff --git a/liveMedia/MPEG2TransportStreamIndexFile.cpp b/liveMedia/MPEG2TransportStreamIndexFile.cpp
new file mode 100644
index 0000000..8499959
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamIndexFile.cpp
@@ -0,0 +1,349 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that encapsulates MPEG-2 Transport Stream 'index files'.
+// These index files are used to implement 'trick play' operations
+// (seek-by-time, fast forward, reverse play) on Transport Stream files.
+//
+// Implementation
+
+#include "MPEG2TransportStreamIndexFile.hh"
+#include "InputFile.hh"
+
+// Constructor: records the index file's name and computes the number of
+// fixed-size index records it contains from the file's size. The file itself
+// is opened lazily (see "openFid()"). A size that is not a whole number of
+// records is reported, but tolerated (the remainder is ignored).
+MPEG2TransportStreamIndexFile
+::MPEG2TransportStreamIndexFile(UsageEnvironment& env, char const* indexFileName)
+ : Medium(env),
+ fFileName(strDup(indexFileName)), fFid(NULL), fMPEGVersion(0), fCurrentIndexRecordNum(0),
+ fCachedPCR(0.0f), fCachedTSPacketNumber(0), fNumIndexRecords(0) {
+ // Get the file size, to determine how many index records it contains:
+ u_int64_t indexFileSize = GetFileSize(indexFileName, NULL);
+ if (indexFileSize % INDEX_RECORD_SIZE != 0) {
+ env << "Warning: Size of the index file \"" << indexFileName
+ << "\" (" << (unsigned)indexFileSize
+ << ") is not a multiple of the index record size ("
+ << INDEX_RECORD_SIZE << ")\n";
+ }
+ fNumIndexRecords = (unsigned long)(indexFileSize/INDEX_RECORD_SIZE);
+}
+
+// Factory function. Returns NULL if "indexFileName" is NULL, or if the index
+// file is empty/nonexistent (detected by a zero playing duration).
+MPEG2TransportStreamIndexFile* MPEG2TransportStreamIndexFile
+::createNew(UsageEnvironment& env, char const* indexFileName) {
+ if (indexFileName == NULL) return NULL;
+ MPEG2TransportStreamIndexFile* indexFile
+ = new MPEG2TransportStreamIndexFile(env, indexFileName);
+
+ // Reject empty or non-existent index files:
+ if (indexFile->getPlayingDuration() == 0.0f) {
+ delete indexFile;
+ indexFile = NULL;
+ }
+
+ return indexFile;
+}
+
+// Destructor: closes the index file (if open) and frees the file name copy.
+MPEG2TransportStreamIndexFile::~MPEG2TransportStreamIndexFile() {
+ closeFid();
+ delete[] fFileName;
+}
+
+// Maps a normal play time "npt" (seconds) to the corresponding Transport
+// Stream packet number and index record number, adjusting "npt" in place to
+// the PCR actually found. Uses interpolation ('regula falsi') search over the
+// index records' PCR values, falling back to bisection when interpolation
+// stalls, then rewinds to the nearest preceding 'clean point'. The last
+// successful lookup is cached; on any failure, all outputs are zeroed.
+void MPEG2TransportStreamIndexFile
+::lookupTSPacketNumFromNPT(float& npt, unsigned long& tsPacketNumber,
+ unsigned long& indexRecordNumber) {
+ if (npt <= 0.0 || fNumIndexRecords == 0) { // Fast-track a common case:
+ npt = 0.0f;
+ tsPacketNumber = indexRecordNumber = 0;
+ return;
+ }
+
+ // If "npt" is the same as the one that we last looked up, return its cached result:
+ if (npt == fCachedPCR) {
+ tsPacketNumber = fCachedTSPacketNumber;
+ indexRecordNumber = fCachedIndexRecordNumber;
+ return;
+ }
+
+ // Search for the pair of neighboring index records whose PCR values span "npt".
+ // Use the 'regula-falsi' method.
+ Boolean success = False;
+ unsigned long ixFound = 0;
+ do {
+ unsigned long ixLeft = 0, ixRight = fNumIndexRecords-1;
+ float pcrLeft = 0.0f, pcrRight;
+ if (!readIndexRecord(ixRight)) break;
+ pcrRight = pcrFromBuf();
+ if (npt > pcrRight) npt = pcrRight;
+ // handle "npt" too large by seeking to the last frame of the file
+
+ // Invariant: pcrLeft < npt <= pcrRight; narrow [ixLeft, ixRight] to width 1:
+ while (ixRight-ixLeft > 1 && pcrLeft < npt && npt <= pcrRight) {
+ unsigned long ixNew = ixLeft
+ + (unsigned long)(((npt-pcrLeft)/(pcrRight-pcrLeft))*(ixRight-ixLeft));
+ if (ixNew == ixLeft || ixNew == ixRight) {
+ // use bisection instead:
+ ixNew = (ixLeft+ixRight)/2;
+ }
+ if (!readIndexRecord(ixNew)) break;
+ float pcrNew = pcrFromBuf();
+ if (pcrNew < npt) {
+ pcrLeft = pcrNew;
+ ixLeft = ixNew;
+ } else {
+ pcrRight = pcrNew;
+ ixRight = ixNew;
+ }
+ }
+ if (ixRight-ixLeft > 1 || npt <= pcrLeft || npt > pcrRight) break; // bad PCR values in index file?
+
+ ixFound = ixRight;
+ // "Rewind" until we reach the start of a Video Sequence or GOP header:
+ success = rewindToCleanPoint(ixFound);
+ } while (0);
+
+ if (success && readIndexRecord(ixFound)) {
+ // Return (and cache) information from record "ixFound":
+ npt = fCachedPCR = pcrFromBuf();
+ tsPacketNumber = fCachedTSPacketNumber = tsPacketNumFromBuf();
+ indexRecordNumber = fCachedIndexRecordNumber = ixFound;
+ } else {
+ // An error occurred: Return the default values, for npt == 0:
+ npt = 0.0f;
+ tsPacketNumber = indexRecordNumber = 0;
+ }
+ closeFid();
+}
+
+// The inverse of "lookupTSPacketNumFromNPT()": maps a TS packet number to the
+// corresponding PCR and index record number, using the same interpolation
+// search (with bisection fallback) over the records' TS packet numbers.
+// If "reverseToPreviousCleanPoint" is set, the result is rewound to the
+// nearest preceding 'clean point' and "tsPacketNumber" is updated to match.
+// Results are cached; on failure the outputs are zeroed.
+void MPEG2TransportStreamIndexFile
+::lookupPCRFromTSPacketNum(unsigned long& tsPacketNumber, Boolean reverseToPreviousCleanPoint,
+ float& pcr, unsigned long& indexRecordNumber) {
+ if (tsPacketNumber == 0 || fNumIndexRecords == 0) { // Fast-track a common case:
+ pcr = 0.0f;
+ indexRecordNumber = 0;
+ return;
+ }
+
+ // If "tsPacketNumber" is the same as the one that we last looked up, return its cached result:
+ if (tsPacketNumber == fCachedTSPacketNumber) {
+ pcr = fCachedPCR;
+ indexRecordNumber = fCachedIndexRecordNumber;
+ return;
+ }
+
+ // Search for the pair of neighboring index records whose TS packet #s span "tsPacketNumber".
+ // Use the 'regula-falsi' method.
+ Boolean success = False;
+ unsigned long ixFound = 0;
+ do {
+ unsigned long ixLeft = 0, ixRight = fNumIndexRecords-1;
+ unsigned long tsLeft = 0, tsRight;
+ if (!readIndexRecord(ixRight)) break;
+ tsRight = tsPacketNumFromBuf();
+ if (tsPacketNumber > tsRight) tsPacketNumber = tsRight;
+ // handle "tsPacketNumber" too large by seeking to the last frame of the file
+
+ // Invariant: tsLeft < tsPacketNumber <= tsRight; narrow the range to width 1:
+ while (ixRight-ixLeft > 1 && tsLeft < tsPacketNumber && tsPacketNumber <= tsRight) {
+ unsigned long ixNew = ixLeft
+ + (unsigned long)(((tsPacketNumber-tsLeft)/(tsRight-tsLeft))*(ixRight-ixLeft));
+ if (ixNew == ixLeft || ixNew == ixRight) {
+ // Use bisection instead:
+ ixNew = (ixLeft+ixRight)/2;
+ }
+ if (!readIndexRecord(ixNew)) break;
+ unsigned long tsNew = tsPacketNumFromBuf();
+ if (tsNew < tsPacketNumber) {
+ tsLeft = tsNew;
+ ixLeft = ixNew;
+ } else {
+ tsRight = tsNew;
+ ixRight = ixNew;
+ }
+ }
+ if (ixRight-ixLeft > 1 || tsPacketNumber <= tsLeft || tsPacketNumber > tsRight) break; // bad PCR values in index file?
+
+ ixFound = ixRight;
+ if (reverseToPreviousCleanPoint) {
+ // "Rewind" until we reach the start of a Video Sequence or GOP header:
+ success = rewindToCleanPoint(ixFound);
+ } else {
+ success = True;
+ }
+ } while (0);
+
+ if (success && readIndexRecord(ixFound)) {
+ // Return (and cache) information from record "ixFound":
+ pcr = fCachedPCR = pcrFromBuf();
+ fCachedTSPacketNumber = tsPacketNumFromBuf();
+ if (reverseToPreviousCleanPoint) tsPacketNumber = fCachedTSPacketNumber;
+ indexRecordNumber = fCachedIndexRecordNumber = ixFound;
+ } else {
+ // An error occurred: Return the default values, for tsPacketNumber == 0:
+ pcr = 0.0f;
+ indexRecordNumber = 0;
+ }
+ closeFid();
+}
+
+// Reads index record #"indexRecordNum" and unpacks all of its fields into the
+// output parameters. Returns False (leaving the outputs unchanged) if the
+// record could not be read.
+Boolean MPEG2TransportStreamIndexFile
+::readIndexRecordValues(unsigned long indexRecordNum,
+ unsigned long& transportPacketNum, u_int8_t& offset,
+ u_int8_t& size, float& pcr, u_int8_t& recordType) {
+ if (!readIndexRecord(indexRecordNum)) return False;
+
+ transportPacketNum = tsPacketNumFromBuf();
+ offset = offsetFromBuf();
+ size = sizeFromBuf();
+ pcr = pcrFromBuf();
+ recordType = recordTypeFromBuf();
+ return True;
+}
+
+// Returns the stream's total playing duration: the PCR of the last index
+// record (or 0.0 if the index file is empty or unreadable).
+float MPEG2TransportStreamIndexFile::getPlayingDuration() {
+ if (fNumIndexRecords == 0 || !readOneIndexRecord(fNumIndexRecords-1)) return 0.0f;
+
+ return pcrFromBuf();
+}
+
+// Returns the stream's MPEG version code (see "setMPEGVersionFromRecordType()"
+// for the encoding), determining it from the first index record if it is not
+// already known. Returns 0 if it cannot be determined.
+int MPEG2TransportStreamIndexFile::mpegVersion() {
+ if (fMPEGVersion != 0) return fMPEGVersion; // we already know it
+
+ // Read the first index record, and figure out the MPEG version from its type:
+ if (!readOneIndexRecord(0)) return 0; // unknown; perhaps the index file is empty?
+
+ setMPEGVersionFromRecordType(recordTypeFromBuf());
+ return fMPEGVersion;
+}
+
+// Lazily opens the index file for reading (a no-op if already open).
+// Returns True iff the file is open afterwards.
+Boolean MPEG2TransportStreamIndexFile::openFid() {
+ if (fFid == NULL && fFileName != NULL) {
+ if ((fFid = OpenInputFile(envir(), fFileName)) != NULL) {
+ fCurrentIndexRecordNum = 0; // a fresh open positions us at record 0
+ }
+ }
+
+ return fFid != NULL;
+}
+
+// Positions the file at the start of record #"indexRecordNumber", opening the
+// file first if necessary, and skipping the seek if we are already there.
+Boolean MPEG2TransportStreamIndexFile::seekToIndexRecord(unsigned long indexRecordNumber) {
+ if (!openFid()) return False;
+
+ if (indexRecordNumber == fCurrentIndexRecordNum) return True; // we're already there
+
+ if (SeekFile64(fFid, (int64_t)(indexRecordNumber*INDEX_RECORD_SIZE), SEEK_SET) != 0) return False;
+ fCurrentIndexRecordNum = indexRecordNumber;
+ return True;
+}
+
+// Reads record #"indexRecordNum" into "fBuf". The file is left open, so
+// consecutive reads of successive records avoid a seek.
+Boolean MPEG2TransportStreamIndexFile::readIndexRecord(unsigned long indexRecordNum) {
+ do {
+ if (!seekToIndexRecord(indexRecordNum)) break;
+ if (fread(fBuf, INDEX_RECORD_SIZE, 1, fFid) != 1) break;
+ // The read advanced the file position by one record; keep our bookkeeping in step:
+ ++fCurrentIndexRecordNum;
+
+ return True;
+ } while (0);
+
+ return False; // an error occurred
+}
+
+// Like "readIndexRecord()", but for a one-off read: closes the file afterwards.
+Boolean MPEG2TransportStreamIndexFile::readOneIndexRecord(unsigned long indexRecordNum) {
+ Boolean result = readIndexRecord(indexRecordNum);
+ closeFid();
+
+ return result;
+}
+
+// Closes the index file if it is open (it will be reopened lazily on demand).
+void MPEG2TransportStreamIndexFile::closeFid() {
+ if (fFid != NULL) {
+ CloseInputFile(fFid);
+ fFid = NULL;
+ }
+}
+
+// Extracts the PCR (in seconds) from the current record in "fBuf":
+// bytes 3-5 hold a little-endian 24-bit integer part; byte 6 holds a
+// fractional part in units of 1/256 second.
+float MPEG2TransportStreamIndexFile::pcrFromBuf() {
+ unsigned pcr_int = (fBuf[5]<<16) | (fBuf[4]<<8) | fBuf[3];
+ u_int8_t pcr_frac = fBuf[6];
+ return pcr_int + pcr_frac/256.0f;
+}
+
+// Extracts the TS packet number from the current record in "fBuf":
+// bytes 7-10, little-endian.
+unsigned long MPEG2TransportStreamIndexFile::tsPacketNumFromBuf() {
+ return (fBuf[10]<<24) | (fBuf[9]<<16) | (fBuf[8]<<8) | fBuf[7];
+}
+
+// Infers the stream's MPEG version code from an index record's type field
+// (ignoring its high 'start of frame' bit). Record types 1-4 => MPEG-2 (code 2);
+// 5-10 => H.264 (code 5); 11-16 => H.265 (code 6). A no-op once the version
+// is known, or for other record-type values.
+void MPEG2TransportStreamIndexFile::setMPEGVersionFromRecordType(u_int8_t recordType) {
+ if (fMPEGVersion != 0) return; // we already know it
+
+ u_int8_t const recordTypeWithoutStartBit = recordType&~0x80;
+ if (recordTypeWithoutStartBit >= 1 && recordTypeWithoutStartBit <= 4) fMPEGVersion = 2;
+ else if (recordTypeWithoutStartBit >= 5 && recordTypeWithoutStartBit <= 10) fMPEGVersion = 5;
+ // represents H.264
+ else if (recordTypeWithoutStartBit >= 11 && recordTypeWithoutStartBit <= 16) fMPEGVersion = 6;
+ // represents H.265
+}
+
+// Scans backwards from record #"ixFound" (updating it in place) until it
+// refers to a 'clean point' - the start of a frame from which a decoder can
+// cleanly resume. Returns True on success; record 0 is always accepted.
+Boolean MPEG2TransportStreamIndexFile::rewindToCleanPoint(unsigned long&ixFound) {
+ Boolean success = False; // until we learn otherwise
+
+ while (ixFound > 0) {
+ if (!readIndexRecord(ixFound)) break;
+
+ u_int8_t recordType = recordTypeFromBuf();
+ setMPEGVersionFromRecordType(recordType);
+
+ // A 'clean point' is the start of a 'frame' from which a decoder can cleanly resume
+ // handling the stream. For H.264, this is a SPS. For H.265, this is a VPS.
+ // For MPEG-2, this is a Video Sequence Header, or a GOP.
+
+ if ((recordType&0x80) != 0) { // This is the start of a 'frame'
+ recordType &=~ 0x80; // remove the 'start of frame' bit
+ if (fMPEGVersion == 5) { // H.264
+ if (recordType == 5/*SPS*/) {
+ success = True;
+ break;
+ }
+ } else if (fMPEGVersion == 6) { // H.265
+ if (recordType == 11/*VPS*/) {
+ success = True;
+ break;
+ }
+ } else { // MPEG-1, 2, or 4
+ if (recordType == 1/*VSH*/) {
+ success = True;
+ break;
+ } else if (recordType == 2/*GOP*/) {
+ // Hack: If the preceding record is for a Video Sequence Header, then use it instead:
+ // (scan backwards through the VSH's records to find its start)
+ unsigned long newIxFound = ixFound;
+
+ while (--newIxFound > 0) {
+ if (!readIndexRecord(newIxFound)) break;
+ recordType = recordTypeFromBuf();
+ if ((recordType&0x7F) != 1) break; // not a Video Sequence Header
+ if ((recordType&0x80) != 0) { // this is the start of the VSH; use it
+ ixFound = newIxFound;
+ break;
+ }
+ }
+ }
+ success = True;
+ break;
+ }
+ }
+
+ // Keep checking, from the previous record:
+ --ixFound;
+ }
+ if (ixFound == 0) success = True; // use record 0 anyway
+
+ return success;
+}
diff --git a/liveMedia/MPEG2TransportStreamMultiplexor.cpp b/liveMedia/MPEG2TransportStreamMultiplexor.cpp
new file mode 100644
index 0000000..81d7997
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamMultiplexor.cpp
@@ -0,0 +1,484 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class for generating MPEG-2 Transport Stream from one or more input
+// Elementary Stream data sources
+// Implementation
+
+#include "MPEG2TransportStreamMultiplexor.hh"
+
+#define TRANSPORT_PACKET_SIZE 188
+
+#define PAT_PERIOD_IF_UNTIMED 100 // # of packets between Program Association Tables (if not timed)
+#define PMT_PERIOD_IF_UNTIMED 500 // # of packets between Program Map Tables (if not timed)
+
+// Enables duration-based segmentation of the output Transport Stream:
+// "segmentationDuration" is the target segment length, and
+// "onEndOfSegmentFunc" (with its client data) is invoked at each segment end.
+void MPEG2TransportStreamMultiplexor
+::setTimedSegmentation(unsigned segmentationDuration,
+ onEndOfSegmentFunc* onEndOfSegmentFunc,
+ void* onEndOfSegmentClientData) {
+ fSegmentationDuration = segmentationDuration;
+ fOnEndOfSegmentFunc = onEndOfSegmentFunc;
+ fOnEndOfSegmentClientData = onEndOfSegmentClientData;
+}
+
+// Constructor: initializes per-PID state (continuity counters and stream
+// types) and the PAT/PMT, PCR, segmentation and input-buffer bookkeeping.
+// Subclasses supply the input data, via "handleNewBuffer()".
+MPEG2TransportStreamMultiplexor
+::MPEG2TransportStreamMultiplexor(UsageEnvironment& env)
+ : FramedSource(env),
+ fHaveVideoStreams(True/*by default*/),
+ fOutgoingPacketCounter(0), fProgramMapVersion(0xFF),
+ fPreviousInputProgramMapVersion(0xFF), fCurrentInputProgramMapVersion(0),
+ fPCR_PID(0), fCurrentPID(0),
+ fInputBuffer(NULL), fInputBufferSize(0), fInputBufferBytesUsed(0),
+ fIsFirstAdaptationField(True), fSegmentationDuration(0), fSegmentationIndication(1),
+ fCurrentSegmentDuration(0.0), fPreviousPTS(0.0),
+ fOnEndOfSegmentFunc(NULL), fOnEndOfSegmentClientData(NULL) {
+ for (unsigned i = 0; i < PID_TABLE_SIZE; ++i) {
+ fPIDState[i].counter = 0;
+ fPIDState[i].streamType = 0;
+ }
+}
+
+// Destructor: nothing to release here; input buffers are owned by subclasses.
+MPEG2TransportStreamMultiplexor::~MPEG2TransportStreamMultiplexor() {
+}
+
+// RTTI-style override: identifies this FramedSource subclass to callers.
+Boolean MPEG2TransportStreamMultiplexor::isMPEG2TransportStreamMultiplexor() const {
+ return True;
+}
+
+// Delivers the next outgoing Transport Stream packet to the client. If the
+// current input buffer is exhausted, asks the subclass for a new one (which
+// re-enters here via "handleNewBuffer()"). Otherwise emits, in priority
+// order: a PAT, a PMT, or the next chunk of input data - then completes the
+// delivery (periodically via the event loop, to bound recursion depth).
+void MPEG2TransportStreamMultiplexor::doGetNextFrame() {
+ if (fInputBufferBytesUsed >= fInputBufferSize) {
+ // No more bytes are available from the current buffer.
+ // Arrange to read a new one.
+ awaitNewBuffer(fInputBuffer);
+ return;
+ }
+
+ do {
+ // Periodically return a Program Association Table packet instead:
+ if ((segmentationIsTimed() && fSegmentationIndication == 1)
+ || (!segmentationIsTimed() && fOutgoingPacketCounter % PAT_PERIOD_IF_UNTIMED == 0)) {
+ ++fOutgoingPacketCounter;
+ deliverPATPacket();
+ fSegmentationIndication = 2; // for next time
+ break;
+ }
+ ++fOutgoingPacketCounter;
+
+ // Periodically (or when we see a new PID) return a Program Map Table instead:
+ Boolean programMapHasChanged = fCurrentInputProgramMapVersion != fPreviousInputProgramMapVersion;
+ if (programMapHasChanged
+ || (segmentationIsTimed() && fSegmentationIndication == 2)
+ || (!segmentationIsTimed() && fOutgoingPacketCounter % PMT_PERIOD_IF_UNTIMED == 0)) {
+ if (programMapHasChanged) { // reset values for next time:
+ fPreviousInputProgramMapVersion = fCurrentInputProgramMapVersion;
+ }
+ deliverPMTPacket(programMapHasChanged);
+ fSegmentationIndication = 0; // for next time
+ break;
+ }
+
+ // Normal case: Deliver (or continue delivering) the recently-read data:
+ deliverDataToClient(fCurrentPID, fInputBuffer, fInputBufferSize,
+ fInputBufferBytesUsed);
+ } while (0);
+
+ // NEED TO SET fPresentationTime, durationInMicroseconds #####
+ // Complete the delivery to the client:
+ if ((fOutgoingPacketCounter%10) == 0) {
+ // To avoid excessive recursion (and stack overflow) caused by excessively large input frames,
+ // occasionally return to the event loop to do this:
+ nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this);
+ } else {
+ afterGetting(this);
+ }
+}
+
+// Called by a subclass when a new PES buffer is available. Inspects the PES
+// "stream_id" to choose an output PID (unless an explicit "PID" >= 0 was
+// given), assigns a Program Map 'stream type' for new PIDs, picks a PCR
+// stream if none is set, records the buffer's SCR when it belongs to the PCR
+// stream, and then resumes delivery via "doGetNextFrame()". Buffers shorter
+// than 4 bytes (no complete stream_id) are ignored.
+void MPEG2TransportStreamMultiplexor
+::handleNewBuffer(unsigned char* buffer, unsigned bufferSize,
+ int mpegVersion, MPEG1or2Demux::SCR scr, int16_t PID) {
+ if (bufferSize < 4) return;
+ fInputBuffer = buffer;
+ fInputBufferSize = bufferSize;
+ fInputBufferBytesUsed = 0;
+
+ u_int8_t stream_id = fInputBuffer[3];
+ // Use "stream_id" directly as our PID.
+ // Also, figure out the Program Map 'stream type' from this.
+ if (stream_id == 0xBE) { // padding_stream; ignore
+ fInputBufferSize = 0;
+ } else if (stream_id == 0xBC) { // program_stream_map
+ setProgramStreamMap(fInputBufferSize);
+ fInputBufferSize = 0; // then, ignore the buffer
+ } else {
+ if (PID == -1)
+ fCurrentPID = stream_id;
+ else
+ fCurrentPID = PID;
+
+ // Set the stream's type:
+ u_int8_t& streamType = fPIDState[fCurrentPID].streamType; // alias
+
+ if (streamType == 0) {
+ // Instead, set the stream's type to default values, based on whether
+ // the stream is audio or video, and whether it's MPEG-1 or MPEG-2:
+ // (The "mpegVersion" codes here: 1,2,4 => MPEG-1/2/4; 5 => H.264; 6 => H.265)
+ if ((stream_id&0xF0) == 0xE0) { // video
+ streamType = mpegVersion == 1 ? 1 : mpegVersion == 2 ? 2 : mpegVersion == 4 ? 0x10 :
+ mpegVersion == 5/*H.264*/ ? 0x1B : 0x24/*assume H.265*/;
+ } else if ((stream_id&0xE0) == 0xC0) { // audio
+ streamType = mpegVersion == 1 ? 3 : mpegVersion == 2 ? 4 : mpegVersion == 3 ? 6 : 0xF;
+ } else if (stream_id == 0xBD) { // private_stream1 (usually AC-3 or Opus)
+ streamType = 0x06; // for DVB or Opus; for ATSC, use 0x81
+ } else { // something else
+ streamType = 0x81; // private
+ }
+ }
+
+ if (fPCR_PID == 0) { // set it to this stream, if it's appropriate:
+ if ((!fHaveVideoStreams && (streamType == 3 || streamType == 4 || streamType == 6 || streamType == 0xF))/* audio stream */ ||
+ (streamType == 1 || streamType == 2 || streamType == 0x10 || streamType == 0x1B || streamType == 0x24)/* video stream */) {
+ fPCR_PID = fCurrentPID; // use this stream's SCR for PCR
+ }
+ }
+ if (fCurrentPID == fPCR_PID) {
+ // Record the input's current SCR timestamp, for use as our PCR:
+ fPCR = scr;
+ }
+ }
+
+ // Now that we have new input data, retry the last delivery to the client:
+ doGetNextFrame();
+}
+
+// Packages up to one Transport Stream packet's worth of data from "buffer"
+// (starting at "startPositionInBuffer") into a single 188-byte TS packet in
+// "fTo": a 4-byte TS header, optionally an adaptation field (carrying a PCR
+// and/or stuffing bytes when fewer than 184 payload bytes remain), then the
+// payload.  On return, "startPositionInBuffer" has been advanced past the
+// payload bytes consumed; "fFrameSize"/"fNumTruncatedBytes" report the result.
+void MPEG2TransportStreamMultiplexor
+::deliverDataToClient(u_int16_t pid, unsigned char* buffer, unsigned bufferSize,
+                      unsigned& startPositionInBuffer) {
+  // Construct a new Transport packet, and deliver it to the client:
+  if (fMaxSize < TRANSPORT_PACKET_SIZE) {
+    fFrameSize = 0; // the client hasn't given us enough space; deliver nothing
+    fNumTruncatedBytes = TRANSPORT_PACKET_SIZE;
+  } else {
+    fFrameSize = TRANSPORT_PACKET_SIZE;
+    // A PCR is added only on the first packet of a PES payload for the PCR PID,
+    // and only once a nonzero PCR value has actually been recorded:
+    Boolean willAddPCR = pid == fPCR_PID && startPositionInBuffer == 0
+      && !(fPCR.highBit == 0 && fPCR.remainingBits == 0 && fPCR.extension == 0);
+    unsigned const numBytesAvailable = bufferSize - startPositionInBuffer;
+    unsigned numHeaderBytes = 4; // by default
+    unsigned numPCRBytes = 0; // by default
+    unsigned numPaddingBytes = 0; // by default
+    unsigned numDataBytes;
+    u_int8_t adaptation_field_control;
+    if (willAddPCR) {
+      adaptation_field_control = 0x30;
+      numHeaderBytes += 2; // for the "adaptation_field_length" and flags
+      numPCRBytes = 6;
+      if (numBytesAvailable >= TRANSPORT_PACKET_SIZE - numHeaderBytes - numPCRBytes) {
+        numDataBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes - numPCRBytes;
+      } else {
+        numDataBytes = numBytesAvailable;
+        numPaddingBytes
+          = TRANSPORT_PACKET_SIZE - numHeaderBytes - numPCRBytes - numDataBytes;
+      }
+    } else if (numBytesAvailable >= TRANSPORT_PACKET_SIZE - numHeaderBytes) {
+      // This is the common case
+      adaptation_field_control = 0x10;
+      numDataBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes;
+    } else {
+      adaptation_field_control = 0x30;
+      ++numHeaderBytes; // for the "adaptation_field_length"
+      // ASSERT: numBytesAvailable <= TRANSPORT_PACKET_SIZE - numHeaderBytes
+      numDataBytes = numBytesAvailable;
+      if (numDataBytes < TRANSPORT_PACKET_SIZE - numHeaderBytes) {
+        ++numHeaderBytes; // for the adaptation field flags
+        numPaddingBytes = TRANSPORT_PACKET_SIZE - numHeaderBytes - numDataBytes;
+      }
+    }
+    // ASSERT: numHeaderBytes+numPCRBytes+numPaddingBytes+numDataBytes
+    //         == TRANSPORT_PACKET_SIZE
+
+    // Fill in the header of the Transport Stream packet:
+    unsigned char* header = fTo;
+    *header++ = 0x47; // sync_byte
+    *header++ = ((startPositionInBuffer == 0) ? 0x40 : 0x00)|(pid>>8);
+    // transport_error_indicator, payload_unit_start_indicator, transport_priority,
+    // first 5 bits of PID
+    *header++ = pid;
+    // last 8 bits of PID
+    unsigned& continuity_counter = fPIDState[pid].counter; // alias
+    *header++ = adaptation_field_control|(continuity_counter&0x0F);
+    // transport_scrambling_control, adaptation_field_control, continuity_counter
+    ++continuity_counter;
+    if (adaptation_field_control == 0x30) {
+      // Add an adaptation field:
+      u_int8_t adaptation_field_length
+        = (numHeaderBytes == 5) ? 0 : 1 + numPCRBytes + numPaddingBytes;
+      *header++ = adaptation_field_length;
+      if (numHeaderBytes > 5) {
+        u_int8_t flags = willAddPCR ? 0x10 : 0x00;
+        if (fIsFirstAdaptationField) {
+          flags |= 0x80; // discontinuity_indicator
+          fIsFirstAdaptationField = False;
+        }
+        *header++ = flags;
+        if (willAddPCR) {
+          // Serialize the 33-bit PCR base + 9-bit extension into 6 bytes:
+          u_int32_t pcrHigh32Bits = (fPCR.highBit<<31) | (fPCR.remainingBits>>1);
+          u_int8_t pcrLowBit = fPCR.remainingBits&1;
+          u_int8_t extHighBit = (fPCR.extension&0x100)>>8;
+          *header++ = pcrHigh32Bits>>24;
+          *header++ = pcrHigh32Bits>>16;
+          *header++ = pcrHigh32Bits>>8;
+          *header++ = pcrHigh32Bits;
+          *header++ = (pcrLowBit<<7)|0x7E|extHighBit; // low PCR bit, 6 reserved bits, ext high bit
+          *header++ = (u_int8_t)fPCR.extension; // low 8 bits of extension
+
+          if (fSegmentationDuration > 0) {
+            // Use the PCR to compute the duration of the segment so far, to check whether
+            // segmentation needs to occur now:
+            double pts = fPCR.highBit ? 0x80000000/45000.0 : 0.0;
+            pts += fPCR.remainingBits/90000.0;
+            pts += fPCR.extension/27000000.0;
+
+            double lastSubSegmentDuration = fPreviousPTS == 0.0 ? 0.0 : pts - fPreviousPTS;
+            fCurrentSegmentDuration += lastSubSegmentDuration;
+
+            // Check whether we need to segment the stream now:
+            if (fCurrentSegmentDuration > (double)fSegmentationDuration
+                || fCurrentSegmentDuration + lastSubSegmentDuration > (double)fSegmentationDuration) {
+              // It's time to segment the stream.
+              if (fOnEndOfSegmentFunc != NULL) {
+                (*fOnEndOfSegmentFunc)(fOnEndOfSegmentClientData, fCurrentSegmentDuration);
+              }
+
+              fCurrentSegmentDuration = 0.0; // for next time
+              fSegmentationIndication = 1; // output a PAT next
+            }
+
+            fPreviousPTS = pts; // for next time
+          }
+        }
+      }
+    }
+
+    // Add any padding bytes:
+    for (unsigned i = 0; i < numPaddingBytes; ++i) *header++ = 0xFF;
+
+    // Finally, add the data bytes:
+    memmove(header, &buffer[startPositionInBuffer], numDataBytes);
+    startPositionInBuffer += numDataBytes;
+  }
+}
+
+#define PAT_PID 0
+#ifndef OUR_PROGRAM_NUMBER
+#define OUR_PROGRAM_NUMBER 1
+#endif
+#define OUR_PROGRAM_MAP_PID 0x1000
+
+// Builds a Program Association Table section (table_id 0, a single program
+// entry mapping OUR_PROGRAM_NUMBER to OUR_PROGRAM_MAP_PID, CRC-32 appended,
+// 0xFF stuffing to fill the packet) and delivers it on PID 0.
+void MPEG2TransportStreamMultiplexor::deliverPATPacket() {
+  // First, create a new buffer for the PAT packet:
+  unsigned const patSize = TRANSPORT_PACKET_SIZE - 4; // allow for the 4-byte header
+  unsigned char* patBuffer = new unsigned char[patSize];
+
+  // and fill it in:
+  unsigned char* pat = patBuffer;
+  *pat++ = 0; // pointer_field
+  *pat++ = 0; // table_id
+  *pat++ = 0xB0; // section_syntax_indicator; 0; reserved, section_length (high)
+  *pat++ = 13; // section_length (low)
+  *pat++ = 0; *pat++ = 1; // transport_stream_id
+  *pat++ = 0xC1; // reserved; version_number; current_next_indicator
+  *pat++ = 0; // section_number
+  *pat++ = 0; // last_section_number
+  *pat++ = OUR_PROGRAM_NUMBER>>8; *pat++ = OUR_PROGRAM_NUMBER; // program_number
+  *pat++ = 0xE0|(OUR_PROGRAM_MAP_PID>>8); // reserved; program_map_PID (high)
+  *pat++ = OUR_PROGRAM_MAP_PID&0xFF; // program_map_PID (low)
+
+  // Compute the CRC from the bytes we currently have (not including "pointer_field"):
+  u_int32_t crc = calculateCRC(patBuffer+1, pat - (patBuffer+1));
+  *pat++ = crc>>24; *pat++ = crc>>16; *pat++ = crc>>8; *pat++ = crc;
+
+  // Fill in the rest of the packet with padding bytes:
+  while (pat < &patBuffer[patSize]) *pat++ = 0xFF;
+
+  // Deliver the packet:
+  unsigned startPosition = 0;
+  deliverDataToClient(PAT_PID, patBuffer, patSize, startPosition);
+
+  // Finally, remove the new buffer:
+  delete[] patBuffer;
+}
+
+// Builds a Program Map Table section (table_id 2) listing every PID with a
+// nonzero "streamType" in "fPIDState", appends a CRC-32 and 0xFF stuffing,
+// and delivers it on OUR_PROGRAM_MAP_PID.  "hasChanged" bumps version_number
+// so receivers re-read the table.
+void MPEG2TransportStreamMultiplexor::deliverPMTPacket(Boolean hasChanged) {
+  if (hasChanged) ++fProgramMapVersion;
+
+  // First, create a new buffer for the PMT packet:
+  unsigned const pmtSize = TRANSPORT_PACKET_SIZE - 4; // allow for the 4-byte header
+  unsigned char* pmtBuffer = new unsigned char[pmtSize];
+
+  // and fill it in:
+  unsigned char* pmt = pmtBuffer;
+  *pmt++ = 0; // pointer_field
+  *pmt++ = 2; // table_id
+  *pmt++ = 0xB0; // section_syntax_indicator; 0; reserved, section_length (high)
+  unsigned char* section_lengthPtr = pmt; // save for later
+  *pmt++ = 0; // section_length (low) (fill in later)
+  *pmt++ = OUR_PROGRAM_NUMBER>>8; *pmt++ = OUR_PROGRAM_NUMBER; // program_number
+  *pmt++ = 0xC1|((fProgramMapVersion&0x1F)<<1); // reserved; version_number; current_next_indicator
+  *pmt++ = 0; // section_number
+  *pmt++ = 0; // last_section_number
+  *pmt++ = 0xE0|(fPCR_PID>>8); // reserved; PCR_PID (high)
+  *pmt++ = fPCR_PID; // PCR_PID (low)
+  *pmt++ = 0xF0; // reserved; program_info_length (high)
+  *pmt++ = 0; // program_info_length (low)
+  for (int pid = 0; pid < PID_TABLE_SIZE; ++pid) {
+    if (fPIDState[pid].streamType != 0) {
+      // This PID gets recorded in the table
+      *pmt++ = fPIDState[pid].streamType;
+      *pmt++ = 0xE0|(pid>>8); // reserved; elementary_pid (high)
+      *pmt++ = pid; // elementary_pid (low)
+      *pmt++ = 0xF0; // reserved; ES_info_length (high)
+      *pmt++ = 0; // ES_info_length (low)
+    }
+  }
+  // NOTE(review): only the low byte of section_length is written here; the
+  // high 4 bits (in the 0xB0 byte above) stay zero.  That's safe because the
+  // section is confined to one 184-byte packet, so section_length < 256.
+  unsigned section_length = pmt - (section_lengthPtr+1) + 4 /*for CRC*/;
+  *section_lengthPtr = section_length;
+
+  // Compute the CRC from the bytes we currently have (not including "pointer_field"):
+  u_int32_t crc = calculateCRC(pmtBuffer+1, pmt - (pmtBuffer+1));
+  *pmt++ = crc>>24; *pmt++ = crc>>16; *pmt++ = crc>>8; *pmt++ = crc;
+
+  // Fill in the rest of the packet with padding bytes:
+  while (pmt < &pmtBuffer[pmtSize]) *pmt++ = 0xFF;
+
+  // Deliver the packet:
+  unsigned startPosition = 0;
+  deliverDataToClient(OUR_PROGRAM_MAP_PID, pmtBuffer, pmtSize, startPosition);
+
+  // Finally, remove the new buffer:
+  delete[] pmtBuffer;
+}
+
+// Parses a MPEG Program Stream "program_stream_map" of "frameSize" bytes from
+// "fInputBuffer", recording each elementary stream's "stream_type" (keyed by
+// its elementary_stream_id) into "fPIDState".  Malformed or stale maps
+// (too small/large, or "current_next_indicator" clear) are ignored.
+void MPEG2TransportStreamMultiplexor::setProgramStreamMap(unsigned frameSize) {
+  if (frameSize <= 16) return; // program_stream_map is too small to be useful
+  if (frameSize > 0xFF) return; // program_stream_map is too large
+
+  u_int16_t program_stream_map_length = (fInputBuffer[4]<<8) | fInputBuffer[5];
+  if ((u_int16_t)frameSize > 6+program_stream_map_length) {
+    frameSize = 6+program_stream_map_length;
+  }
+
+  u_int8_t versionByte = fInputBuffer[6];
+  if ((versionByte&0x80) == 0) return; // "current_next_indicator" is not set
+  fCurrentInputProgramMapVersion = versionByte&0x1F;
+
+  u_int16_t program_stream_info_length = (fInputBuffer[8]<<8) | fInputBuffer[9];
+  unsigned offset = 10 + program_stream_info_length; // skip over 'descriptors'
+
+  u_int16_t elementary_stream_map_length
+    = (fInputBuffer[offset]<<8) | fInputBuffer[offset+1];
+  offset += 2;
+  frameSize -= 4; // sizeof CRC_32
+  if (frameSize > offset + elementary_stream_map_length) {
+    frameSize = offset + elementary_stream_map_length;
+  }
+
+  // Walk the elementary_stream_map entries (4 fixed bytes + descriptors each):
+  while (offset + 4 <= frameSize) {
+    u_int8_t stream_type = fInputBuffer[offset];
+    u_int8_t elementary_stream_id = fInputBuffer[offset+1];
+
+    fPIDState[elementary_stream_id].streamType = stream_type;
+
+    u_int16_t elementary_stream_info_length
+      = (fInputBuffer[offset+2]<<8) | (fInputBuffer[offset+3]);
+    offset += 4 + elementary_stream_info_length;
+  }
+}
+
+// Byte-at-a-time lookup table for the MPEG-2 CRC-32 (generator polynomial
+// 0x04C11DB7, MSB-first, no bit reflection, no final XOR), as used for
+// PSI section CRCs.  Entry i is the CRC of the single byte i.
+static u_int32_t const CRC32[256] = {
+  0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
+  0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
+  0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+  0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
+  0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
+  0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+  0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
+  0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
+  0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+  0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
+  0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
+  0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+  0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
+  0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
+  0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+  0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
+  0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
+  0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+  0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
+  0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
+  0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+  0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
+  0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
+  0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+  0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
+  0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
+  0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+  0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
+  0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
+  0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+  0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
+  0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
+  0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+  0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
+  0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
+  0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+  0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
+  0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
+  0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
+  0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
+  0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
+  0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+  0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
+  0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
+  0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+  0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
+  0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
+  0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+  0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
+  0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
+  0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+  0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
+  0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
+  0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
+  0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
+  0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
+  0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+  0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
+  0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
+  0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+  0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
+  0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
+  0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+  0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+};
+
+// Computes the MPEG-2 CRC-32 over "dataLength" bytes at "data", one byte per
+// table lookup, starting from "initialValue" (the default value for
+// "initialValue" is declared elsewhere, in this function's prototype).
+u_int32_t calculateCRC(u_int8_t const* data, unsigned dataLength, u_int32_t initialValue) {
+  u_int32_t crc = initialValue;
+
+  while (dataLength-- > 0) {
+    crc = (crc<<8) ^ CRC32[(crc>>24) ^ (u_int32_t)(*data++)];
+  }
+
+  return crc;
+}
diff --git a/liveMedia/MPEG2TransportStreamParser.cpp b/liveMedia/MPEG2TransportStreamParser.cpp
new file mode 100644
index 0000000..76d2220
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamParser.cpp
@@ -0,0 +1,328 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a MPEG Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamParser.hh"
+
+#define NUM_PIDS 0x10000
+
+// Global table mapping each possible "stream_type" byte (0x00-0xFF) to a
+// description; populated lazily by the parser's constructor below.
+StreamType StreamTypes[0x100];
+
+// Constructor: initializes the (shared, global) "StreamTypes" table on first
+// use, allocates the per-PID state array (PID 0x0000 pre-registered as the
+// PAT), and kicks off parsing of "inputSource" immediately.
+MPEG2TransportStreamParser
+::MPEG2TransportStreamParser(FramedSource* inputSource,
+                             FramedSource::onCloseFunc* onEndFunc, void* onEndClientData)
+  : StreamParser(inputSource, onEndFunc, onEndClientData, continueParsing, this),
+    fInputSource(inputSource), fAmCurrentlyParsing(False),
+    fOnEndFunc(onEndFunc), fOnEndClientData(onEndClientData),
+    fLastSeenPCR(0.0) {
+  if (StreamTypes[0x01].dataType == StreamType::UNKNOWN) { // initialize array with known values
+    StreamTypes[0x01] = StreamType("MPEG-1 video", StreamType::VIDEO, ".mpv");
+    StreamTypes[0x02] = StreamType("MPEG-2 video", StreamType::VIDEO, ".mpv");
+    StreamTypes[0x03] = StreamType("MPEG-1 audio", StreamType::AUDIO, ".mpa");
+    StreamTypes[0x04] = StreamType("MPEG-2 audio", StreamType::AUDIO, ".mpa");
+    StreamTypes[0x05] = StreamType("privately-defined data", StreamType::DATA);
+    StreamTypes[0x06] = StreamType("privately-defined data", StreamType::DATA);
+    StreamTypes[0x0F] = StreamType("AAC audio", StreamType::AUDIO, ".aac");
+    StreamTypes[0x10] = StreamType("MPEG-4 H.263 based video", StreamType::VIDEO, ".mpv");
+    StreamTypes[0x1B] = StreamType("H.264 video", StreamType::VIDEO, ".h264");
+    StreamTypes[0x1C] = StreamType("MPEG-4 raw audio", StreamType::AUDIO, ".mpa");
+    StreamTypes[0x1D] = StreamType("MPEG-4 text", StreamType::TEXT, ".txt");
+    StreamTypes[0x21] = StreamType("JPEG 2000 video", StreamType::VIDEO, ".mjpg");
+    StreamTypes[0x24] = StreamType("H.265 video", StreamType::VIDEO, ".h265");
+    StreamTypes[0x81] = StreamType("AC-3 audio", StreamType::AUDIO, ".ac3");
+  }
+
+  // Create our 'PID state' array:
+  fPIDState = new PIDState*[NUM_PIDS];
+  for (unsigned i = 0; i < NUM_PIDS; ++i) fPIDState[i] = NULL;
+
+  // Initially, the only PID we know is 0x0000: a Program Association Table:
+  fPIDState[0x0000] = new PIDState_PAT(*this, 0x0000);
+
+  // Begin parsing:
+  continueParsing();
+}
+
+// Destructor: frees each per-PID state object, then the array itself.
+MPEG2TransportStreamParser::~MPEG2TransportStreamParser() {
+  for (unsigned i = 0; i < NUM_PIDS; ++i) delete fPIDState[i];
+  delete[] fPIDState;
+}
+
+// Returns the usage environment of our input source (used for error reporting).
+UsageEnvironment& MPEG2TransportStreamParser::envir() {
+  return fInputSource->envir();
+}
+
+// Static trampoline matching the "StreamParser" client-continue signature;
+// forwards to the member function (the data/size/time arguments are unused).
+void MPEG2TransportStreamParser
+::continueParsing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime) {
+  ((MPEG2TransportStreamParser*)clientData)->continueParsing();
+}
+
+// (Re)enters the parsing loop.  Guards against recursion via
+// "fAmCurrentlyParsing"; if parse() returns False the work was deferred
+// (pending a read) and we'll be called again later.  Only when parsing runs
+// to completion is the client's 'end' callback invoked.
+void MPEG2TransportStreamParser::continueParsing() {
+  if (fAmCurrentlyParsing) return; // don't allow recursive calls to parse()
+
+  if (fInputSource != NULL) {
+    fAmCurrentlyParsing = True;
+    Boolean parseSucceeded = parse();
+    fAmCurrentlyParsing = False;
+
+    if (!parseSucceeded) {
+      // We didn't complete the parsing, because we had to read more data from the source,
+      // or because we're waiting for another read from downstream.
+      // Once that happens, we'll get called again.
+      return;
+    }
+  }
+
+  // We successfully parsed the file.  Call our 'done' function now:
+  if (fOnEndFunc != NULL) (*fOnEndFunc)(fOnEndClientData);
+}
+
+#define TRANSPORT_SYNC_BYTE 0x47
+#define TRANSPORT_PACKET_SIZE 188
+
+// Main parsing loop: resynchronizes on the 0x47 sync byte, then parses one
+// 188-byte Transport Stream packet per iteration (header, optional adaptation
+// field, payload).  Returns False when parsing is deferred (more input needed,
+// signalled by the StreamParser exception, or a downstream delivery pending).
+// Packet processing must be idempotent, since a deferred packet is re-parsed
+// from its saved start position.
+Boolean MPEG2TransportStreamParser::parse() {
+  if (fInputSource->isCurrentlyAwaitingData()) return False;
+      // Our input source is currently being read. Wait until that read completes
+
+  try {
+    while (1) {
+      // Make sure we start with a 'sync byte':
+      do {
+        saveParserState();
+      } while (get1Byte() != TRANSPORT_SYNC_BYTE);
+
+      // Parse and process each (remaining 187 bytes of a) 'Transport Stream Packet' at a time.
+      // (Because these are a lot smaller than the "StreamParser" BANK_SIZE, we don't save
+      //  parser state in the middle of processing each such 'Transport Stream Packet'.
+      //  Therefore, processing of each 'Transport Stream Packet' needs to be idempotent.)
+
+      u_int16_t flagsPlusPID = get2Bytes();
+      // Check the "transport_error_indicator" flag; reject the packet if it's set:
+      if ((flagsPlusPID&0x8000) != 0) {
+#ifdef DEBUG_ERRORS
+        fprintf(stderr, "MPEG2TransportStreamParser::parse() Rejected packet with \"transport_error_indicator\" flag set!\n");
+#endif
+        continue;
+      }
+      Boolean pusi = (flagsPlusPID&0x4000) != 0; // payload_unit_start_indicator
+      // Ignore "transport_priority"
+      u_int16_t PID = flagsPlusPID&0x1FFF;
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\nTransport Packet: payload_unit_start_indicator: %d; PID: 0x%04x\n",
+              pusi, PID);
+#endif
+
+      u_int8_t controlPlusContinuity_counter = get1Byte();
+      // Reject any packets where the "transport_scrambling_control" field is not zero:
+      if ((controlPlusContinuity_counter&0xC0) != 0) {
+#ifdef DEBUG_ERRORS
+        fprintf(stderr, "MPEG2TransportStreamParser::parse() Rejected packet with \"transport_scrambling_control\" set to non-zero value %d!\n", (controlPlusContinuity_counter&0xC0)>>6);
+#endif
+        continue;
+      }
+      u_int8_t adaptation_field_control = (controlPlusContinuity_counter&0x30)>>4; // 2 bits
+#ifdef DEBUG_CONTENTS
+      u_int8_t continuity_counter = (controlPlusContinuity_counter&0x0F); // 4 bits
+      fprintf(stderr, "adaptation_field_control: %d; continuity_counter: 0x%X\n", adaptation_field_control, continuity_counter);
+#endif
+
+      // adaptation_field_control values 2 and 3 mean an adaptation field is present:
+      u_int8_t totalAdaptationFieldSize = adaptation_field_control < 2 ? 0 : parseAdaptationField();
+#ifdef DEBUG_ERRORS
+      if (adaptation_field_control == 2 && totalAdaptationFieldSize != 1+183) {
+        fprintf(stderr, "MPEG2TransportStreamParser::parse() Warning: Got an inconsistent \"totalAdaptationFieldSize\" %d for adaptation_field_control == 2\n", totalAdaptationFieldSize);
+      }
+#endif
+
+      int numDataBytes = TRANSPORT_PACKET_SIZE-4-totalAdaptationFieldSize;
+      if (numDataBytes > 0) {
+#ifdef DEBUG_CONTENTS
+        fprintf(stderr, "+%d data bytes:\n", numDataBytes);
+#endif
+        if (!processDataBytes(PID, pusi, numDataBytes)) {
+          // The parsing got deferred (to be resumed later when a pending read happens)
+          restoreSavedParserState(); // so that we later resume parsing at the start of the packet
+          return False;
+        }
+      }
+    }
+  } catch (int /*e*/) {
+#ifdef DEBUG_CONTENTS
+    fprintf(stderr, "MPEG2TransportStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+    return False; // the parsing got interrupted
+  }
+}
+
+// Parses (and mostly skips over) a TS packet's adaptation field, including
+// the optional PCR/OPCR, splice countdown, private data, and extension.
+// Returns the total number of bytes consumed (1 + adaptation_field_length
+// when the field is well-formed), so the caller can size the payload.
+u_int8_t MPEG2TransportStreamParser::parseAdaptationField() {
+  unsigned startPos = curOffset();
+#ifdef DEBUG_CONTENTS
+  fprintf(stderr, "\tAdaptation Field:\n");
+#endif
+  u_int8_t adaptation_field_length = get1Byte();
+#ifdef DEBUG_CONTENTS
+  fprintf(stderr, "\t\tadaptation_field_length: %d\n", adaptation_field_length);
+#endif
+  if (adaptation_field_length > 0) {
+    u_int8_t flags = get1Byte();
+#ifdef DEBUG_CONTENTS
+    fprintf(stderr, "\t\tadaptation field flags: 0x%02x\n", flags);
+#endif
+    if ((flags&0x10) != 0) { // PCR_flag
+      u_int32_t first32PCRBits = get4Bytes();
+      u_int16_t last16PCRBits = get2Bytes();
+      // program_clock_reference_base = "first32PCRBits" and high bit of "last16PCRBits" (33 bits)
+      // program_clock_reference_extension = last 9 bits of "last16PCRBits" (9 bits)
+      double PCR = first32PCRBits/45000.0;
+      if ((last16PCRBits&0x8000) != 0) PCR += 1/90000.0; // add in low-bit (if set)
+      PCR += (last16PCRBits&0x01FF)/27000000.0; // add in extension
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\t\tPCR: %.10f\n", PCR);
+#endif
+    }
+    if ((flags&0x08) != 0) { // OPCR_flag
+      u_int32_t first32OPCRBits = get4Bytes();
+      u_int16_t last16OPCRBits = get2Bytes();
+      // original_program_clock_reference_base = "first32OPCRBits" and high bit of "last16OPCRBits" (33 bits)
+      // original_program_clock_reference_extension = last 9 bits of "last16OPCRBits" (9 bits)
+      double OPCR = first32OPCRBits/45000.0;
+      if ((last16OPCRBits&0x8000) != 0) OPCR += 1/90000.0; // add in low-bit (if set)
+      OPCR += (last16OPCRBits&0x01FF)/27000000.0; // add in extension
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\t\tOPCR: %.10f\n", OPCR);
+#endif
+    }
+    if ((flags&0x04) != 0) { // splicing_point_flag
+      skipBytes(1); // splice_countdown
+    }
+    if ((flags&0x02) != 0) { // transport_private_data_flag
+      u_int8_t transport_private_data_length = get1Byte();
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\t\ttransport_private_data_length: %d\n", transport_private_data_length);
+#endif
+      skipBytes(transport_private_data_length); // "private_data_byte"s
+    }
+    if ((flags&0x01) != 0) { // adaptation_field_extension_flag
+#ifdef DEBUG_CONTENTS
+      u_int8_t adaptation_field_extension_length = get1Byte();
+      fprintf(stderr, "\t\tadaptation_field_extension_length: %d\n", adaptation_field_extension_length);
+#else
+      skipBytes(1); // adaptation_field_extension_length
+#endif
+      u_int8_t flagsPlusReserved = get1Byte();
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\t\t\tflagsPlusReserved: 0x%02x\n", flagsPlusReserved);
+#endif
+      if ((flagsPlusReserved&0x80) != 0) { // ltw_flag
+        skipBytes(2); // "ltw_valid_flag" + "ltw_offset"
+      }
+      if ((flagsPlusReserved&0x40) != 0) { // piecewise_rate_flag
+        skipBytes(3); // reserved + "piecewise_rate"
+      }
+      if ((flagsPlusReserved&0x20) != 0) { // seamless_splice_flag
+        skipBytes(5); // DTS_next_...
+      }
+      // Skip reserved bytes to the end of the adaptation_field:
+      int numBytesLeft = (1 + adaptation_field_length) - (curOffset() - startPos);
+      if (numBytesLeft > 0) {
+#ifdef DEBUG_CONTENTS
+        fprintf(stderr, "\t\t+%d reserved bytes\n", numBytesLeft);
+#endif
+        skipBytes(numBytesLeft);
+      }
+    }
+    // Skip "stuffing_byte"s to the end of the adaptation_field:
+    int numBytesLeft = (1 + adaptation_field_length) - (curOffset() - startPos);
+    if (numBytesLeft > 0) {
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\t\t+%d stuffing bytes\n", numBytesLeft);
+#endif
+#ifdef DEBUG_ERRORS
+      for (int i = 0; i < numBytesLeft; ++i) {
+        if (get1Byte() != 0xFF) {
+          fprintf(stderr, "WARNING: non-stuffing byte in adaptation_field\n");
+        }
+      }
+#else
+      skipBytes(numBytesLeft);
+#endif
+    }
+  }
+
+  // Finally, figure out how many bytes we parsed, and compare it to what we expected:
+  unsigned totalAdaptationFieldSize = curOffset() - startPos;
+#ifdef DEBUG_ERRORS
+  if (totalAdaptationFieldSize != 1 + adaptation_field_length) {
+    fprintf(stderr, "MPEG2TransportStreamParser::parseAdaptationField() Warning: Got an inconsistent \"totalAdaptationFieldSize\" %d; expected %d\n", totalAdaptationFieldSize, 1 + adaptation_field_length);
+  }
+#endif
+  return totalAdaptationFieldSize;
+}
+
+// Dispatches a TS packet's payload bytes to the handler for its PID's type
+// (PAT, PMT, or elementary stream).  Payload for an unregistered PID is
+// skipped.  Returns False only when stream-packet processing was deferred.
+Boolean MPEG2TransportStreamParser
+::processDataBytes(u_int16_t PID, Boolean pusi, unsigned numDataBytes) {
+  PIDState* pidState = fPIDState[PID];
+
+  if (pidState == NULL) { // unknown PID
+#ifdef DEBUG_CONTENTS
+    fprintf(stderr, "\tUnknown PID\n");
+#endif
+    skipBytes(numDataBytes);
+    return True;
+  }
+
+  switch (pidState->type) {
+    case PAT: {
+      parsePAT(pusi, numDataBytes);
+      return True;
+    }
+    case PMT: {
+      parsePMT((PIDState_PMT*)pidState, pusi, numDataBytes);
+      return True;
+    }
+    case STREAM: {
+      return processStreamPacket((PIDState_STREAM*)pidState, pusi, numDataBytes);
+    }
+    default: { // Never reached, but eliminates a possible error with dumb compilers
+      return False;
+    }
+  }
+}
+
+// Rewinds to the last saved parser position, and clears the 'parsing' flag
+// so that a later continueParsing() call isn't treated as recursive.
+void MPEG2TransportStreamParser::restoreSavedParserState() {
+  StreamParser::restoreSavedParserState();
+  fAmCurrentlyParsing = False;
+}
+
+
+//########## PIDState implementation ##########
+
+// Base per-PID state: records the owning parser, the PID, and its kind.
+PIDState::PIDState(MPEG2TransportStreamParser& parser, u_int16_t pid, PIDType pidType)
+  : ourParser(parser), PID(pid), type(pidType) {
+}
+
+PIDState::~PIDState() {
+}
+
+
+//######### StreamType implementation ########
+
+// Constructs a "stream_type" description entry (see the "StreamTypes" table).
+StreamType
+::StreamType(char const* description, enum dataType dataType, char const* filenameSuffix)
+  : description(description), dataType(dataType), filenameSuffix(filenameSuffix) {
+}
diff --git a/liveMedia/MPEG2TransportStreamParser.hh b/liveMedia/MPEG2TransportStreamParser.hh
new file mode 100644
index 0000000..307f348
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamParser.hh
@@ -0,0 +1,132 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a MPEG Transport Stream
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_PARSER_HH
+// Define the guard macro: without this #define, the #ifndef above never
+// becomes false, so a second inclusion of this header would re-declare every
+// class below and fail to compile.
+#define _MPEG2_TRANSPORT_STREAM_PARSER_HH
+
+#ifndef _STREAM_PARSER_HH
+#include "StreamParser.hh"
+#endif
+#ifndef _MPEG2_TRANSPORT_STREAM_DEMUXED_TRACK_HH
+#include "MPEG2TransportStreamDemuxedTrack.hh"
+#endif
+#ifndef _MEDIA_SINK_HH
+#include "MediaSink.hh"
+#endif
+
+// A descriptor that describes the state of each known PID:
+// The kind of data carried on a PID: Program Association Table, Program Map
+// Table, or an elementary stream.
+enum PIDType { PAT, PMT, STREAM };
+
+// Abstract base for the per-PID state kept in the parser's fPIDState array.
+class PIDState {
+protected: // we're a virtual base class
+  PIDState(MPEG2TransportStreamParser& parser, u_int16_t pid, PIDType pidType);
+public:
+  virtual ~PIDState();
+
+public:
+  MPEG2TransportStreamParser& ourParser;
+  u_int16_t PID;
+  PIDType type;
+};
+
+// State for the Program Association Table PID (0x0000).
+class PIDState_PAT : public PIDState {
+public:
+  PIDState_PAT(MPEG2TransportStreamParser& parser, u_int16_t pid);
+protected:
+  virtual ~PIDState_PAT();
+};
+
+// State for a Program Map Table PID, as announced by the PAT.
+class PIDState_PMT : public PIDState {
+public:
+  PIDState_PMT(MPEG2TransportStreamParser& parser, u_int16_t pid, u_int16_t programNumber);
+protected:
+  virtual ~PIDState_PMT();
+
+public:
+  u_int16_t program_number; // the program this PMT describes
+};
+
+// State for an elementary-stream PID, as announced by a PMT.
+class PIDState_STREAM : public PIDState {
+public:
+  PIDState_STREAM(MPEG2TransportStreamParser& parser, u_int16_t pid, u_int16_t programNumber, u_int8_t streamType);
+protected:
+  virtual ~PIDState_STREAM();
+
+public:
+  u_int16_t program_number; // the program this stream belongs to
+  u_int8_t stream_type; // index into the "StreamTypes" table
+  double lastSeenPTS;
+  MPEG2TransportStreamDemuxedTrack* streamSource;
+  MediaSink* streamSink;
+};
+
+
+// Descriptions of known "stream_type"s:
+// Describes one known "stream_type" code: human-readable name, broad media
+// category, and a suggested filename suffix for demuxed output.
+class StreamType {
+public:
+  char const* description;
+  enum dataType { AUDIO, VIDEO, DATA, TEXT, UNKNOWN } dataType;
+  char const* filenameSuffix;
+
+public:
+  StreamType(char const* description = "unknown", enum dataType dataType = UNKNOWN,
+             char const* filenameSuffix = "");
+};
+
+
+// Parses an MPEG-2 Transport Stream from "inputSource": resynchronizes on
+// packet boundaries, then demultiplexes PAT/PMT tables and elementary-stream
+// payloads, tracking one "PIDState" object per PID.
+class MPEG2TransportStreamParser: public StreamParser {
+public:
+  MPEG2TransportStreamParser(FramedSource* inputSource,
+                             FramedSource::onCloseFunc* onEndFunc, void* onEndClientData);
+  virtual ~MPEG2TransportStreamParser();
+
+  UsageEnvironment& envir();
+
+  // StreamParser 'client continue' function:
+  static void continueParsing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime);
+  void continueParsing();
+
+private:
+  // Parsing functions:
+  friend class MPEG2TransportStreamDemuxedTrack;
+  Boolean parse(); // returns True iff we have finished parsing all BOS pages (on initialization)
+
+  u_int8_t parseAdaptationField(); // returns the number of bytes consumed
+  Boolean processDataBytes(u_int16_t PID, Boolean pusi, unsigned numDataBytes);
+
+  void parsePAT(Boolean pusi, unsigned numDataBytes);
+  void parsePMT(PIDState_PMT* pidState, Boolean pusi, unsigned numDataBytes);
+  void parseStreamDescriptors(unsigned numDescriptorBytes);
+  Boolean processStreamPacket(PIDState_STREAM* pidState, Boolean pusi, unsigned numDataBytes);
+  unsigned parsePESHeader(PIDState_STREAM* pidState, unsigned numDataBytes);
+
+private: // redefined virtual functions
+  virtual void restoreSavedParserState();
+
+private:
+  // General state for parsing:
+  FramedSource* fInputSource;
+  Boolean fAmCurrentlyParsing; // guards against recursive parse() calls
+  FramedSource::onCloseFunc* fOnEndFunc;
+  void* fOnEndClientData;
+  PIDState** fPIDState; // one entry per possible PID (NULL => PID unknown)
+  double fLastSeenPCR;
+};
+
+#endif
diff --git a/liveMedia/MPEG2TransportStreamParser_PAT.cpp b/liveMedia/MPEG2TransportStreamParser_PAT.cpp
new file mode 100644
index 0000000..3fd0862
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamParser_PAT.cpp
@@ -0,0 +1,111 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a MPEG Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamParser.hh"
+
+// Parses a Program Association Table section from the current TS payload:
+// validates table_id and section_length, then registers a PIDState_PMT for
+// each announced (program_number, PMT PID) pair.  Any trailing bytes
+// (including the CRC, which is not verified here) are skipped so that exactly
+// "numDataBytes" are consumed regardless of how parsing went.
+void MPEG2TransportStreamParser::parsePAT(Boolean pusi, unsigned numDataBytes) {
+#ifdef DEBUG_CONTENTS
+  fprintf(stderr, "\tProgram Association Table\n");
+#endif
+  unsigned startPos = curOffset();
+
+  do {
+    if (pusi) {
+      u_int8_t pointer_field = get1Byte();
+      skipBytes(pointer_field); // usually 0
+    }
+
+    u_int8_t table_id = get1Byte();
+    if (table_id != 0x00) {
+#ifdef DEBUG_ERRORS
+      fprintf(stderr, "MPEG2TransportStreamParser::parsePAT(%d, %d): bad table_id: 0x%02x\n",
+              pusi, numDataBytes, table_id);
+#endif
+      break;
+    }
+
+    u_int16_t flagsPlusSection_length = get2Bytes();
+    u_int16_t section_length = flagsPlusSection_length&0x0FFF;
+#ifdef DEBUG_CONTENTS
+    fprintf(stderr, "\t\tsection_length: %d\n", section_length);
+#endif
+    if (section_length < 9/*too small for remaining fields + CRC*/ ||
+        section_length > 1021/*as per specification*/) {
+#ifdef DEBUG_ERRORS
+      fprintf(stderr, "MPEG2TransportStreamParser::parsePAT(%d, %d): Bad section_length: %d\n",
+              pusi, numDataBytes, section_length);
+#endif
+      break;
+    }
+
+    unsigned endPos = curOffset() + section_length;
+    if (endPos - startPos > numDataBytes) {
+#ifdef DEBUG_ERRORS
+      fprintf(stderr, "MPEG2TransportStreamParser::parsePAT(%d, %d): section_length %d gives us a total size %d that's too large!\n",
+              pusi, numDataBytes, section_length, endPos - startPos);
+#endif
+      break;
+    }
+
+#ifdef DEBUG_CONTENTS
+    u_int16_t transport_stream_id = get2Bytes();
+    fprintf(stderr, "\t\ttransport_stream_id: 0x%04x\n", transport_stream_id);
+    u_int8_t version_number_byte = get1Byte();
+    u_int8_t version_number = (version_number_byte&0x1E)>>1;
+    u_int8_t section_number = get1Byte();
+    u_int8_t last_section_number = get1Byte();
+    fprintf(stderr, "\t\tversion_number: %d; section_number: %d; last_section_number: %d\n",
+            version_number, section_number, last_section_number);
+#else
+    skipBytes(5); // transport_stream_id, version byte, section numbers
+#endif
+
+    while (curOffset() <= endPos - 4/*for CRC*/ - 4/*for a program_number+PID*/) {
+      u_int16_t program_number = get2Bytes();
+      u_int16_t pid = get2Bytes()&0x1FFF;
+
+#ifdef DEBUG_CONTENTS
+      fprintf(stderr, "\t\tprogram_number: %d; PID: 0x%04x\n", program_number, pid);
+#endif
+      if (program_number != 0x0000) { // program_number 0 denotes the network PID, not a PMT
+        if (fPIDState[pid] == NULL) fPIDState[pid] = new PIDState_PMT(*this, pid, program_number);
+      }
+    }
+  } while (0);
+
+  // Skip (ignore) all remaining bytes in this packet (including the CRC):
+  int numBytesLeft = numDataBytes - (curOffset() - startPos);
+  if (numBytesLeft > 0) {
+#ifdef DEBUG_CONTENTS
+    fprintf(stderr, "\t\t+%d CRC and stuffing bytes\n", numBytesLeft);
+#endif
+    skipBytes(numBytesLeft);
+  }
+}
+
+
+//########## PIDState_PAT implementation ##########
+
+// Per-PID state for the Program Association Table PID.
+PIDState_PAT::PIDState_PAT(MPEG2TransportStreamParser& parser, u_int16_t pid)
+  : PIDState(parser, pid, PAT) {
+}
+
+PIDState_PAT::~PIDState_PAT() {
+}
diff --git a/liveMedia/MPEG2TransportStreamParser_PMT.cpp b/liveMedia/MPEG2TransportStreamParser_PMT.cpp
new file mode 100644
index 0000000..cdbe0f5
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamParser_PMT.cpp
@@ -0,0 +1,471 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a MPEG Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamParser.hh"
+
+void MPEG2TransportStreamParser
+::parsePMT(PIDState_PMT* pidState, Boolean pusi, unsigned numDataBytes) {
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\tProgram Map Table\n");
+#endif
+ unsigned startPos = curOffset();
+
+ do {
+ if (pusi) {
+ u_int8_t pointer_field = get1Byte();
+ skipBytes(pointer_field); // usually 0
+ }
+
+ u_int8_t table_id = get1Byte();
+ if (table_id != 0x02) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePMT(0x%04x, %d, %d): bad table_id: 0x%02x\n",
+ pidState->PID, pusi, numDataBytes, table_id);
+#endif
+ break;
+ }
+
+ u_int16_t flagsPlusSection_length = get2Bytes();
+ u_int16_t section_length = flagsPlusSection_length&0x0FFF;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\tsection_length: %d\n", section_length);
+#endif
+ if (section_length < 13/*too small for remaining fields + CRC*/ ||
+ section_length > 1021/*as per specification*/) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePMT(0x%04x, %d, %d): Bad section_length: %d\n",
+ pidState->PID, pusi, numDataBytes, section_length);
+#endif
+ break;
+ }
+ unsigned endPos = curOffset() + section_length;
+ if (endPos - startPos > numDataBytes) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePMT(0x%04x, %d, %d): section_length %d gives us a total size %d that's too large!\n",
+ pidState->PID, pusi, numDataBytes, section_length, endPos - startPos);
+#endif
+ break;
+ }
+
+ u_int16_t program_number = get2Bytes();
+ if (program_number != pidState->program_number) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePMT(0x%04x, %d, %d): program_number %d does not match the value %d that was given to us in the PAT!\n",
+ pidState->PID, pusi, numDataBytes, program_number, pidState->program_number);
+#endif
+ break;
+ }
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\tprogram_number: %d\n", program_number);
+
+ u_int8_t version_number_byte = get1Byte();
+ u_int8_t version_number = (version_number_byte&0x1E)>>1;
+ u_int8_t section_number = get1Byte();
+ u_int8_t last_section_number = get1Byte();
+ fprintf(stderr, "\t\tversion_number: %d; section_number: %d; last_section_number: %d\n",
+ version_number, section_number, last_section_number);
+ u_int16_t PCR_PID = get2Bytes(); PCR_PID &= 0x1FFF;
+ fprintf(stderr, "\t\tPCR_PID: 0x%04x\n", PCR_PID);
+#else
+ skipBytes(5);
+#endif
+
+ u_int16_t program_info_length = get2Bytes(); program_info_length &= 0x0FFF;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\tprogram_info_length: %d\n", program_info_length);
+#endif
+ unsigned endOfDescriptors = curOffset() + program_info_length;
+ if (endOfDescriptors + 4/*CRC*/ - startPos > numDataBytes) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePMT(0x%04x, %d, %d): program_info_length %d gives us a total size %d that's too large!\n",
+ pidState->PID, pusi, numDataBytes, program_info_length, endOfDescriptors + 4 - startPos);
+#endif
+ break;
+ }
+ parseStreamDescriptors(program_info_length);
+
+ while (curOffset() <= endPos - 4/*for CRC*/ - 5/*for mapping fields*/) {
+ u_int8_t stream_type = get1Byte();
+ u_int16_t elementary_PID = get2Bytes(); elementary_PID &= 0x1FFF;
+ u_int16_t ES_info_length = get2Bytes(); ES_info_length &= 0x0FFF;
+#ifdef DEBUG_CONTENTS
+ extern StreamType StreamTypes[];
+ char const* const streamTypeDesc = StreamTypes[stream_type].description;
+ fprintf(stderr, "\t\tstream_type: 0x%02x (%s); elementary_PID: 0x%04x; ES_info_length: %d\n",
+ stream_type, streamTypeDesc == NULL ? "???" : streamTypeDesc, elementary_PID, ES_info_length);
+#endif
+ endOfDescriptors = curOffset() + ES_info_length;
+ if (endOfDescriptors + 4/*CRC*/ - startPos > numDataBytes) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePMT(0x%04x, %d, %d): ES_info_length %d gives us a total size %d that's too large!\n",
+ pidState->PID, pusi, numDataBytes, ES_info_length, endOfDescriptors + 4 - startPos);
+#endif
+ break;
+ }
+ parseStreamDescriptors(ES_info_length);
+
+ if (fPIDState[elementary_PID] == NULL) {
+ fPIDState[elementary_PID]
+ = new PIDState_STREAM(*this, elementary_PID, program_number, stream_type);
+ }
+ }
+ } while (0);
+
+ // Skip (ignore) all remaining bytes in this packet (including the CRC):
+ int numBytesLeft = numDataBytes - (curOffset() - startPos);
+ if (numBytesLeft > 0) {
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t+%d CRC and stuffing bytes\n", numBytesLeft);
+#endif
+ skipBytes(numBytesLeft);
+ }
+}
+
+#ifdef DEBUG_CONTENTS
+#define pDesc(str) do { fprintf(stderr, "\t\t\tdescriptor_tag: 0x%02x (%s); descriptor_length: %d\n",descriptor_tag, (str), descriptor_length); } while (0)
+#else
+#define pDesc(str)
+#endif
+
+void MPEG2TransportStreamParser::parseStreamDescriptors(unsigned numDescriptorBytes) {
+ while (numDescriptorBytes >= 2/* enough for "descriptor_tag" and "descriptor_length" */) {
+ u_int8_t descriptor_tag = get1Byte();
+ u_int8_t descriptor_length = get1Byte();
+ numDescriptorBytes -= 2;
+
+ if (descriptor_length > numDescriptorBytes) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parseStreamDescriptors() error: Saw descriptor_length %d > remaining bytes %d\n",
+ descriptor_length, numDescriptorBytes);
+#endif
+ skipBytes(numDescriptorBytes); numDescriptorBytes = 0;
+ break;
+ }
+
+ Boolean parsedDescriptor = False;
+ switch (descriptor_tag) {
+ // Note: These are the tags that we've seen to date. Add more when we see more.
+ case 0x02: {
+ pDesc("video");
+ if (descriptor_length < 1) break;
+ u_int8_t flags = get1Byte();
+ Boolean MPEG_1_only_flag = (flags&0x04) != 0;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tflags: 0x%02x (frame_rate_code 0x%1x; MPEG_1_only_flag %d)\n",
+ flags, (flags&0x78)>>3, MPEG_1_only_flag);
+#endif
+ if (MPEG_1_only_flag == 0) {
+ if (descriptor_length < 3) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t profile_and_level_indication = get1Byte();
+ flags = get1Byte();
+ fprintf(stderr, "\t\t\t\tprofile_and_level_indication 0x%02x; flags 0x%02x (chroma_format 0x%1x)\n",
+ profile_and_level_indication, flags, (flags&0xC0)>>6);
+#else
+ skipBytes(2);
+#endif
+ }
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x03: {
+ pDesc("audio");
+ if (descriptor_length < 1) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t flags = get1Byte();
+ fprintf(stderr, "\t\t\t\tflags: 0x%02x (layer %d)\n", flags, (flags&0x30)>>4);
+#else
+ skipBytes(1);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x05: {
+ pDesc("registration");
+ if (descriptor_length < 4) break;
+#ifdef DEBUG_CONTENTS
+ u_int32_t format_identifier = get4Bytes();
+ fprintf(stderr, "\t\t\t\tformat_identifier: 0x%08x (%c%c%c%c)\n",
+ format_identifier,
+ format_identifier>>24, format_identifier>>16, format_identifier>>8, format_identifier);
+ if (descriptor_length > 4) {
+ fprintf(stderr, "\t\t\t\tadditional_identification_info: ");
+ for (unsigned i = 4; i < descriptor_length; ++i) fprintf(stderr, "%02x:", get1Byte());
+ fprintf(stderr, "\n");
+ }
+#else
+ skipBytes(descriptor_length);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x06: {
+ pDesc("data stream alignment");
+ if (descriptor_length < 1) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t alignment_type = get1Byte();
+ fprintf(stderr, "\t\t\t\talignment_type: 0x%02x\n", alignment_type);
+#else
+ skipBytes(1);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x0a: {
+ pDesc("ISO 639 language descriptor");
+ for (unsigned i = 0; i < descriptor_length/4; ++i) {
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tISO_639_language_code: %c%c%c; audio_type: 0x%02x\n",
+ get1Byte(), get1Byte(), get1Byte(), get1Byte());
+#else
+ skipBytes(4);
+#endif
+ }
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x0b: {
+ pDesc("system clock");
+ if (descriptor_length < 2) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t flags = get1Byte();
+ Boolean external_clock_ref = (flags&0x80) != 0;
+ u_int8_t clock_accuracy_integer = flags&0x3F;
+
+ u_int8_t clock_accuracy_exponent = get1Byte(); clock_accuracy_exponent >>= 5;
+ float ppm = clock_accuracy_integer*1.0;
+ for (unsigned i = 0; i < clock_accuracy_exponent; ++i) ppm /= 10.0;
+ fprintf(stderr, "\t\t\t\texternal_clock: %d; clock_accuracy int: %d, exp: %d -> %f ppm\n",
+ external_clock_ref, clock_accuracy_integer, clock_accuracy_exponent, ppm);
+#else
+ skipBytes(2);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x0e: {
+ pDesc("maximum bitrate");
+ if (descriptor_length < 3) break;
+#ifdef DEBUG_CONTENTS
+ u_int32_t maximum_bitrate = ((get1Byte()&0x3F)<<16)|get2Bytes(); // 22 bits
+ fprintf(stderr, "\t\t\t\tmaximum_bitrate: %d => %f Mbps\n",
+ maximum_bitrate, (maximum_bitrate*50*8)/1000000.0);
+#else
+ skipBytes(3);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x10: {
+ pDesc("smoothing buffer");
+ if (descriptor_length < 6) break;
+#ifdef DEBUG_CONTENTS
+ u_int32_t sb_leak_rate = ((get1Byte()&0x3F)<<16)|get2Bytes(); // 22 bits
+ u_int32_t sb_size = ((get1Byte()&0x3F)<<16)|get2Bytes(); // 22 bits
+ fprintf(stderr, "\t\t\t\tsb_leak_rate: %d => %f Mbps; sb_size: %d bytes\n",
+ sb_leak_rate, (sb_leak_rate*400)/1000000.0, sb_size);
+#else
+ skipBytes(6);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x1d: {
+ pDesc("IOD parameters for ISO/IEC 14496-1");
+ // Note: We don't know how to parse this. (Where's a document that describes this?)
+ skipBytes(descriptor_length); numDescriptorBytes -= descriptor_length;
+ parsedDescriptor = True;
+ break;
+ }
+ case 0x28: {
+ pDesc("H.264 video parameters");
+ if (descriptor_length < 4) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t profile_idc = get1Byte();
+ u_int8_t flags1 = get1Byte();
+ u_int8_t level_idc = get1Byte();
+ u_int8_t flags2 = get1Byte();
+ fprintf(stderr, "\t\t\t\tprofile_idc: 0x%02x, flags1: 0x%02x, level_idc: 0x%02x, flags2: 0x%02x\n",
+ profile_idc, flags1, level_idc, flags2);
+#else
+ skipBytes(4);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x52: {
+ pDesc("stream identifier");
+ if (descriptor_length < 1) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t component_tag = get1Byte();
+ fprintf(stderr, "\t\t\t\tcomponent_tag: %d\n", component_tag);
+#else
+ skipBytes(1);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x56: {
+ pDesc("teletext");
+ for (unsigned i = 0; i < descriptor_length/5; ++i) {
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tISO_639_language_code: %c%c%c",
+ get1Byte(), get1Byte(), get1Byte());
+ u_int8_t typePlusMagazine = get1Byte();
+ fprintf(stderr, "; type: 0x%02x; magazine: %d; page: %d\n",
+ typePlusMagazine>>3, typePlusMagazine&0x07, get1Byte());
+#else
+ skipBytes(5);
+#endif
+ }
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x59: {
+ pDesc("subtitling");
+ for (unsigned i = 0; i < descriptor_length/8; ++i) {
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tISO_639_language_code: %c%c%c",
+ get1Byte(), get1Byte(), get1Byte());
+ fprintf(stderr, "; subtitling_type: 0x%02x; composition_page_id: 0x%04x; ancillary_page_id: 0x%04x\n",
+ get1Byte(), get2Bytes(), get2Bytes());
+#else
+ skipBytes(8);
+#endif
+ }
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x6f: {
+ pDesc("application signalling");
+ for (unsigned i = 0; i < descriptor_length/3; ++i) {
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tapplication_type: 0x%04x; AIT_version_number: %d\n",
+ get2Bytes()&0x7FFF, get1Byte()&0x1F);
+#else
+ skipBytes(3);
+#endif
+ }
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x7a: {
+ pDesc("enhanced AC-3");
+ if (descriptor_length < 1) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t flags = get1Byte();
+ fprintf(stderr, "\t\t\t\tflags: 0x%02x", flags);
+ if (descriptor_length > 1) {
+ fprintf(stderr, "; extra bytes: ");
+ for (unsigned i = 1; i < descriptor_length; ++i) fprintf(stderr, "0x%02x ", get1Byte());
+ }
+ fprintf(stderr, "\n");
+#else
+ skipBytes(descriptor_length);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x81: {
+ pDesc("AC-3 audio");
+ if (descriptor_length < 3) break;
+#ifdef DEBUG_CONTENTS
+ u_int8_t flags = get1Byte();
+ fprintf(stderr, "\t\t\t\tsample_rate_code: %d; bsid: 0x%02x",
+ flags>>5, flags&0x1F);
+ flags = get1Byte();
+ fprintf(stderr, "; bit_rate_code: %d; surround_mode: %d",
+ flags>>2, flags&0x03);
+ flags = get1Byte();
+ fprintf(stderr, "; bsmod: %d; num_channels: %d; full_svc: %d",
+ flags>>5, (flags&0x1E)>>1, (flags&0x01));
+ if (descriptor_length > 3) {
+ fprintf(stderr, "; extra bytes: ");
+ for (unsigned i = 3; i < descriptor_length; ++i) fprintf(stderr, "0x%02x ", get1Byte());
+ }
+ fprintf(stderr, "\n");
+#else
+ skipBytes(descriptor_length);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ case 0x86: {
+ pDesc("caption service");
+ if (descriptor_length < 1) break;
+ u_int8_t number_of_services = get1Byte()&0x1F;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tnumber_of_services: %d\n", number_of_services);
+#endif
+ if (descriptor_length < number_of_services*6) break;
+#ifdef DEBUG_CONTENTS
+ for (unsigned i = 0; i < number_of_services; ++i) {
+ fprintf(stderr, "\t\t\t\t\tlanguage: %c%c%c", get1Byte(), get1Byte(), get1Byte());
+
+ u_int8_t flags = get1Byte();
+ Boolean digital_cc = (flags&0x80) != 0;
+ fprintf(stderr, "; digital_cc %d", digital_cc);
+ if (digital_cc == 0) {
+ fprintf(stderr, "; line21_field: %d", flags&0x01);
+ } else {
+ fprintf(stderr, "; caption_service_number: %d", flags&0x3F);
+ }
+
+ u_int16_t flags2 = get2Bytes();
+ fprintf(stderr, "; easy_reader: %d; wide_aspect_ratio: %d\n",
+ (flags2&0x8000) != 0, (flags2&0x4000) != 0);
+ }
+#else
+ skipBytes(number_of_services*6);
+#endif
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ default: {
+ pDesc("???");
+ skipBytes(descriptor_length);
+ numDescriptorBytes -= descriptor_length; parsedDescriptor = True;
+ break;
+ }
+ }
+ if (!parsedDescriptor) break; // an error occurred
+ }
+
+ // Skip over any remaining descriptor bytes (as a result of a parsing error):
+ if (numDescriptorBytes > 0) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parseStreamDescriptors() Parsing error left %d bytes unparsed\n",
+ numDescriptorBytes);
+#endif
+ skipBytes(numDescriptorBytes);
+ }
+}
+
+
+//########## PIDState_PMT implementation ##########
+
+PIDState_PMT
+::PIDState_PMT(MPEG2TransportStreamParser& parser, u_int16_t pid, u_int16_t programNumber)
+ : PIDState(parser, pid, PMT),
+ program_number(programNumber) {
+}
+
+PIDState_PMT::~PIDState_PMT() {
+}
diff --git a/liveMedia/MPEG2TransportStreamParser_STREAM.cpp b/liveMedia/MPEG2TransportStreamParser_STREAM.cpp
new file mode 100644
index 0000000..ccc1f29
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamParser_STREAM.cpp
@@ -0,0 +1,310 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a MPEG Transport Stream
+// Implementation
+
+#include "MPEG2TransportStreamParser.hh"
+#include "FileSink.hh"
+#include <time.h> // for time_t
+
+Boolean MPEG2TransportStreamParser
+::processStreamPacket(PIDState_STREAM* pidState, Boolean pusi, unsigned numDataBytes) {
+#ifdef DEBUG_CONTENTS
+ extern StreamType StreamTypes[];
+ fprintf(stderr, "\t%s stream (stream_type 0x%02x)\n",
+ StreamTypes[pidState->stream_type].description, pidState->stream_type);
+#endif
+ do {
+ MPEG2TransportStreamDemuxedTrack* streamSource = pidState->streamSource;
+ if (streamSource == NULL) {
+ // There's no source for this track; just skip the data:
+ skipBytes(numDataBytes);
+ break;
+ }
+
+ if (!streamSource->isCurrentlyAwaitingData()) {
+ // Wait until the source next gets read from. (The parsing will continue then.)
+ return False;
+ }
+
+ // If the data begins with a PES header, parse it first
+ unsigned pesHeaderSize = 0;
+ if (pusi && pidState->stream_type != 0x05/*these special private streams don't have PES hdrs*/) {
+ pesHeaderSize = parsePESHeader(pidState, numDataBytes);
+ if (pesHeaderSize == 0) break; // PES header parsing failed
+ }
+
+ // Deliver the data:
+ unsigned numBytesToDeliver = numDataBytes - pesHeaderSize;
+ if (numBytesToDeliver > streamSource->maxSize()) {
+ streamSource->frameSize() = streamSource->maxSize();
+ streamSource->numTruncatedBytes() = numBytesToDeliver - streamSource->maxSize();
+ } else {
+ streamSource->frameSize() = numBytesToDeliver;
+ streamSource->numTruncatedBytes() = 0;
+ }
+ getBytes(streamSource->to(), streamSource->frameSize());
+ skipBytes(streamSource->numTruncatedBytes());
+
+ double pts = pidState->lastSeenPTS == 0.0 ? fLastSeenPCR : pidState->lastSeenPTS;
+ streamSource->presentationTime().tv_sec = (time_t)pts;
+ streamSource->presentationTime().tv_usec = int(pts*1000000.0)%1000000;
+
+ FramedSource::afterGetting(streamSource); // completes delivery
+ } while (0);
+
+ return True;
+}
+
+static Boolean isSpecialStreamId[0x100];
+
+unsigned MPEG2TransportStreamParser
+::parsePESHeader(PIDState_STREAM* pidState, unsigned numDataBytes) {
+ static Boolean haveInitializedIsSpecialStreamId = False;
+ if (!haveInitializedIsSpecialStreamId) {
+ for (unsigned i = 0; i < 0x100; ++i) isSpecialStreamId[i] = False;
+ isSpecialStreamId[0xBC] = True; // program_stream_map
+ isSpecialStreamId[0xBE] = True; // padding_stream
+ isSpecialStreamId[0xBF] = True; // private_stream_2
+ isSpecialStreamId[0xF0] = True; // ECM_stream
+ isSpecialStreamId[0xF1] = True; // EMM_stream
+ isSpecialStreamId[0xF2] = True; // DSMCC_stream
+ isSpecialStreamId[0xF8] = True; // ITU-T Rec. H.222.1 type E
+ isSpecialStreamId[0xFF] = True; // program_stream_directory
+
+ haveInitializedIsSpecialStreamId = True; // from now on
+ }
+
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\tPES Header:\n");
+#endif
+ unsigned startPos = curOffset();
+
+ do {
+ u_int32_t startCodePlusStreamId = get4Bytes();
+ if ((startCodePlusStreamId&0xFFFFFF00) != 0x00000100) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePESHeader(0x%02x, %d): Bad start code: 0x%06x\n",
+ pidState->PID, numDataBytes, startCodePlusStreamId>>8);
+#endif
+ break;
+ }
+ u_int8_t stream_id = startCodePlusStreamId&0xFF;
+
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\tstream_id: 0x%02x; PES_packet_length: %d\n",
+ stream_id, get2Bytes());
+#else
+ skipBytes(2);
+#endif
+
+ if (!isSpecialStreamId[stream_id]) {
+ u_int16_t flags = get2Bytes();
+ if ((flags&0xC000) != 0x8000) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePESHeader(0x%02x, %d): Bad flags: 0x%04x\n",
+ pidState->PID, numDataBytes, flags);
+#endif
+ break;
+ }
+ u_int8_t PTS_DTS_flags = (flags&0x00C0)>>6;
+ Boolean ESCR_flag = (flags&0x0020) != 0;
+ Boolean ES_rate_flag = (flags&0x0010) != 0;
+ Boolean DSM_trick_mode_flag = (flags&0x0008) != 0;
+ Boolean additional_copy_info_flag = (flags&0x0004) != 0;
+ Boolean PES_CRC_flag = (flags&0x0002) != 0;
+ Boolean PES_extension_flag = (flags&0x0001) != 0;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\tflags: 0x%04x (PTS_DTS:%d; ESCR:%d; ES_rate:%d; DSM_trick_mode:%d; additional_copy_info:%d; PES_CRC:%d; PES_extension:%d)\n",
+ flags, PTS_DTS_flags, ESCR_flag, ES_rate_flag, DSM_trick_mode_flag, additional_copy_info_flag, PES_CRC_flag, PES_extension_flag);
+#endif
+
+ u_int8_t PES_header_data_length = get1Byte();
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\tPES_header_data_length: %d\n", PES_header_data_length);
+#endif
+
+ if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
+ // Begin with a PTS:
+ u_int8_t first8PTSBits = get1Byte();
+ u_int32_t last32PTSBits = get4Bytes();
+ if ((first8PTSBits&0xF1) != ((PTS_DTS_flags<<4)|0x01) ||
+ (last32PTSBits&0x00010001) != 0x00010001) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePESHeader(0x%02x, %d): Bad PTS bits: 0x%02x,0x%08x\n",
+ pidState->PID, numDataBytes, first8PTSBits, last32PTSBits);
+#endif
+ break;
+ }
+ u_int32_t ptsUpper32 = ((first8PTSBits&0x0E)<<28) | ((last32PTSBits&0xFFFE0000)>>3) | ((last32PTSBits&0x0000FFFC)>>2);
+ u_int8_t ptsLowBit = (last32PTSBits&0x00000002)>>1;
+ double PTS = ptsUpper32/45000.0;
+ if (ptsLowBit) PTS += 1/90000.0;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\tPTS: 0x%02x%08x => 0x%08x+%d => %.10f\n",
+ first8PTSBits, last32PTSBits, ptsUpper32, ptsLowBit, PTS);
+#endif
+ // Record this PTS:
+ pidState->lastSeenPTS = PTS;
+ }
+
+ if (PTS_DTS_flags == 3) {
+ // Continue with a DTS:
+ u_int8_t first8DTSBits = get1Byte();
+ u_int32_t last32DTSBits = get4Bytes();
+ if ((first8DTSBits&0x11) != 0x11 ||
+ (last32DTSBits&0x00010001) != 0x00010001) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePESHeader(0x%02x, %d): Bad DTS bits: 0x%02x,0x%08x\n",
+ pidState->PID, numDataBytes, first8DTSBits, last32DTSBits);
+#endif
+ break;
+ }
+ u_int32_t dtsUpper32 = ((first8DTSBits&0x0E)<<28) | ((last32DTSBits&0xFFFE0000)>>3) | ((last32DTSBits&0x0000FFFC)>>2);
+ u_int8_t dtsLowBit = (last32DTSBits&0x00000002)>>1;
+ double DTS = dtsUpper32/45000.0;
+ if (dtsLowBit) DTS += 1/90000.0;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\tDTS: 0x%02x%08x => 0x%08x+%d => %.10f\n",
+ first8DTSBits, last32DTSBits, dtsUpper32, dtsLowBit, DTS);
+#endif
+ }
+
+ if (ESCR_flag) {
+ // Skip over the ESCR
+ skipBytes(6);
+ }
+
+ if (ES_rate_flag) {
+ // Skip over the ES_rate
+ skipBytes(6);
+ }
+
+ if (DSM_trick_mode_flag) {
+ // Skip over this
+ skipBytes(1);
+ }
+
+ if (additional_copy_info_flag) {
+ // Skip over this
+ skipBytes(1);
+ }
+
+ if (PES_CRC_flag) {
+ // Skip over this
+ skipBytes(2);
+ }
+
+ if (PES_extension_flag) {
+ u_int8_t flags = get1Byte();
+ Boolean PES_private_data_flag = (flags&0x80) != 0;
+ Boolean pack_header_field_flag = (flags&0x40) != 0;
+ Boolean program_packet_sequence_counter_flag = (flags&0x20) != 0;
+ Boolean P_STD_buffer_flag = (flags&0x10) != 0;
+ Boolean PES_extension_flag_2 = (flags&0x01) != 0;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\tPES_extension: flags: 0x%02x (PES_private_data:%d; pack_header_field:%d; program_packet_sequence_counter:%d; P_STD_buffer:%d; PES_extension_2:%d\n",
+ flags, PES_private_data_flag, pack_header_field_flag, program_packet_sequence_counter_flag, P_STD_buffer_flag, PES_extension_flag_2);
+#endif
+ if (PES_private_data_flag) {
+ // Skip over this
+ skipBytes(16);
+ }
+ if (pack_header_field_flag) {
+ // Skip over this
+ skipBytes(1 + 12); // "pack_header()" is 12 bytes in size
+ }
+ if (program_packet_sequence_counter_flag) {
+ // Skip over this
+ skipBytes(2);
+ }
+ if (P_STD_buffer_flag) {
+ // Skip over this
+ skipBytes(2);
+ }
+ if (PES_extension_flag_2) {
+ u_int8_t PES_extension_field_length = get1Byte()&0x7F;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t\tPES_extension_field_length: %d\n", PES_extension_field_length);
+#endif
+ skipBytes(PES_extension_field_length);
+ }
+ }
+
+ // Make sure that the number of header bytes parsed is consistent with "PES_header_data_length"
+      // (and skip over any remaining 'stuffing' bytes):
+ if (curOffset() - startPos > 9 + PES_header_data_length) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePESHeader(0x%02x, %d): Error: Parsed %d PES header bytes; expected %d (based on \"PES_header_data_length\": %d)\n",
+ pidState->PID, numDataBytes, curOffset() - startPos, 9 + PES_header_data_length,
+ PES_header_data_length);
+#endif
+ break;
+ }
+ skipBytes(9 + PES_header_data_length - (curOffset() - startPos)); // >= 0
+ }
+
+ unsigned PESHeaderSize = curOffset() - startPos;
+#ifdef DEBUG_CONTENTS
+ fprintf(stderr, "\t\t\t=> PES header size: %d\n", PESHeaderSize);
+#endif
+ if (PESHeaderSize > numDataBytes) {
+#ifdef DEBUG_ERRORS
+ fprintf(stderr, "MPEG2TransportStreamParser::parsePESHeader(0x%02x, %d): Error: PES header size %d is larger than the number of bytes available (%d)\n",
+ pidState->PID, numDataBytes, PESHeaderSize, numDataBytes);
+#endif
+ break;
+ }
+ return PESHeaderSize;
+ } while (0);
+
+ // An error occurred. Skip over any remaining bytes in the packet:
+ int numBytesLeft = numDataBytes - (curOffset() - startPos);
+ if (numBytesLeft > 0) skipBytes((unsigned)numBytesLeft);
+ return 0;
+}
+
+
+//########## PIDState_STREAM implementation ##########
+
+PIDState_STREAM::PIDState_STREAM(MPEG2TransportStreamParser& parser,
+ u_int16_t pid, u_int16_t programNumber, u_int8_t streamType)
+ : PIDState(parser, pid, STREAM),
+ program_number(programNumber), stream_type(streamType), lastSeenPTS(0.0) {
+ // Create the 'source' and 'sink' objects for this track, and 'start playing' them:
+ streamSource = new MPEG2TransportStreamDemuxedTrack(parser, pid);
+
+ char fileName[100];
+ extern StreamType StreamTypes[];
+ StreamType& st = StreamTypes[streamType]; // alias
+ sprintf(fileName, "%s-0x%04x-0x%04x%s",
+ st.dataType == StreamType::AUDIO ? "AUDIO" :
+ st.dataType == StreamType::VIDEO ? "VIDEO" :
+ st.dataType == StreamType::DATA ? "DATA" :
+ st.dataType == StreamType::TEXT ? "TEXT" :
+ "UNKNOWN",
+ program_number, pid, st.filenameSuffix);
+ fprintf(stderr, "Creating new output file \"%s\"\n", fileName);
+ streamSink = FileSink::createNew(parser.envir(), fileName);
+ streamSink->startPlaying(*streamSource, NULL, NULL);
+}
+
+PIDState_STREAM::~PIDState_STREAM() {
+ Medium::close(streamSink);
+ Medium::close(streamSource);
+}
diff --git a/liveMedia/MPEG2TransportStreamTrickModeFilter.cpp b/liveMedia/MPEG2TransportStreamTrickModeFilter.cpp
new file mode 100644
index 0000000..62c6e26
--- /dev/null
+++ b/liveMedia/MPEG2TransportStreamTrickModeFilter.cpp
@@ -0,0 +1,266 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that converts a MPEG Transport Stream file - with corresponding index file
+// - to a corresponding Video Elementary Stream. It also uses a "scale" parameter
+// to implement 'trick mode' (fast forward or reverse play, using I-frames) on
+// the video stream.
+// Implementation
+
+#include "MPEG2TransportStreamTrickModeFilter.hh"
+#include <ByteStreamFileSource.hh>
+
+// Define the following to be True if we want the output file to have the same frame rate as the original file.
+// (Because the output file contains I-frames only, this means that each I-frame will appear in the output file
+// several times, and therefore the output file's bitrate will be significantly higher than that of the original.)
+// Define the following to be False if we want the output file to include each I-frame no more than once.
+// (This means that - except for high 'scale' values - both the output frame rate and the output bit rate
+// will be less than that of the original.)
+#define KEEP_ORIGINAL_FRAME_RATE False
+
+// Public factory function; simply forwards to the (protected) constructor.
+MPEG2TransportStreamTrickModeFilter* MPEG2TransportStreamTrickModeFilter
+::createNew(UsageEnvironment& env, FramedSource* inputSource,
+ MPEG2TransportStreamIndexFile* indexFile, int scale) {
+ return new MPEG2TransportStreamTrickModeFilter(env, inputSource, indexFile, scale);
+}
+
+// Constructor. A negative "scale" requests reverse play; it is normalized
+// here to a positive fScale plus fDirection == -1, so the rest of the code
+// only ever deals with a positive scale and a +/-1 direction.
+MPEG2TransportStreamTrickModeFilter
+::MPEG2TransportStreamTrickModeFilter(UsageEnvironment& env, FramedSource* inputSource,
+ MPEG2TransportStreamIndexFile* indexFile, int scale)
+ : FramedFilter(env, inputSource),
+ fHaveStarted(False), fIndexFile(indexFile), fScale(scale), fDirection(1),
+ fState(SKIPPING_FRAME), fFrameCount(0),
+ fNextIndexRecordNum(0), fNextTSPacketNum(0),
+ fCurrentTSPacketNum((unsigned long)(-1)), fUseSavedFrameNextTime(False) {
+ if (fScale < 0) { // reverse play
+ fScale = -fScale;
+ fDirection = -1;
+ }
+}
+
+// Destructor: nothing to release here; fIndexFile and the input source are
+// owned (and closed) elsewhere.
+MPEG2TransportStreamTrickModeFilter::~MPEG2TransportStreamTrickModeFilter() {
+}
+
+// Reposition the filter: seek the underlying Transport Stream file to the
+// given packet, and resume scanning the index file at the given record.
+// Always reports success.
+Boolean MPEG2TransportStreamTrickModeFilter::seekTo(unsigned long tsPacketNumber,
+ unsigned long indexRecordNumber) {
+ seekToTransportPacket(tsPacketNumber);
+ fNextIndexRecordNum = indexRecordNumber;
+ return True;
+}
+
+#define isIFrameStart(type) ((type) == 0x81/*actually, a VSH*/ || (type) == 0x85/*actually, a SPS, for H.264*/ || (type) == 0x8B/*actually, a VPS, for H.265*/)
+ // This relies upon I-frames always being preceded by a VSH+GOP (for MPEG-2 data),
+ // by a SPS (for H.264 data), or by a VPS (for H.265 data)
+#define isNonIFrameStart(type) ((type) == 0x83 || (type) == 0x88/*for H.264*/ || (type) == 0x8E/*for H.265*/)
+
+// Core trick-mode state machine. Repeatedly reads index records and, based
+// on the record type (I-frame start / non-I-frame start / continuation) and
+// the current state (SKIPPING_FRAME, SAVING_AND_DELIVERING_FRAME,
+// DELIVERING_SAVED_FRAME), decides whether to skip data, deliver it to the
+// client, or replay a previously-noted I-frame. Delivery (and return from
+// this function) happens via attemptDeliveryToClient(); end-of-index is
+// treated like end-of-source via onSourceClosure1().
+void MPEG2TransportStreamTrickModeFilter::doGetNextFrame() {
+ // fprintf(stderr, "#####DGNF1\n");
+ // If our client's buffer size is too small, then deliver
+ // a 0-byte 'frame', to tell it to process all of the data that it has
+ // already read, before asking for more data from us:
+ if (fMaxSize < TRANSPORT_PACKET_SIZE) {
+ fFrameSize = 0;
+ afterGetting(this);
+ return;
+ }
+
+ while (1) {
+ // Get the next record from our index file.
+ // This tells us the type of frame this data is, which Transport Stream packet
+ // (from the input source) the data comes from, and where in the Transport Stream
+ // packet it comes from:
+ u_int8_t recordType;
+ float recordPCR;
+ Boolean endOfIndexFile = False;
+ if (!fIndexFile->readIndexRecordValues(fNextIndexRecordNum,
+ fDesiredTSPacketNum, fDesiredDataOffset,
+ fDesiredDataSize, recordPCR,
+ recordType)) {
+ // We ran off the end of the index file. If we're not delivering a
+ // pre-saved frame, then handle this the same way as if the
+ // input Transport Stream source ended.
+ if (fState != DELIVERING_SAVED_FRAME) {
+ onSourceClosure1();
+ return;
+ }
+ endOfIndexFile = True;
+ } else if (!fHaveStarted) {
+ // Remember the first PCR seen; later PCRs are made relative to it
+ // when computing presentation times (see attemptDeliveryToClient()).
+ fFirstPCR = recordPCR;
+ fHaveStarted = True;
+ }
+ // fprintf(stderr, "#####read index record %ld: ts %ld: %c, PCR %f\n", fNextIndexRecordNum, fDesiredTSPacketNum, isIFrameStart(recordType) ? 'I' : isNonIFrameStart(recordType) ? 'j' : 'x', recordPCR);
+ // While replaying a saved frame we always advance forward (+1);
+ // otherwise we advance in the play direction (+1 or -1):
+ fNextIndexRecordNum
+ += (fState == DELIVERING_SAVED_FRAME) ? 1 : fDirection;
+
+ // Handle this index record, depending on the record type and our current state:
+ switch (fState) {
+ case SKIPPING_FRAME:
+ case SAVING_AND_DELIVERING_FRAME: {
+ // if (fState == SKIPPING_FRAME) fprintf(stderr, "\tSKIPPING_FRAME\n"); else fprintf(stderr, "\tSAVING_AND_DELIVERING_FRAME\n");//#####
+ if (isIFrameStart(recordType)) {
+ // Save a record of this frame:
+ fSavedFrameIndexRecordStart = fNextIndexRecordNum - fDirection;
+ fUseSavedFrameNextTime = True;
+ // fprintf(stderr, "\trecording\n");//#####
+ if ((fFrameCount++)%fScale == 0 && fUseSavedFrameNextTime) {
+ // A frame is due now.
+ fFrameCount = 1; // reset to avoid overflow
+ if (fDirection > 0) {
+ // Begin delivering this frame, as we're scanning it:
+ fState = SAVING_AND_DELIVERING_FRAME;
+ // fprintf(stderr, "\tdelivering\n");//#####
+ fDesiredDataPCR = recordPCR; // use this frame's PCR
+ attemptDeliveryToClient();
+ return;
+ } else {
+ // Deliver this frame, then resume normal scanning:
+ // (This relies on the index records having begun with an I-frame.)
+ fState = DELIVERING_SAVED_FRAME;
+ fSavedSequentialIndexRecordNum = fNextIndexRecordNum;
+ fDesiredDataPCR = recordPCR;
+ // use this frame's (not the saved frame's) PCR
+ fNextIndexRecordNum = fSavedFrameIndexRecordStart;
+ // fprintf(stderr, "\tbeginning delivery of saved frame\n");//#####
+ }
+ } else {
+ // No frame is needed now:
+ fState = SKIPPING_FRAME;
+ }
+ } else if (isNonIFrameStart(recordType)) {
+ if ((fFrameCount++)%fScale == 0 && fUseSavedFrameNextTime) {
+ // A frame is due now, so begin delivering the one that we had saved:
+ // (This relies on the index records having begun with an I-frame.)
+ fFrameCount = 1; // reset to avoid overflow
+ fState = DELIVERING_SAVED_FRAME;
+ fSavedSequentialIndexRecordNum = fNextIndexRecordNum;
+ fDesiredDataPCR = recordPCR;
+ // use this frame's (not the saved frame's) PCR
+ fNextIndexRecordNum = fSavedFrameIndexRecordStart;
+ // fprintf(stderr, "\tbeginning delivery of saved frame\n");//#####
+ } else {
+ // No frame is needed now:
+ fState = SKIPPING_FRAME;
+ }
+ } else {
+ // Not the start of a frame, but deliver it, if it's needed:
+ if (fState == SAVING_AND_DELIVERING_FRAME) {
+ // fprintf(stderr, "\tdelivering\n");//#####
+ fDesiredDataPCR = recordPCR; // use this frame's PCR
+ attemptDeliveryToClient();
+ return;
+ }
+ }
+ break;
+ }
+ case DELIVERING_SAVED_FRAME: {
+ // fprintf(stderr, "\tDELIVERING_SAVED_FRAME\n");//#####
+ if (endOfIndexFile
+ || (isIFrameStart(recordType)
+ && fNextIndexRecordNum-1 != fSavedFrameIndexRecordStart)
+ || isNonIFrameStart(recordType)) {
+ // fprintf(stderr, "\tended delivery of saved frame\n");//#####
+ // We've reached the end of the saved frame, so revert to the
+ // original sequence of index records:
+ fNextIndexRecordNum = fSavedSequentialIndexRecordNum;
+ fUseSavedFrameNextTime = KEEP_ORIGINAL_FRAME_RATE;
+ fState = SKIPPING_FRAME;
+ } else {
+ // Continue delivering:
+ // fprintf(stderr, "\tdelivering\n");//#####
+ attemptDeliveryToClient();
+ return;
+ }
+ break;
+ }
+ }
+ }
+}
+
+// Stop delivering frames: propagate to the base class, and also stop any
+// pending asynchronous reads on the index file.
+void MPEG2TransportStreamTrickModeFilter::doStopGettingFrames() {
+ FramedFilter::doStopGettingFrames();
+ fIndexFile->stopReading();
+}
+
+// Deliver the currently-desired data to the client if the right Transport
+// Packet is already in fInputBuffer; otherwise kick off an asynchronous read
+// of that packet (which will re-enter here via afterGettingFrame1()).
+// The presentation time is the PCR, rebased to fFirstPCR and compressed by
+// fScale, clamped at zero.
+void MPEG2TransportStreamTrickModeFilter::attemptDeliveryToClient() {
+ if (fCurrentTSPacketNum == fDesiredTSPacketNum) {
+ // fprintf(stderr, "\t\tdelivering ts %d:%d, %d bytes, PCR %f\n", fCurrentTSPacketNum, fDesiredDataOffset, fDesiredDataSize, fDesiredDataPCR);//#####
+ // We already have the Transport Packet that we want. Deliver its data:
+ memmove(fTo, &fInputBuffer[fDesiredDataOffset], fDesiredDataSize);
+ fFrameSize = fDesiredDataSize;
+ float deliveryPCR = fDirection*(fDesiredDataPCR - fFirstPCR)/fScale;
+ if (deliveryPCR < 0.0) deliveryPCR = 0.0;
+ fPresentationTime.tv_sec = (unsigned long)deliveryPCR;
+ fPresentationTime.tv_usec
+ = (unsigned long)((deliveryPCR - fPresentationTime.tv_sec)*1000000.0f);
+ // fprintf(stderr, "#####DGNF9\n");
+
+ afterGetting(this);
+ } else {
+ // Arrange to read the Transport Packet that we want:
+ readTransportPacket(fDesiredTSPacketNum);
+ }
+}
+
+// Seek the input file so the next read returns packet #tsPacketNum.
+// No-op if we're already positioned there.
+// NOTE(review): assumes fInputSource is really a ByteStreamFileSource —
+// the cast is unchecked; confirm this invariant at the call sites.
+void MPEG2TransportStreamTrickModeFilter::seekToTransportPacket(unsigned long tsPacketNum) {
+ if (tsPacketNum == fNextTSPacketNum) return; // we're already there
+
+ ByteStreamFileSource* tsFile = (ByteStreamFileSource*)fInputSource;
+ u_int64_t tsPacketNum64 = (u_int64_t)tsPacketNum;
+ tsFile->seekToByteAbsolute(tsPacketNum64*TRANSPORT_PACKET_SIZE);
+
+ fNextTSPacketNum = tsPacketNum;
+}
+
+// Seek to, then asynchronously read, one full Transport Packet into
+// fInputBuffer; completion is signalled via afterGettingFrame().
+void MPEG2TransportStreamTrickModeFilter::readTransportPacket(unsigned long tsPacketNum) {
+ seekToTransportPacket(tsPacketNum);
+ fInputSource->getNextFrame(fInputBuffer, TRANSPORT_PACKET_SIZE,
+ afterGettingFrame, this,
+ onSourceClosure, this);
+}
+
+// Static trampoline for the async read completion: recover 'this' from
+// "clientData" and continue in the member function.
+void MPEG2TransportStreamTrickModeFilter
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned /*numTruncatedBytes*/,
+ struct timeval presentationTime,
+ unsigned /*durationInMicroseconds*/) {
+ MPEG2TransportStreamTrickModeFilter* filter = (MPEG2TransportStreamTrickModeFilter*)clientData;
+ filter->afterGettingFrame1(frameSize);
+}
+
+// Completion handler for a Transport Packet read. A short read is treated
+// as end-of-source; otherwise record which packet is now buffered and retry
+// the pending delivery.
+void MPEG2TransportStreamTrickModeFilter::afterGettingFrame1(unsigned frameSize) {
+ if (frameSize != TRANSPORT_PACKET_SIZE) {
+ // Treat this as if the input source ended:
+ onSourceClosure1();
+ return;
+ }
+
+ fCurrentTSPacketNum = fNextTSPacketNum; // i.e., the one that we just read
+ ++fNextTSPacketNum;
+
+ // Attempt deliver again:
+ attemptDeliveryToClient();
+}
+
+// Static trampoline for source-closure notification.
+void MPEG2TransportStreamTrickModeFilter::onSourceClosure(void* clientData) {
+ MPEG2TransportStreamTrickModeFilter* filter = (MPEG2TransportStreamTrickModeFilter*)clientData;
+ filter->onSourceClosure1();
+}
+
+// Handle end of input (or end of index file): stop index reads, then
+// propagate closure to our downstream object.
+void MPEG2TransportStreamTrickModeFilter::onSourceClosure1() {
+ fIndexFile->stopReading();
+ handleClosure();
+}
diff --git a/liveMedia/MPEG2TransportUDPServerMediaSubsession.cpp b/liveMedia/MPEG2TransportUDPServerMediaSubsession.cpp
new file mode 100644
index 0000000..8a9df3a
--- /dev/null
+++ b/liveMedia/MPEG2TransportUDPServerMediaSubsession.cpp
@@ -0,0 +1,75 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an incoming UDP (or RTP/UDP) MPEG-2 Transport Stream
+// Implementation
+
+#include "MPEG2TransportUDPServerMediaSubsession.hh"
+#include "BasicUDPSource.hh"
+#include "SimpleRTPSource.hh"
+#include "MPEG2TransportStreamFramer.hh"
+#include "SimpleRTPSink.hh"
+#include "GroupsockHelper.hh"
+
+
+// Public factory function; simply forwards to the (protected) constructor.
+MPEG2TransportUDPServerMediaSubsession*
+MPEG2TransportUDPServerMediaSubsession::createNew(UsageEnvironment& env,
+ char const* inputAddressStr, Port const& inputPort, Boolean inputStreamIsRawUDP) {
+ return new MPEG2TransportUDPServerMediaSubsession(env, inputAddressStr, inputPort, inputStreamIsRawUDP);
+}
+
+// Constructor. Copies the input address string (owned; freed in the
+// destructor); the input groupsock is created lazily in
+// createNewStreamSource(). "reuseFirstSource" is forced True so all RTSP
+// clients share the single incoming UDP stream.
+MPEG2TransportUDPServerMediaSubsession
+::MPEG2TransportUDPServerMediaSubsession(UsageEnvironment& env,
+ char const* inputAddressStr, Port const& inputPort, Boolean inputStreamIsRawUDP)
+ : OnDemandServerMediaSubsession(env, True/*reuseFirstSource*/),
+ fInputPort(inputPort), fInputGroupsock(NULL), fInputStreamIsRawUDP(inputStreamIsRawUDP) {
+ fInputAddressStr = strDup(inputAddressStr);
+}
+
+// Destructor: release the lazily-created input groupsock and our copy of
+// the address string (strDup() allocates with new[], hence delete[]).
+MPEG2TransportUDPServerMediaSubsession::
+~MPEG2TransportUDPServerMediaSubsession() {
+ delete fInputGroupsock;
+ delete[] (char*)fInputAddressStr;
+}
+
+// Create the stream source for a client session: a raw-UDP or RTP receiver
+// on our input groupsock (created on first use), wrapped in a
+// MPEG2TransportStreamFramer. A NULL input address means "any" (0).
+FramedSource* MPEG2TransportUDPServerMediaSubsession
+::createNewStreamSource(unsigned/* clientSessionId*/, unsigned& estBitrate) {
+ estBitrate = 5000; // kbps, estimate
+
+ if (fInputGroupsock == NULL) {
+ // Create a 'groupsock' object for receiving the input stream:
+ struct in_addr inputAddress;
+ inputAddress.s_addr = fInputAddressStr == NULL ? 0 : our_inet_addr(fInputAddressStr);
+ fInputGroupsock = new Groupsock(envir(), inputAddress, fInputPort, 255);
+ }
+
+ FramedSource* transportStreamSource;
+ if (fInputStreamIsRawUDP) {
+ transportStreamSource = BasicUDPSource::createNew(envir(), fInputGroupsock);
+ } else {
+ // RTP payload type 33, 90 kHz clock: the standard "video/MP2T" mapping.
+ transportStreamSource = SimpleRTPSource::createNew(envir(), fInputGroupsock, 33, 90000, "video/MP2T", 0, False /*no 'M' bit*/);
+ }
+ return MPEG2TransportStreamFramer::createNew(envir(), transportStreamSource);
+}
+
+// Create the outgoing RTP sink: static payload type 33 ("video/MP2T"),
+// 90 kHz timestamp clock, marker bit unused.
+RTPSink* MPEG2TransportUDPServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char /*rtpPayloadTypeIfDynamic*/, FramedSource* /*inputSource*/) {
+ return SimpleRTPSink::createNew(envir(), rtpGroupsock,
+ 33, 90000, "video", "MP2T",
+ 1, True, False /*no 'M' bit*/);
+}
diff --git a/liveMedia/MPEG4ESVideoRTPSink.cpp b/liveMedia/MPEG4ESVideoRTPSink.cpp
new file mode 100644
index 0000000..26fe61a
--- /dev/null
+++ b/liveMedia/MPEG4ESVideoRTPSink.cpp
@@ -0,0 +1,142 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG-4 Elementary Stream video (RFC 3016)
+// Implementation
+
+#include "MPEG4ESVideoRTPSink.hh"
+#include "MPEG4VideoStreamFramer.hh"
+#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
+
+// Constructor. "configStr" (a hex string) is decoded into fConfigBytes /
+// fNumConfigBytes for later use in the "a=fmtp:" SDP line ("MP4V-ES" payload).
+MPEG4ESVideoRTPSink
+::MPEG4ESVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
+ u_int8_t profileAndLevelIndication, char const* configStr)
+ : VideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "MP4V-ES"),
+ fVOPIsPresent(False), fProfileAndLevelIndication(profileAndLevelIndication), fFmtpSDPLine(NULL) {
+ fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes);
+}
+
+// Destructor: free the cached SDP line and the decoded config bytes.
+MPEG4ESVideoRTPSink::~MPEG4ESVideoRTPSink() {
+ delete[] fFmtpSDPLine;
+ delete[] fConfigBytes;
+}
+
+// Factory (simple form): no explicit profile/config; these will be taken
+// from the framer source when the SDP line is generated.
+MPEG4ESVideoRTPSink*
+MPEG4ESVideoRTPSink::createNew(UsageEnvironment& env,
+ Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency) {
+ return new MPEG4ESVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
+}
+
+// Factory (explicit form): caller supplies the profile-level id and the
+// hex "config" string directly.
+MPEG4ESVideoRTPSink*
+MPEG4ESVideoRTPSink::createNew(UsageEnvironment& env,
+ Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
+ u_int8_t profileAndLevelIndication, char const* configStr) {
+ return new MPEG4ESVideoRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, profileAndLevelIndication, configStr);
+}
+
+// We can only be fed by a MPEG4VideoStreamFramer (we rely on its
+// pictureEndMarker() in doSpecialFrameHandling()).
+Boolean MPEG4ESVideoRTPSink::sourceIsCompatibleWithUs(MediaSource& source) {
+ // Our source must be an appropriate framer:
+ return source.isMPEG4VideoStreamFramer();
+}
+
+#define VOP_START_CODE 0x000001B6
+
+// Per-frame RTP packing hook (RFC 3016): note whether the packet carries a
+// VOP (so no further frames get packed after it), set the 'M' bit on the
+// last fragment of a picture, and stamp the RTP timestamp.
+void MPEG4ESVideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+ if (fragmentationOffset == 0) {
+ // Begin by inspecting the 4-byte code at the start of the frame:
+ if (numBytesInFrame < 4) return; // shouldn't happen
+ u_int32_t startCode
+ = (frameStart[0]<<24) | (frameStart[1]<<16) | (frameStart[2]<<8) | frameStart[3];
+
+ fVOPIsPresent = startCode == VOP_START_CODE;
+ }
+
+ // Set the RTP 'M' (marker) bit iff this frame ends a VOP
+ // (and there are no fragments remaining).
+ // This relies on the source being a "MPEG4VideoStreamFramer".
+ MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource;
+ if (framerSource != NULL && framerSource->pictureEndMarker()
+ && numRemainingBytes == 0) {
+ setMarkerBit();
+ framerSource->pictureEndMarker() = False;
+ }
+
+ // Also set the RTP timestamp. (We do this for each frame
+ // in the packet, to ensure that the timestamp of the VOP (if present)
+ // gets used.)
+ setTimestamp(framePresentationTime);
+}
+
+// A frame that didn't start this packet may still be fragmented across
+// subsequent packets.
+Boolean MPEG4ESVideoRTPSink::allowFragmentationAfterStart() const {
+ return True;
+}
+
+// Disallow packing further frames once a VOP is in the packet (state set in
+// doSpecialFrameHandling()).
+Boolean MPEG4ESVideoRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+ // Once we've packed a VOP into the packet, then no other
+ // frame can be packed into it:
+ return !fVOPIsPresent;
+}
+
+// Build (or rebuild) the "a=fmtp:" SDP line for this stream, using our own
+// profile/config if supplied, otherwise the framer source's. Returns NULL
+// if the needed information isn't available yet. The returned string is
+// cached in fFmtpSDPLine and owned by this object.
+char const* MPEG4ESVideoRTPSink::auxSDPLine() {
+ // Generate a new "a=fmtp:" line each time, using our own 'configuration' information (if we have it),
+ // otherwise parameters from our framer source (in case they've changed since the last time that
+ // we were called):
+ unsigned configLength = fNumConfigBytes;
+ unsigned char* config = fConfigBytes;
+ if (fProfileAndLevelIndication == 0 || config == NULL) {
+ // We need to get this information from our framer source:
+ MPEG4VideoStreamFramer* framerSource = (MPEG4VideoStreamFramer*)fSource;
+ if (framerSource == NULL) return NULL; // we don't yet have a source
+
+ fProfileAndLevelIndication = framerSource->profile_and_level_indication();
+ if (fProfileAndLevelIndication == 0) return NULL; // our source isn't ready
+
+ config = framerSource->getConfigBytes(configLength);
+ if (config == NULL) return NULL; // our source isn't ready
+ }
+
+ // Size the buffer generously: each "%d" can expand by at most one extra
+ // char beyond the budgeted 3, and the slack also covers the '\0'.
+ char const* fmtpFmt =
+ "a=fmtp:%d "
+ "profile-level-id=%d;"
+ "config=";
+ unsigned fmtpFmtSize = strlen(fmtpFmt)
+ + 3 /* max char len */
+ + 3 /* max char len */
+ + 2*configLength /* 2*, because each byte prints as 2 chars */
+ + 2 /* trailing \r\n */;
+ char* fmtp = new char[fmtpFmtSize];
+ sprintf(fmtp, fmtpFmt, rtpPayloadType(), fProfileAndLevelIndication);
+ char* endPtr = &fmtp[strlen(fmtp)];
+ // Append the config bytes as uppercase hex:
+ for (unsigned i = 0; i < configLength; ++i) {
+ sprintf(endPtr, "%02X", config[i]);
+ endPtr += 2;
+ }
+ sprintf(endPtr, "\r\n");
+
+ delete[] fFmtpSDPLine;
+ fFmtpSDPLine = strDup(fmtp);
+ delete[] fmtp;
+ return fFmtpSDPLine;
+}
diff --git a/liveMedia/MPEG4ESVideoRTPSource.cpp b/liveMedia/MPEG4ESVideoRTPSource.cpp
new file mode 100644
index 0000000..9402397
--- /dev/null
+++ b/liveMedia/MPEG4ESVideoRTPSource.cpp
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP4V-ES video RTP stream sources
+// Implementation
+
+#include "MPEG4ESVideoRTPSource.hh"
+
+///////// MPEG4ESVideoRTPSource implementation ////////
+
+//##### NOTE: INCOMPLETE!!! #####
+
+// Public factory function; simply forwards to the (protected) constructor.
+MPEG4ESVideoRTPSource*
+MPEG4ESVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+ return new MPEG4ESVideoRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency);
+}
+
+// Constructor: nothing beyond base-class initialization; MP4V-ES packets
+// carry no special header (see processSpecialHeader()).
+MPEG4ESVideoRTPSource
+::MPEG4ESVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency)
+ : MultiFramedRTPSource(env, RTPgs,
+ rtpPayloadFormat, rtpTimestampFrequency) {
+}
+
+MPEG4ESVideoRTPSource::~MPEG4ESVideoRTPSource() {
+}
+
+// Classify each incoming packet: a frame starts when the payload begins
+// with a start-code prefix (0x000001..), and ends when the RTP marker bit
+// is set. MP4V-ES has no payload-specific header, so the header size is 0.
+Boolean MPEG4ESVideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+ // The packet begins a frame iff its data begins with a system code
+ // (i.e., 0x000001??)
+ fCurrentPacketBeginsFrame
+ = packet->dataSize() >= 4 && (packet->data())[0] == 0
+ && (packet->data())[1] == 0 && (packet->data())[2] == 1;
+
+ // The RTP "M" (marker) bit indicates the last fragment of a frame:
+ fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+ // There is no special header
+ resultSpecialHeaderSize = 0;
+ return True;
+}
+
+// The MIME type for this payload format.
+char const* MPEG4ESVideoRTPSource::MIMEtype() const {
+ return "video/MP4V-ES";
+}
diff --git a/liveMedia/MPEG4GenericRTPSink.cpp b/liveMedia/MPEG4GenericRTPSink.cpp
new file mode 100644
index 0000000..de0e72f
--- /dev/null
+++ b/liveMedia/MPEG4GenericRTPSink.cpp
@@ -0,0 +1,142 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG4-GENERIC ("audio", "video", or "application") RTP stream sinks
+// Implementation
+
+#include "MPEG4GenericRTPSink.hh"
+#include "Locale.hh"
+#include <ctype.h> // needed on some systems to define "tolower()"
+
+// Constructor. Validates the "mpeg4Mode" parameter (only "aac-hbr" is
+// handled; the comparison is done case-insensitively in the POSIX locale),
+// then precomputes the "a=fmtp:" SDP line for this stream. The streamtype
+// value is 4 for video, 5 for audio/other.
+MPEG4GenericRTPSink
+::MPEG4GenericRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency,
+ char const* sdpMediaTypeString,
+ char const* mpeg4Mode, char const* configString,
+ unsigned numChannels)
+ : MultiFramedRTPSink(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency, "MPEG4-GENERIC", numChannels),
+ fSDPMediaTypeString(strDup(sdpMediaTypeString)),
+ fMPEG4Mode(strDup(mpeg4Mode)), fConfigString(strDup(configString)) {
+ // Check whether "mpeg4Mode" is one that we handle:
+ if (mpeg4Mode == NULL) {
+ env << "MPEG4GenericRTPSink error: NULL \"mpeg4Mode\" parameter\n";
+ } else {
+ // To ease comparison, convert "mpeg4Mode" to lower case:
+ size_t const len = strlen(mpeg4Mode) + 1;
+ char* m = new char[len];
+
+ Locale l("POSIX");
+ for (size_t i = 0; i < len; ++i) m[i] = tolower(mpeg4Mode[i]);
+
+ if (strcmp(m, "aac-hbr") != 0) {
+ env << "MPEG4GenericRTPSink error: Unknown \"mpeg4Mode\" parameter: \"" << mpeg4Mode << "\"\n";
+ }
+ delete[] m;
+ }
+
+ // Set up the "a=fmtp:" SDP line for this stream:
+ // (The +3+3 slack for the two "%d"s also covers the '\0' terminator.)
+ char const* fmtpFmt =
+ "a=fmtp:%d "
+ "streamtype=%d;profile-level-id=1;"
+ "mode=%s;sizelength=13;indexlength=3;indexdeltalength=3;"
+ "config=%s\r\n";
+ unsigned fmtpFmtSize = strlen(fmtpFmt)
+ + 3 /* max char len */
+ + 3 /* max char len */
+ + strlen(fMPEG4Mode)
+ + strlen(fConfigString);
+ char* fmtp = new char[fmtpFmtSize];
+ sprintf(fmtp, fmtpFmt,
+ rtpPayloadType(),
+ strcmp(fSDPMediaTypeString, "video") == 0 ? 4 : 5,
+ fMPEG4Mode,
+ fConfigString);
+ fFmtpSDPLine = strDup(fmtp);
+ delete[] fmtp;
+}
+
+// Destructor: free the cached SDP line and our strDup()'d copies of the
+// constructor's string parameters.
+MPEG4GenericRTPSink::~MPEG4GenericRTPSink() {
+ delete[] fFmtpSDPLine;
+ delete[] (char*)fConfigString;
+ delete[] (char*)fMPEG4Mode;
+ delete[] (char*)fSDPMediaTypeString;
+}
+
+// Public factory function; simply forwards to the (protected) constructor.
+MPEG4GenericRTPSink*
+MPEG4GenericRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency,
+ char const* sdpMediaTypeString,
+ char const* mpeg4Mode,
+ char const* configString, unsigned numChannels) {
+ return new MPEG4GenericRTPSink(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency,
+ sdpMediaTypeString, mpeg4Mode,
+ configString, numChannels);
+}
+
+// One frame per packet: our fixed 4-byte AU Header Section describes a
+// single AU only.
+Boolean MPEG4GenericRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+ // (For now) allow at most 1 frame in a single packet:
+ return False;
+}
+
+// Per-packet RTP packing hook: write the 4-byte "AU Header Section"
+// (16-bit AU-headers-length == 16 bits, then one AU-header holding the full
+// frame size in its top 13 bits, per sizelength=13/indexlength=3), set the
+// 'M' bit on the final fragment, and let the base class set the timestamp.
+void MPEG4GenericRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+ // Set the "AU Header Section". This is 4 bytes: 2 bytes for the
+ // initial "AU-headers-length" field, and 2 bytes for the first
+ // (and only) "AU Header":
+ unsigned fullFrameSize
+ = fragmentationOffset + numBytesInFrame + numRemainingBytes;
+ unsigned char headers[4];
+ headers[0] = 0; headers[1] = 16 /* bits */; // AU-headers-length
+ headers[2] = fullFrameSize >> 5; headers[3] = (fullFrameSize&0x1F)<<3;
+
+ setSpecialHeaderBytes(headers, sizeof headers);
+
+ if (numRemainingBytes == 0) {
+ // This packet contains the last (or only) fragment of the frame.
+ // Set the RTP 'M' ('marker') bit:
+ setMarkerBit();
+ }
+
+ // Important: Also call our base class's doSpecialFrameHandling(),
+ // to set the packet's timestamp:
+ MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+ frameStart, numBytesInFrame,
+ framePresentationTime,
+ numRemainingBytes);
+}
+
+// 2 bytes of "AU-headers-length" + 2 bytes for the single AU-header.
+unsigned MPEG4GenericRTPSink::specialHeaderSize() const {
+ return 2 + 2;
+}
+
+// The SDP media type ("audio", "video", or "application"), as given to the
+// constructor.
+char const* MPEG4GenericRTPSink::sdpMediaType() const {
+ return fSDPMediaTypeString;
+}
+
+// Return the "a=fmtp:" line precomputed in the constructor.
+char const* MPEG4GenericRTPSink::auxSDPLine() {
+ return fFmtpSDPLine;
+}
diff --git a/liveMedia/MPEG4GenericRTPSource.cpp b/liveMedia/MPEG4GenericRTPSource.cpp
new file mode 100644
index 0000000..e10c024
--- /dev/null
+++ b/liveMedia/MPEG4GenericRTPSource.cpp
@@ -0,0 +1,234 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG4-GENERIC ("audio", "video", or "application") RTP stream sources
+// Implementation
+
+#include "MPEG4GenericRTPSource.hh"
+#include "BitVector.hh"
+#include "MPEG4LATMAudioRTPSource.hh" // for parseGeneralConfigStr()
+
+////////// MPEG4GenericBufferedPacket and MPEG4GenericBufferedPacketFactory
+
+// A BufferedPacket subclass that uses the source's parsed AU-headers to
+// split one RTP packet into its enclosed access units.
+class MPEG4GenericBufferedPacket: public BufferedPacket {
+public:
+ MPEG4GenericBufferedPacket(MPEG4GenericRTPSource* ourSource);
+ virtual ~MPEG4GenericBufferedPacket();
+
+private: // redefined virtual functions
+ virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+ unsigned dataSize);
+private:
+ MPEG4GenericRTPSource* fOurSource; // back-pointer; not owned
+};
+
+// Factory producing MPEG4GenericBufferedPacket objects for the source.
+class MPEG4GenericBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+ virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+////////// AUHeader //////////
+// One parsed "AU-header" from a packet's AU Header Section.
+struct AUHeader {
+ unsigned size; // AU-size: length of this access unit, in bytes
+ unsigned index; // indexDelta for the 2nd & subsequent headers
+};
+
+
+///////// MPEG4GenericRTPSource implementation ////////
+
+//##### NOTE: INCOMPLETE!!! Support more modes, and interleaving #####
+
+// Public factory function; simply forwards to the (protected) constructor.
+MPEG4GenericRTPSource*
+MPEG4GenericRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mediumName,
+ char const* mode,
+ unsigned sizeLength, unsigned indexLength,
+ unsigned indexDeltaLength
+ ) {
+ return new MPEG4GenericRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency, mediumName,
+ mode, sizeLength, indexLength,
+ indexDeltaLength
+ );
+}
+
+// Constructor. Stores the SDP fmtp field lengths (sizelength/indexlength/
+// indexdeltalength) used to parse AU Header Sections, builds the
+// "<medium>/MPEG4-GENERIC" MIME string, and warns about modes other than
+// "aac-hbr"/"generic" (which are not supported here).
+MPEG4GenericRTPSource
+::MPEG4GenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mediumName,
+ char const* mode,
+ unsigned sizeLength, unsigned indexLength,
+ unsigned indexDeltaLength
+ )
+ : MultiFramedRTPSource(env, RTPgs,
+ rtpPayloadFormat, rtpTimestampFrequency,
+ new MPEG4GenericBufferedPacketFactory),
+ fSizeLength(sizeLength), fIndexLength(indexLength),
+ fIndexDeltaLength(indexDeltaLength),
+ fNumAUHeaders(0), fNextAUHeader(0), fAUHeaders(NULL) {
+ unsigned mimeTypeLength =
+ strlen(mediumName) + 14 /* strlen("/MPEG4-GENERIC") */ + 1;
+ fMIMEType = new char[mimeTypeLength];
+ if (fMIMEType != NULL) {
+ sprintf(fMIMEType, "%s/MPEG4-GENERIC", mediumName);
+ }
+
+ fMode = strDup(mode);
+ // Check for a "mode" that we don't yet support: //#####
+ if (mode == NULL ||
+ (strcmp(mode, "aac-hbr") != 0 && strcmp(mode, "generic") != 0)) {
+ envir() << "MPEG4GenericRTPSource Warning: Unknown or unsupported \"mode\": "
+ << mode << "\n";
+ }
+}
+
+// Destructor: free the parsed AU-header array, the mode copy, and the MIME
+// string.
+MPEG4GenericRTPSource::~MPEG4GenericRTPSource() {
+ delete[] fAUHeaders;
+ delete[] fMode;
+ delete[] fMIMEType;
+}
+
+// Parse the packet's leading "AU Header Section" (present iff sizelength >
+// 0): read the 16-bit AU-headers-length (in bits), compute how many
+// AU-headers fit, and extract each one's size and (delta-)index into a
+// fresh fAUHeaders array for nextEnclosedFrameSize() to consume.
+// Returns False if the packet is too short to contain the declared section.
+Boolean MPEG4GenericRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+ unsigned char* headerStart = packet->data();
+ unsigned packetSize = packet->dataSize();
+
+ fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame;
+ // whether the *previous* packet ended a frame
+
+ // The RTP "M" (marker) bit indicates the last fragment of a frame:
+ fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+ // default values:
+ resultSpecialHeaderSize = 0;
+ fNumAUHeaders = 0;
+ fNextAUHeader = 0;
+ delete[] fAUHeaders; fAUHeaders = NULL;
+
+ if (fSizeLength > 0) {
+ // The packet begins with a "AU Header Section". Parse it, to
+ // determine the "AU-header"s for each frame present in this packet:
+ resultSpecialHeaderSize += 2;
+ if (packetSize < resultSpecialHeaderSize) return False;
+
+ unsigned AU_headers_length = (headerStart[0]<<8)|headerStart[1];
+ unsigned AU_headers_length_bytes = (AU_headers_length+7)/8;
+ if (packetSize
+ < resultSpecialHeaderSize + AU_headers_length_bytes) return False;
+ resultSpecialHeaderSize += AU_headers_length_bytes;
+
+ // Figure out how many AU-headers are present in the packet:
+ // (the first header uses indexlength bits; subsequent ones use
+ // indexdeltalength bits)
+ int bitsAvail = AU_headers_length - (fSizeLength + fIndexLength);
+ if (bitsAvail >= 0 && (fSizeLength + fIndexDeltaLength) > 0) {
+ fNumAUHeaders = 1 + bitsAvail/(fSizeLength + fIndexDeltaLength);
+ }
+ if (fNumAUHeaders > 0) {
+ fAUHeaders = new AUHeader[fNumAUHeaders];
+ // Fill in each header:
+ BitVector bv(&headerStart[2], 0, AU_headers_length);
+ fAUHeaders[0].size = bv.getBits(fSizeLength);
+ fAUHeaders[0].index = bv.getBits(fIndexLength);
+
+ for (unsigned i = 1; i < fNumAUHeaders; ++i) {
+ fAUHeaders[i].size = bv.getBits(fSizeLength);
+ fAUHeaders[i].index = bv.getBits(fIndexDeltaLength);
+ }
+ }
+
+ }
+
+ return True;
+}
+
+// The MIME type built in the constructor ("<medium>/MPEG4-GENERIC").
+char const* MPEG4GenericRTPSource::MIMEtype() const {
+ return fMIMEType;
+}
+
+
+////////// MPEG4GenericBufferedPacket
+////////// and MPEG4GenericBufferedPacketFactory implementation
+
+// Trivial constructor/destructor: just remember the owning source, which
+// holds the parsed AU-header state shared by all its packets.
+MPEG4GenericBufferedPacket
+::MPEG4GenericBufferedPacket(MPEG4GenericRTPSource* ourSource)
+ : fOurSource(ourSource) {
+}
+
+MPEG4GenericBufferedPacket::~MPEG4GenericBufferedPacket() {
+}
+
+// Return the size of the next access unit within this packet, by consuming
+// the next parsed AU-header. Falls back to "everything remaining" when no
+// AU-headers were parsed, or (with a diagnostic) when the header list is
+// exhausted; the returned size is clamped to the remaining data size.
+unsigned MPEG4GenericBufferedPacket
+::nextEnclosedFrameSize(unsigned char*& /*framePtr*/, unsigned dataSize) {
+ // WE CURRENTLY DON'T IMPLEMENT INTERLEAVING. FIX THIS! #####
+ AUHeader* auHeader = fOurSource->fAUHeaders;
+ if (auHeader == NULL) return dataSize;
+ unsigned numAUHeaders = fOurSource->fNumAUHeaders;
+
+ if (fOurSource->fNextAUHeader >= numAUHeaders) {
+ fOurSource->envir() << "MPEG4GenericBufferedPacket::nextEnclosedFrameSize("
+ << dataSize << "): data error ("
+ << auHeader << "," << fOurSource->fNextAUHeader
+ << "," << numAUHeaders << ")!\n";
+ return dataSize;
+ }
+
+ auHeader = &auHeader[fOurSource->fNextAUHeader++];
+ return auHeader->size <= dataSize ? auHeader->size : dataSize;
+}
+
+// Create a packet bound to its owning MPEG4GenericRTPSource.
+BufferedPacket* MPEG4GenericBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+ return new MPEG4GenericBufferedPacket((MPEG4GenericRTPSource*)ourSource);
+}
+
+
+////////// samplingFrequencyFromAudioSpecificConfig() implementation //////////
+
+// Sampling frequencies keyed by the 4-bit samplingFrequencyIndex of an
+// AudioSpecificConfig; 0 entries mark reserved indexes (13, 14) and the
+// escape value (15, "frequency follows explicitly").
+static unsigned const samplingFrequencyFromIndex[16] = {
+ 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
+ 16000, 12000, 11025, 8000, 7350, 0, 0, 0
+};
+
+// Extract the sampling frequency from a hex-encoded AudioSpecificConfig
+// string: decode the hex, read the 4-bit samplingFrequencyIndex that
+// straddles bytes 0/1, and either look it up in the table or - for the
+// escape index 15 - read the explicit 24-bit frequency that follows.
+// Returns 0 on any parse error.
+unsigned samplingFrequencyFromAudioSpecificConfig(char const* configStr) {
+ unsigned char* config = NULL;
+ unsigned result = 0; // if returned, indicates an error
+
+ do {
+ // Begin by parsing the config string:
+ unsigned configSize;
+ config = parseGeneralConfigStr(configStr, configSize);
+ if (config == NULL) break;
+
+ if (configSize < 2) break;
+ unsigned char samplingFrequencyIndex = ((config[0]&0x07)<<1) | (config[1]>>7);
+ if (samplingFrequencyIndex < 15) {
+ result = samplingFrequencyFromIndex[samplingFrequencyIndex];
+ break;
+ }
+
+ // Index == 15 means that the actual frequency is next (24 bits):
+ if (configSize < 5) break;
+ result = ((config[1]&0x7F)<<17) | (config[2]<<9) | (config[3]<<1) | (config[4]>>7);
+ } while (0);
+
+ delete[] config;
+ return result;
+}
diff --git a/liveMedia/MPEG4LATMAudioRTPSink.cpp b/liveMedia/MPEG4LATMAudioRTPSink.cpp
new file mode 100644
index 0000000..f033dd3
--- /dev/null
+++ b/liveMedia/MPEG4LATMAudioRTPSink.cpp
@@ -0,0 +1,95 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG-4 audio, using LATM multiplexing (RFC 3016)
+// Implementation
+
+#include "MPEG4LATMAudioRTPSink.hh"
+
+// Constructor: records the LATM StreamMuxConfig string and precomputes the
+// "a=fmtp:" SDP attribute line that advertises it (RFC 3016).
+MPEG4LATMAudioRTPSink
+::MPEG4LATMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+			u_int8_t rtpPayloadFormat,
+			u_int32_t rtpTimestampFrequency,
+			char const* streamMuxConfigString,
+			unsigned numChannels,
+			Boolean allowMultipleFramesPerPacket)
+  : AudioRTPSink(env, RTPgs, rtpPayloadFormat,
+		 rtpTimestampFrequency, "MP4A-LATM", numChannels),
+    fStreamMuxConfigString(strDup(streamMuxConfigString)),
+    fAllowMultipleFramesPerPacket(allowMultipleFramesPerPacket) {
+  // Set up the "a=fmtp:" SDP line for this stream:
+  // "cpresent=0" => the StreamMuxConfig is carried here (out-of-band),
+  // not inside the RTP payload.
+  char const* fmtpFmt =
+    "a=fmtp:%d "
+    "cpresent=0;config=%s\r\n";
+  // Size budget: the literal format (its "%d"/"%s" over-count slightly,
+  // which is harmless) + up to 3 digits of payload type + the config string.
+  unsigned fmtpFmtSize = strlen(fmtpFmt)
+    + 3 /* max char len */
+    + strlen(fStreamMuxConfigString);
+  char* fmtp = new char[fmtpFmtSize];
+  sprintf(fmtp, fmtpFmt,
+	  rtpPayloadType(),
+	  fStreamMuxConfigString);
+  fFmtpSDPLine = strDup(fmtp);
+  delete[] fmtp;
+}
+
+// Destructor: releases the strDup()'d SDP line and config string.
+MPEG4LATMAudioRTPSink::~MPEG4LATMAudioRTPSink() {
+  delete[] fFmtpSDPLine;
+  delete[] (char*)fStreamMuxConfigString; // cast away const for delete[]
+}
+
+// Public factory; the constructor is kept non-public per library convention.
+MPEG4LATMAudioRTPSink*
+MPEG4LATMAudioRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+				 u_int8_t rtpPayloadFormat,
+				 u_int32_t rtpTimestampFrequency,
+				 char const* streamMuxConfigString,
+				 unsigned numChannels,
+				 Boolean allowMultipleFramesPerPacket) {
+  return new MPEG4LATMAudioRTPSink(env, RTPgs, rtpPayloadFormat,
+				   rtpTimestampFrequency, streamMuxConfigString,
+				   numChannels,
+				   allowMultipleFramesPerPacket);
+}
+
+// Whether additional frames may be appended to a packet that already holds
+// data; controlled entirely by the constructor flag.
+Boolean MPEG4LATMAudioRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+				 unsigned /*numBytesInFrame*/) const {
+  return fAllowMultipleFramesPerPacket;
+}
+
+// Per-frame RTP header handling: set the marker bit on the packet carrying
+// the final fragment of a frame, then let the base class set the timestamp.
+void MPEG4LATMAudioRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+			 unsigned char* frameStart,
+			 unsigned numBytesInFrame,
+			 struct timeval framePresentationTime,
+			 unsigned numRemainingBytes) {
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit:
+    setMarkerBit();
+  }
+
+  // Important: Also call our base class's doSpecialFrameHandling(),
+  // to set the packet's timestamp:
+  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+					     frameStart, numBytesInFrame,
+					     framePresentationTime,
+					     numRemainingBytes);
+}
+
+// Returns the "a=fmtp:" line built in the constructor (owned by this object).
+char const* MPEG4LATMAudioRTPSink::auxSDPLine() {
+  return fFmtpSDPLine;
+}
diff --git a/liveMedia/MPEG4LATMAudioRTPSource.cpp b/liveMedia/MPEG4LATMAudioRTPSource.cpp
new file mode 100644
index 0000000..d0b6c60
--- /dev/null
+++ b/liveMedia/MPEG4LATMAudioRTPSource.cpp
@@ -0,0 +1,264 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG-4 audio, using LATM multiplexing
+// Implementation
+
+#include "MPEG4LATMAudioRTPSource.hh"
+
+////////// LATMBufferedPacket and LATMBufferedPacketFactory //////////
+
+// A BufferedPacket specialization that delimits LATM audio frames using the
+// variable-length "PayloadLengthInfo" byte(s) at the start of each frame.
+class LATMBufferedPacket: public BufferedPacket {
+public:
+  LATMBufferedPacket(Boolean includeLATMDataLengthField);
+  virtual ~LATMBufferedPacket();
+
+private: // redefined virtual functions
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+					 unsigned dataSize);
+
+private:
+  // If False, the length byte(s) are stripped before delivery to the client.
+  Boolean fIncludeLATMDataLengthField;
+};
+
+// Factory producing LATMBufferedPacket objects for the RTP source.
+class LATMBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+///////// MPEG4LATMAudioRTPSource implementation ////////
+
+// Public factory for the LATM audio RTP source.
+MPEG4LATMAudioRTPSource*
+MPEG4LATMAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+				   unsigned char rtpPayloadFormat,
+				   unsigned rtpTimestampFrequency) {
+  return new MPEG4LATMAudioRTPSource(env, RTPgs, rtpPayloadFormat,
+				     rtpTimestampFrequency);
+}
+
+// Constructor: installs the LATM packet factory; by default the LATM data
+// length field is kept in the delivered frames (see omitLATMDataLengthField()).
+MPEG4LATMAudioRTPSource
+::MPEG4LATMAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+			  unsigned char rtpPayloadFormat,
+			  unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs,
+			 rtpPayloadFormat, rtpTimestampFrequency,
+			 new LATMBufferedPacketFactory),
+    fIncludeLATMDataLengthField(True) {
+}
+
+MPEG4LATMAudioRTPSource::~MPEG4LATMAudioRTPSource() {
+}
+
+// Tell future packets to strip the LATM length byte(s) from delivered frames.
+void MPEG4LATMAudioRTPSource::omitLATMDataLengthField() {
+  fIncludeLATMDataLengthField = False;
+}
+
+// LATM payloads carry no payload-specific header; frame boundaries are
+// inferred from the RTP marker bit alone.
+Boolean MPEG4LATMAudioRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame;
+  // whether the *previous* packet ended a frame
+
+  // The RTP "M" (marker) bit indicates the last fragment of a frame:
+  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+  // There is no special header
+  resultSpecialHeaderSize = 0;
+  return True;
+}
+
+// MIME type advertised for this source (RFC 3016 payload name).
+char const* MPEG4LATMAudioRTPSource::MIMEtype() const {
+  return "audio/MP4A-LATM";
+}
+
+
+////////// LATMBufferedPacket and LATMBufferedPacketFactory implementation
+
+// Packet construction: remember whether the length field should be delivered.
+LATMBufferedPacket::LATMBufferedPacket(Boolean includeLATMDataLengthField)
+  : fIncludeLATMDataLengthField(includeLATMDataLengthField) {
+}
+
+LATMBufferedPacket::~LATMBufferedPacket() {
+}
+
+// Determine the size of the next LATM frame in this packet.  The frame
+// length is coded as a run of 0xFF bytes plus one final byte, all summed
+// (LATM "PayloadLengthInfo").
+unsigned LATMBufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  // Look at the LATM data length byte(s), to determine the size
+  // of the LATM payload.
+  unsigned resultFrameSize = 0;
+  unsigned i;
+  for (i = 0; i < dataSize; ++i) {
+    resultFrameSize += framePtr[i];
+    if (framePtr[i] != 0xFF) break;
+  }
+  ++i; // i is now the number of length bytes consumed
+  // NOTE(review): if every byte was 0xFF the loop exits with i == dataSize,
+  // so after ++i the "dataSize -= i" below wraps (unsigned).  Presumably
+  // such malformed input is not expected; verify upstream validation.
+  if (fIncludeLATMDataLengthField) {
+    // Deliver the length byte(s) along with the frame data:
+    resultFrameSize += i;
+  } else {
+    // Strip the length byte(s); deliver the frame data only:
+    framePtr += i;
+    dataSize -= i;
+  }
+
+  // Never claim more than the data actually present in the packet:
+  return (resultFrameSize <= dataSize) ? resultFrameSize : dataSize;
+}
+
+// Factory hook: new packets inherit the source's current length-field setting.
+BufferedPacket* LATMBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+  MPEG4LATMAudioRTPSource* source = (MPEG4LATMAudioRTPSource*)ourSource;
+  return new LATMBufferedPacket(source->returnedFrameIncludesLATMDataLengthField());
+}
+
+
+////////// parseStreamMuxConfigStr() implementation //////////
+
+// Consume one hex digit from "configStr" (advancing the pointer) into
+// "resultNibble".  Returns False at end-of-string or on a non-hex character,
+// in which case the pointer is left unmoved.
+static Boolean getNibble(char const*& configStr,
+			 unsigned char& resultNibble) {
+  char c = configStr[0];
+  if (c == '\0') return False; // we've reached the end
+
+  if (c >= '0' && c <= '9') {
+    resultNibble = c - '0';
+  } else if (c >= 'A' && c <= 'F') {
+    resultNibble = 10 + c - 'A';
+  } else if (c >= 'a' && c <= 'f') {
+    resultNibble = 10 + c - 'a';
+  } else {
+    return False;
+  }
+
+  ++configStr; // move to the next nibble
+  return True;
+}
+
+// Consume two hex digits from "configStr" into "resultByte".  An odd-length
+// string is tolerated: if the string ends after the first digit, the low
+// nibble is taken as 0.  Returns False if no byte could be read or the
+// second character is malformed (non-hex, non-NUL).
+static Boolean getByte(char const*& configStr, unsigned char& resultByte) {
+  resultByte = 0; // by default, in case parsing fails
+
+  unsigned char firstNibble;
+  if (!getNibble(configStr, firstNibble)) return False;
+  resultByte = firstNibble<<4;
+
+  unsigned char secondNibble = 0;
+  if (!getNibble(configStr, secondNibble) && configStr[0] != '\0') {
+    // There's a second nibble, but it's malformed
+    return False;
+  }
+  resultByte |= secondNibble;
+
+  return True;
+}
+
+// Parse a hex "StreamMuxConfig" string (RFC 3016 "config" parameter) into
+// its leading fixed fields plus the embedded AudioSpecificConfig, which is
+// returned as a new[]'d buffer via "audioSpecificConfig".
+// Returns False on any parse error (only audioMuxVersion == 0 is supported).
+// NOTE(review): on the failure path after allocation, the buffer is freed
+// but the out-pointer is not reset to NULL — callers must ignore it when
+// False is returned.
+Boolean
+parseStreamMuxConfigStr(char const* configStr,
+			// result parameters:
+			Boolean& audioMuxVersion,
+			Boolean& allStreamsSameTimeFraming,
+			unsigned char& numSubFrames,
+			unsigned char& numProgram,
+			unsigned char& numLayer,
+			unsigned char*& audioSpecificConfig,
+			unsigned& audioSpecificConfigSize) {
+  // Set default versions of the result parameters:
+  audioMuxVersion = False;
+  allStreamsSameTimeFraming = True;
+  numSubFrames = numProgram = numLayer = 0;
+  audioSpecificConfig = NULL;
+  audioSpecificConfigSize = 0;
+
+  do {
+    if (configStr == NULL) break;
+
+    unsigned char nextByte;
+
+    // Byte 0: audioMuxVersion(1) | allStreamsSameTimeFraming(1) | numSubFrames(6)
+    if (!getByte(configStr, nextByte)) break;
+    audioMuxVersion = (nextByte&0x80) != 0;
+    if (audioMuxVersion) break; // we handle only audioMuxVersion == 0
+
+    allStreamsSameTimeFraming = ((nextByte&0x40)>>6) != 0;
+    numSubFrames = (nextByte&0x3F);
+
+    // Byte 1: numProgram(4) | numLayer(3) | first bit of AudioSpecificConfig
+    if (!getByte(configStr, nextByte)) break;
+    numProgram = (nextByte&0xF0)>>4;
+
+    numLayer = (nextByte&0x0E)>>1;
+
+    // The one remaining bit, and the rest of the string,
+    // are used for "audioSpecificConfig":
+    unsigned char remainingBit = nextByte&1;
+
+    // Worst-case size: each remaining hex pair yields a byte, plus one byte
+    // produced by the final (failing) iteration below:
+    unsigned ascSize = (strlen(configStr)+1)/2 + 1;
+    audioSpecificConfig = new unsigned char[ascSize];
+
+    // Reassemble the bit-shifted AudioSpecificConfig: each output byte is
+    // the carried-over bit followed by the top 7 bits of the next input byte.
+    Boolean parseSuccess;
+    unsigned i = 0;
+    do {
+      nextByte = 0;
+      parseSuccess = getByte(configStr, nextByte);
+      audioSpecificConfig[i++] = (remainingBit<<7)|((nextByte&0xFE)>>1);
+      remainingBit = nextByte&1;
+    } while (parseSuccess);
+    if (i != ascSize) break; // part of the remaining string was bad
+
+    audioSpecificConfigSize = ascSize;
+    return True; // parsing succeeded
+  } while (0);
+
+  delete[] audioSpecificConfig;
+  return False; // parsing failed
+}
+
+// Convenience wrapper: parse a StreamMuxConfig string and return just the
+// embedded AudioSpecificConfig (new[]'d; caller owns), or NULL on failure.
+unsigned char* parseStreamMuxConfigStr(char const* configStr,
+				       // result parameter:
+				       unsigned& audioSpecificConfigSize) {
+  Boolean audioMuxVersion, allStreamsSameTimeFraming;
+  unsigned char numSubFrames, numProgram, numLayer;
+  unsigned char* audioSpecificConfig;
+
+  if (!parseStreamMuxConfigStr(configStr,
+			       audioMuxVersion, allStreamsSameTimeFraming,
+			       numSubFrames, numProgram, numLayer,
+			       audioSpecificConfig, audioSpecificConfigSize)) {
+    audioSpecificConfigSize = 0;
+    return NULL;
+  }
+
+  return audioSpecificConfig;
+}
+
+// Decode an arbitrary hex config string into a new[]'d byte array (caller
+// owns).  "configSize" receives the byte count; on any failure the function
+// returns NULL with configSize set to 0.
+unsigned char* parseGeneralConfigStr(char const* configStr,
+				     // result parameter:
+				     unsigned& configSize) {
+  unsigned char* config = NULL;
+  do {
+    if (configStr == NULL) break;
+    // Two hex chars per byte; "+1" rounds up for odd-length strings:
+    configSize = (strlen(configStr)+1)/2;
+
+    config = new unsigned char[configSize];
+    if (config == NULL) break;
+
+    unsigned i;
+    for (i = 0; i < configSize; ++i) {
+      if (!getByte(configStr, config[i])) break;
+    }
+    if (i != configSize) break; // part of the string was bad
+
+    return config;
+  } while (0);
+
+  configSize = 0;
+  delete[] config;
+  return NULL;
+}
diff --git a/liveMedia/MPEG4VideoFileServerMediaSubsession.cpp b/liveMedia/MPEG4VideoFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..d02f732
--- /dev/null
+++ b/liveMedia/MPEG4VideoFileServerMediaSubsession.cpp
@@ -0,0 +1,126 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-4 video file.
+// Implementation
+
+#include "MPEG4VideoFileServerMediaSubsession.hh"
+#include "MPEG4ESVideoRTPSink.hh"
+#include "ByteStreamFileSource.hh"
+#include "MPEG4VideoStreamFramer.hh"
+
+// Public factory for the on-demand MPEG-4 video file subsession.
+MPEG4VideoFileServerMediaSubsession*
+MPEG4VideoFileServerMediaSubsession::createNew(UsageEnvironment& env,
+					       char const* fileName,
+					       Boolean reuseFirstSource) {
+  return new MPEG4VideoFileServerMediaSubsession(env, fileName, reuseFirstSource);
+}
+
+// Constructor: the aux SDP line is not known yet; it is discovered lazily
+// in getAuxSDPLine() by briefly playing the file into a dummy sink.
+MPEG4VideoFileServerMediaSubsession
+::MPEG4VideoFileServerMediaSubsession(UsageEnvironment& env,
+				      char const* fileName, Boolean reuseFirstSource)
+  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
+    fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
+}
+
+MPEG4VideoFileServerMediaSubsession::~MPEG4VideoFileServerMediaSubsession() {
+  delete[] fAuxSDPLine; // strDup()'d in checkForAuxSDPLine1()
+}
+
+// C-style trampoline invoked when the dummy playback finishes (e.g. EOF).
+static void afterPlayingDummy(void* clientData) {
+  MPEG4VideoFileServerMediaSubsession* subsess
+    = (MPEG4VideoFileServerMediaSubsession*)clientData;
+  subsess->afterPlayingDummy1();
+}
+
+// Member-side handler: stop polling and break out of the aux-SDP event loop.
+void MPEG4VideoFileServerMediaSubsession::afterPlayingDummy1() {
+  // Unschedule any pending 'checking' task:
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+  // Signal the event loop that we're done:
+  setDoneFlag();
+}
+
+// C-style trampoline for the periodic aux-SDP polling task.
+static void checkForAuxSDPLine(void* clientData) {
+  MPEG4VideoFileServerMediaSubsession* subsess
+    = (MPEG4VideoFileServerMediaSubsession*)clientData;
+  subsess->checkForAuxSDPLine1();
+}
+
+// Poll until the dummy sink has seen enough of the stream to produce its
+// "a=fmtp:" line; reschedules itself every 100ms until then.
+void MPEG4VideoFileServerMediaSubsession::checkForAuxSDPLine1() {
+  nextTask() = NULL; // this task has now fired; clear the handle
+
+  char const* dasl;
+  if (fAuxSDPLine != NULL) {
+    // Signal the event loop that we're done:
+    setDoneFlag();
+  } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
+    fAuxSDPLine= strDup(dasl); // keep our own copy
+    fDummyRTPSink = NULL;
+
+    // Signal the event loop that we're done:
+    setDoneFlag();
+  } else if (!fDoneFlag) {
+    // try again after a brief delay:
+    int uSecsToDelay = 100000; // 100 ms
+    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
+			      (TaskFunc*)checkForAuxSDPLine, this);
+  }
+}
+
+// Return the stream's aux SDP line, computing it on first use by playing
+// the file into "rtpSink" until its auxSDPLine() becomes available.  Runs
+// a nested event loop that exits when fDoneFlag is set.
+char const* MPEG4VideoFileServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
+  if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)
+
+  if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
+    // Note: For MPEG-4 video files, the 'config' information isn't known
+    // until we start reading the file.  This means that "rtpSink"s
+    // "auxSDPLine()" will be NULL initially, and we need to start reading data from our file until this changes.
+    fDummyRTPSink = rtpSink;
+
+    // Start reading the file:
+    fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);
+
+    // Check whether the sink's 'auxSDPLine()' is ready:
+    checkForAuxSDPLine(this);
+  }
+
+  // Block here (nested event loop) until the SDP line is known or playback ends:
+  envir().taskScheduler().doEventLoop(&fDoneFlag);
+
+  return fAuxSDPLine;
+}
+
+// Create the per-client source chain: raw file bytes -> MPEG-4 ES framer.
+FramedSource* MPEG4VideoFileServerMediaSubsession
+::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
+  estBitrate = 500; // kbps, estimate
+
+  // Create the video source:
+  ByteStreamFileSource* fileSource
+    = ByteStreamFileSource::createNew(envir(), fFileName);
+  if (fileSource == NULL) return NULL;
+  fFileSize = fileSource->fileSize(); // recorded for trick-play/duration use
+
+  // Create a framer for the Video Elementary Stream:
+  return MPEG4VideoStreamFramer::createNew(envir(), fileSource);
+}
+
+// Create the per-client RTP sink for the MPEG-4 elementary video stream.
+RTPSink* MPEG4VideoFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock,
+		   unsigned char rtpPayloadTypeIfDynamic,
+		   FramedSource* /*inputSource*/) {
+  return MPEG4ESVideoRTPSink::createNew(envir(), rtpGroupsock,
+					rtpPayloadTypeIfDynamic);
+}
diff --git a/liveMedia/MPEG4VideoStreamDiscreteFramer.cpp b/liveMedia/MPEG4VideoStreamDiscreteFramer.cpp
new file mode 100644
index 0000000..3988f44
--- /dev/null
+++ b/liveMedia/MPEG4VideoStreamDiscreteFramer.cpp
@@ -0,0 +1,252 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "MPEG4VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "MPEG4VideoStreamFramer".
+// Implementation
+
+#include "MPEG4VideoStreamDiscreteFramer.hh"
+
+// Public factory for the discrete (one-complete-frame-per-read) framer.
+MPEG4VideoStreamDiscreteFramer*
+MPEG4VideoStreamDiscreteFramer::createNew(UsageEnvironment& env,
+					  FramedSource* inputSource, Boolean leavePresentationTimesUnmodified) {
+  // Need to add source type checking here??? #####
+  return new MPEG4VideoStreamDiscreteFramer(env, inputSource, leavePresentationTimesUnmodified);
+}
+
+// Constructor: no stream parser is created (input is already framed).
+// Timing state for B-frame presentation-time adjustment starts zeroed.
+MPEG4VideoStreamDiscreteFramer
+::MPEG4VideoStreamDiscreteFramer(UsageEnvironment& env,
+				 FramedSource* inputSource, Boolean leavePresentationTimesUnmodified)
+  : MPEG4VideoStreamFramer(env, inputSource, False/*don't create a parser*/),
+    fLeavePresentationTimesUnmodified(leavePresentationTimesUnmodified), vop_time_increment_resolution(0), fNumVTIRBits(0),
+    fLastNonBFrameVop_time_increment(0) {
+  fLastNonBFramePresentationTime.tv_sec = 0;
+  fLastNonBFramePresentationTime.tv_usec = 0;
+}
+
+MPEG4VideoStreamDiscreteFramer::~MPEG4VideoStreamDiscreteFramer() {
+}
+
+// Read one complete frame from upstream directly into the client's buffer,
+// then post-process it in afterGettingFrame().
+void MPEG4VideoStreamDiscreteFramer::doGetNextFrame() {
+  // Arrange to read data (which should be a complete MPEG-4 video frame)
+  // from our data source, directly into the client's input buffer.
+  // After reading this, we'll do some parsing on the frame.
+  fInputSource->getNextFrame(fTo, fMaxSize,
+			     afterGettingFrame, this,
+			     FramedSource::handleClosure, this);
+}
+
+// Static trampoline: forward the upstream delivery to the member handler.
+void MPEG4VideoStreamDiscreteFramer
+::afterGettingFrame(void* clientData, unsigned frameSize,
+		    unsigned numTruncatedBytes,
+		    struct timeval presentationTime,
+		    unsigned durationInMicroseconds) {
+  MPEG4VideoStreamDiscreteFramer* source = (MPEG4VideoStreamDiscreteFramer*)clientData;
+  source->afterGettingFrame1(frameSize, numTruncatedBytes,
+			     presentationTime, durationInMicroseconds);
+}
+
+// Post-process a complete frame just read into fTo: capture stream config
+// bytes (VS/VO/VOL headers), extract VOP timing fields, and - unless
+// disabled - rewrite B-frame presentation times relative to the last
+// reference (non-B) frame, then deliver the frame to our client.
+void MPEG4VideoStreamDiscreteFramer
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+		     struct timeval presentationTime,
+		     unsigned durationInMicroseconds) {
+  // Check that the first 4 bytes are a system code:
+  if (frameSize >= 4 && fTo[0] == 0 && fTo[1] == 0 && fTo[2] == 1) {
+    fPictureEndMarker = True; // Assume that we have a complete 'picture' here
+    unsigned i = 3;
+    if (fTo[i] == 0xB0) { // VISUAL_OBJECT_SEQUENCE_START_CODE
+      // The next byte is the "profile_and_level_indication":
+      if (frameSize >= 5) fProfileAndLevelIndication = fTo[4];
+
+      // The start of this frame - up to the first GROUP_VOP_START_CODE
+      // or VOP_START_CODE - is stream configuration information.  Save this:
+      for (i = 7; i < frameSize; ++i) {
+	if ((fTo[i] == 0xB3 /*GROUP_VOP_START_CODE*/ ||
+	     fTo[i] == 0xB6 /*VOP_START_CODE*/)
+	    && fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
+	  break; // The configuration information ends here
+	}
+      }
+      // "i-3" excludes the 00 00 01 prefix of the code that ended the config:
+      fNumConfigBytes = i < frameSize ? i-3 : frameSize;
+      delete[] fConfigBytes; fConfigBytes = new unsigned char[fNumConfigBytes];
+      for (unsigned j = 0; j < fNumConfigBytes; ++j) fConfigBytes[j] = fTo[j];
+
+      // This information (should) also contain a VOL header, which we need
+      // to analyze, to get "vop_time_increment_resolution" (which we need
+      // - along with "vop_time_increment" - in order to generate accurate
+      // presentation times for "B" frames).
+      analyzeVOLHeader();
+    }
+
+    if (i < frameSize) {
+      u_int8_t nextCode = fTo[i];
+
+      if (nextCode == 0xB3 /*GROUP_VOP_START_CODE*/) {
+	// Skip to the following VOP_START_CODE (if any):
+	for (i += 4; i < frameSize; ++i) {
+	  if (fTo[i] == 0xB6 /*VOP_START_CODE*/
+	      && fTo[i-1] == 1 && fTo[i-2] == 0 && fTo[i-3] == 0) {
+	    nextCode = fTo[i];
+	    break;
+	  }
+	}
+      }
+
+      if (nextCode == 0xB6 /*VOP_START_CODE*/ && i+5 < frameSize) {
+	++i;
+
+	// Get the "vop_coding_type" from the next byte:
+	u_int8_t nextByte = fTo[i++];
+	u_int8_t vop_coding_type = nextByte>>6; // 0=I, 1=P, 2=B, 3=S
+
+	// Next, get the "modulo_time_base" by counting the '1' bits that
+	// follow.  We look at the next 32-bits only.
+	// This should be enough in most cases.
+	u_int32_t next4Bytes
+	  = (fTo[i]<<24)|(fTo[i+1]<<16)|(fTo[i+2]<<8)|fTo[i+3];
+	i += 4;
+	// Left-align the remaining 6 bits of "nextByte" with the next 4 bytes:
+	u_int32_t timeInfo = (nextByte<<(32-6))|(next4Bytes>>6);
+	unsigned modulo_time_base = 0;
+	u_int32_t mask = 0x80000000;
+	while ((timeInfo&mask) != 0) {
+	  ++modulo_time_base;
+	  mask >>= 1;
+	}
+	mask >>= 2; // skip the terminating '0' bit and a marker bit
+
+	// Then, get the "vop_time_increment".
+	unsigned vop_time_increment = 0;
+	// First, make sure we have enough bits left for this:
+	// NOTE(review): if analyzeVOLHeader() never ran (fNumVTIRBits == 0),
+	// "fNumVTIRBits-1" wraps and this shift is undefined — presumably a
+	// VOL header always precedes VOPs in practice; confirm.
+	if ((mask>>(fNumVTIRBits-1)) != 0) {
+	  // NOTE(review): this inner "i" shadows the outer byte index "i";
+	  // harmless here since the outer "i" is not used afterwards.
+	  for (unsigned i = 0; i < fNumVTIRBits; ++i) {
+	    vop_time_increment |= timeInfo&mask; // collect bits in place...
+	    mask >>= 1;
+	  }
+	  while (mask != 0) { // ...then shift them down to the low end
+	    vop_time_increment >>= 1;
+	    mask >>= 1;
+	  }
+	}
+
+	// If this is a "B" frame, then we have to tweak "presentationTime":
+	if (!fLeavePresentationTimesUnmodified && vop_coding_type == 2/*B*/
+	    && (fLastNonBFramePresentationTime.tv_usec > 0 ||
+		fLastNonBFramePresentationTime.tv_sec > 0)) {
+	  int timeIncrement
+	    = fLastNonBFrameVop_time_increment - vop_time_increment;
+	  if (timeIncrement<0) timeIncrement += vop_time_increment_resolution;
+	  unsigned const MILLION = 1000000;
+	  double usIncrement = vop_time_increment_resolution == 0 ? 0.0
+	    : ((double)timeIncrement*MILLION)/vop_time_increment_resolution;
+	  unsigned secondsToSubtract = (unsigned)(usIncrement/MILLION);
+	  unsigned uSecondsToSubtract = ((unsigned)usIncrement)%MILLION;
+
+	  // Back-date the B frame relative to its reference frame:
+	  presentationTime = fLastNonBFramePresentationTime;
+	  if ((unsigned)presentationTime.tv_usec < uSecondsToSubtract) {
+	    presentationTime.tv_usec += MILLION; // borrow one second
+	    if (presentationTime.tv_sec > 0) --presentationTime.tv_sec;
+	  }
+	  presentationTime.tv_usec -= uSecondsToSubtract;
+	  if ((unsigned)presentationTime.tv_sec > secondsToSubtract) {
+	    presentationTime.tv_sec -= secondsToSubtract;
+	  } else {
+	    presentationTime.tv_sec = presentationTime.tv_usec = 0; // clamp at epoch
+	  }
+	} else {
+	  // Remember this reference frame's timing for future B frames:
+	  fLastNonBFramePresentationTime = presentationTime;
+	  fLastNonBFrameVop_time_increment = vop_time_increment;
+	}
+      }
+    }
+  }
+
+  // Complete delivery to the client:
+  fFrameSize = frameSize;
+  fNumTruncatedBytes = numTruncatedBytes;
+  fPresentationTime = presentationTime;
+  fDurationInMicroseconds = durationInMicroseconds;
+  afterGetting(this);
+}
+
+// Read one bit (MSB first) from fConfigBytes at the cursor position
+// fNumBitsSeenSoFar; returns False when the config bytes are exhausted.
+Boolean MPEG4VideoStreamDiscreteFramer::getNextFrameBit(u_int8_t& result) {
+  if (fNumBitsSeenSoFar/8 >= fNumConfigBytes) return False;
+
+  u_int8_t nextByte = fConfigBytes[fNumBitsSeenSoFar/8];
+  result = (nextByte>>(7-fNumBitsSeenSoFar%8))&1;
+  ++fNumBitsSeenSoFar;
+  return True;
+}
+
+// Read "numBits" bits (MSB first) into "result"; False if input runs out.
+Boolean MPEG4VideoStreamDiscreteFramer::getNextFrameBits(unsigned numBits,
+							 u_int32_t& result) {
+  result = 0;
+  for (unsigned i = 0; i < numBits; ++i) {
+    u_int8_t nextBit;
+    if (!getNextFrameBit(nextBit)) return False;
+    result = (result<<1)|nextBit;
+  }
+  return True;
+}
+
+// Bit-parse the saved config bytes for the VOL (Video Object Layer) header,
+// extracting "vop_time_increment_resolution" and the number of bits needed
+// to represent it (fNumVTIRBits).  Skipped field widths follow the VOL
+// header layout of ISO/IEC 14496-2; on any parse failure the fields keep
+// their previous values.
+void MPEG4VideoStreamDiscreteFramer::analyzeVOLHeader() {
+  // Begin by moving to the VOL header:
+  // VOL start codes are 0x20..0x2F, preceded by the 00 00 01 prefix.
+  unsigned i;
+  for (i = 3; i < fNumConfigBytes; ++i) {
+    if (fConfigBytes[i] >= 0x20 && fConfigBytes[i] <= 0x2F
+	&& fConfigBytes[i-1] == 1
+	&& fConfigBytes[i-2] == 0 && fConfigBytes[i-3] == 0) {
+      ++i;
+      break;
+    }
+  }
+
+  // Position the bit cursor just past the start-code byte, then skip 9 more
+  // bits (presumably random_accessible_vol + video_object_type_indication
+  // — TODO confirm against the 14496-2 syntax).
+  fNumBitsSeenSoFar = 8*i + 9;
+  do {
+    u_int8_t is_object_layer_identifier;
+    if (!getNextFrameBit(is_object_layer_identifier)) break;
+    if (is_object_layer_identifier) fNumBitsSeenSoFar += 7; // verid + priority
+
+    u_int32_t aspect_ratio_info;
+    if (!getNextFrameBits(4, aspect_ratio_info)) break;
+    if (aspect_ratio_info == 15 /*extended_PAR*/) fNumBitsSeenSoFar += 16; // par_width + par_height
+
+    u_int8_t vol_control_parameters;
+    if (!getNextFrameBit(vol_control_parameters)) break;
+    if (vol_control_parameters) {
+      fNumBitsSeenSoFar += 3; // chroma_format; low_delay
+      u_int8_t vbw_parameters;
+      if (!getNextFrameBit(vbw_parameters)) break;
+      if (vbw_parameters) fNumBitsSeenSoFar += 79; // vbv buffer/occupancy fields
+    }
+
+    fNumBitsSeenSoFar += 2; // video_object_layer_shape
+    u_int8_t marker_bit;
+    if (!getNextFrameBit(marker_bit)) break;
+    if (marker_bit != 1) break; // sanity check
+
+    if (!getNextFrameBits(16, vop_time_increment_resolution)) break;
+    if (vop_time_increment_resolution == 0) break; // shouldn't happen
+
+    // Compute how many bits are necessary to represent this:
+    fNumVTIRBits = 0;
+    for (unsigned test = vop_time_increment_resolution; test>0; test /= 2) {
+      ++fNumVTIRBits;
+    }
+  } while (0);
+}
diff --git a/liveMedia/MPEG4VideoStreamFramer.cpp b/liveMedia/MPEG4VideoStreamFramer.cpp
new file mode 100644
index 0000000..972c6a6
--- /dev/null
+++ b/liveMedia/MPEG4VideoStreamFramer.cpp
@@ -0,0 +1,681 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG-4 video elementary stream into
+// frames for:
+// - Visual Object Sequence (VS) Header + Visual Object (VO) Header
+// + Video Object Layer (VOL) Header
+// - Group of VOP (GOV) Header
+// - VOP frame
+// Implementation
+
+#include "MPEG4VideoStreamFramer.hh"
+#include "MPEGVideoStreamParser.hh"
+#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
+#include <string.h>
+
+////////// MPEG4VideoStreamParser definition //////////
+
+// An enum representing the current state of the parser:
+// An enum representing the current state of the parser:
+// one state per MPEG-4 visual syntax unit the framer can be positioned at.
+enum MPEGParseState {
+  PARSING_VISUAL_OBJECT_SEQUENCE,
+  PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE,
+  PARSING_VISUAL_OBJECT,
+  PARSING_VIDEO_OBJECT_LAYER,
+  PARSING_GROUP_OF_VIDEO_OBJECT_PLANE,
+  PARSING_VIDEO_OBJECT_PLANE,
+  PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE
+};
+
+// State machine that scans a raw MPEG-4 elementary stream and carves it
+// into VS/VO/VOL headers, GOV headers, and VOP frames for the framer.
+class MPEG4VideoStreamParser: public MPEGVideoStreamParser {
+public:
+  MPEG4VideoStreamParser(MPEG4VideoStreamFramer* usingSource,
+			 FramedSource* inputSource);
+  virtual ~MPEG4VideoStreamParser();
+
+private: // redefined virtual functions:
+  virtual void flushInput();
+  virtual unsigned parse();
+
+private:
+  MPEG4VideoStreamFramer* usingSource() {
+    return (MPEG4VideoStreamFramer*)fUsingSource;
+  }
+  void setParseState(MPEGParseState parseState);
+
+  // One parse routine per state; each returns the number of bytes framed.
+  unsigned parseVisualObjectSequence(Boolean haveSeenStartCode = False);
+  unsigned parseVisualObject();
+  unsigned parseVideoObjectLayer();
+  unsigned parseGroupOfVideoObjectPlane();
+  unsigned parseVideoObjectPlane();
+  unsigned parseVisualObjectSequenceEndCode();
+
+  // These are used for parsing within an already-read frame:
+  Boolean getNextFrameBit(u_int8_t& result);
+  Boolean getNextFrameBits(unsigned numBits, u_int32_t& result);
+
+  // Which are used by:
+  void analyzeVOLHeader();
+
+private:
+  MPEGParseState fCurrentParseState;
+  unsigned fNumBitsSeenSoFar; // used by the getNextFrameBit*() routines
+  u_int32_t vop_time_increment_resolution;
+  unsigned fNumVTIRBits;
+  // # of bits needed to count to "vop_time_increment_resolution"
+  u_int8_t fixed_vop_rate;
+  unsigned fixed_vop_time_increment; // used if 'fixed_vop_rate' is set
+  // Frame-timing bookkeeping across GOV time codes:
+  unsigned fSecondsSinceLastTimeCode, fTotalTicksSinceLastTimeCode, fPrevNewTotalTicks;
+  unsigned fPrevPictureCountDelta;
+  Boolean fJustSawTimeCode;
+};
+
+
+////////// MPEG4VideoStreamFramer implementation //////////
+
+// Public factory for the full (parsing) MPEG-4 video stream framer.
+MPEG4VideoStreamFramer*
+MPEG4VideoStreamFramer::createNew(UsageEnvironment& env,
+				  FramedSource* inputSource) {
+  // Need to add source type checking here??? #####
+  return new MPEG4VideoStreamFramer(env, inputSource);
+}
+
+// Expose the saved stream-configuration bytes (still owned by this object).
+unsigned char* MPEG4VideoStreamFramer
+::getConfigBytes(unsigned& numBytes) const {
+  numBytes = fNumConfigBytes;
+  return fConfigBytes;
+}
+
+// Install configuration obtained out-of-band (e.g. from an SDP "config="
+// hex string) instead of waiting to parse it from the stream.
+void MPEG4VideoStreamFramer
+::setConfigInfo(u_int8_t profileAndLevelIndication, char const* configStr) {
+  fProfileAndLevelIndication = profileAndLevelIndication;
+
+  delete[] fConfigBytes; // replace any previously-saved config
+  fConfigBytes = parseGeneralConfigStr(configStr, fNumConfigBytes);
+}
+
+// Constructor: "createParser" is False for the discrete-framer subclass,
+// which receives pre-framed input and needs no stream parser.
+MPEG4VideoStreamFramer::MPEG4VideoStreamFramer(UsageEnvironment& env,
+					       FramedSource* inputSource,
+					       Boolean createParser)
+  : MPEGVideoStreamFramer(env, inputSource),
+    fProfileAndLevelIndication(0),
+    fConfigBytes(NULL), fNumConfigBytes(0),
+    fNewConfigBytes(NULL), fNumNewConfigBytes(0) {
+  fParser = createParser
+    ? new MPEG4VideoStreamParser(this, inputSource)
+    : NULL;
+}
+
+MPEG4VideoStreamFramer::~MPEG4VideoStreamFramer() {
+  delete[] fConfigBytes; delete[] fNewConfigBytes;
+}
+
+// Begin accumulating a fresh set of config bytes (discard any partial set).
+void MPEG4VideoStreamFramer::startNewConfig() {
+  delete[] fNewConfigBytes; fNewConfigBytes = NULL;
+  fNumNewConfigBytes = 0;
+}
+
+// Append bytes to the in-progress config set (simple grow-and-copy).
+void MPEG4VideoStreamFramer
+::appendToNewConfig(unsigned char* newConfigBytes, unsigned numNewBytes) {
+  // Allocate a new block of memory for the new config bytes:
+  unsigned char* configNew
+    = new unsigned char[fNumNewConfigBytes + numNewBytes];
+
+  // Copy the old, then the new, config bytes there:
+  memmove(configNew, fNewConfigBytes, fNumNewConfigBytes);
+  memmove(&configNew[fNumNewConfigBytes], newConfigBytes, numNewBytes);
+
+  delete[] fNewConfigBytes; fNewConfigBytes = configNew;
+  fNumNewConfigBytes += numNewBytes;
+}
+
+// Promote the in-progress config set to be the current one.
+void MPEG4VideoStreamFramer::completeNewConfig() {
+  delete[] fConfigBytes; fConfigBytes = fNewConfigBytes;
+  fNewConfigBytes = NULL;
+  fNumConfigBytes = fNumNewConfigBytes;
+  fNumNewConfigBytes = 0;
+}
+
+// RTTI-style type query used elsewhere in the library instead of dynamic_cast.
+Boolean MPEG4VideoStreamFramer::isMPEG4VideoStreamFramer() const {
+  return True;
+}
+
+////////// MPEG4VideoStreamParser implementation //////////
+
+// Parser construction: start expecting a Visual Object Sequence header,
+// with all timing bookkeeping zeroed (fPrevPictureCountDelta starts at 1
+// so the first delta comparison has a sane baseline).
+MPEG4VideoStreamParser
+::MPEG4VideoStreamParser(MPEG4VideoStreamFramer* usingSource,
+			 FramedSource* inputSource)
+  : MPEGVideoStreamParser(usingSource, inputSource),
+    fCurrentParseState(PARSING_VISUAL_OBJECT_SEQUENCE),
+    vop_time_increment_resolution(0), fNumVTIRBits(0),
+    fixed_vop_rate(0), fixed_vop_time_increment(0),
+    fSecondsSinceLastTimeCode(0), fTotalTicksSinceLastTimeCode(0),
+    fPrevNewTotalTicks(0), fPrevPictureCountDelta(1), fJustSawTimeCode(False) {
+}
+
+MPEG4VideoStreamParser::~MPEG4VideoStreamParser() {
+}
+
+// Record the new state and let the base class checkpoint the input position.
+void MPEG4VideoStreamParser::setParseState(MPEGParseState parseState) {
+  fCurrentParseState = parseState;
+  MPEGVideoStreamParser::setParseState();
+}
+
+// Discard buffered input (e.g. on seek) and reset timing state; restart
+// parsing from the top-level VS header.
+void MPEG4VideoStreamParser::flushInput() {
+  fSecondsSinceLastTimeCode = 0;
+  fTotalTicksSinceLastTimeCode = 0;
+  fPrevNewTotalTicks = 0;
+  fPrevPictureCountDelta = 1;
+
+  StreamParser::flushInput();
+  if (fCurrentParseState != PARSING_VISUAL_OBJECT_SEQUENCE) {
+    setParseState(PARSING_VISUAL_OBJECT_SEQUENCE); // later, change to GOV or VOP? #####
+  }
+}
+
+
+// Dispatch to the parsing routine for the current parse state.
+// Returns the size of the frame that was acquired, or 0 if parsing was
+// interrupted because more input data is needed (signalled by the
+// StreamParser throwing an integer exception).
+unsigned MPEG4VideoStreamParser::parse() {
+  try {
+    switch (fCurrentParseState) {
+      case PARSING_VISUAL_OBJECT_SEQUENCE:
+        return parseVisualObjectSequence();
+      case PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE:
+        return parseVisualObjectSequence(True);
+      case PARSING_VISUAL_OBJECT:
+        return parseVisualObject();
+      case PARSING_VIDEO_OBJECT_LAYER:
+        return parseVideoObjectLayer();
+      case PARSING_GROUP_OF_VIDEO_OBJECT_PLANE:
+        return parseGroupOfVideoObjectPlane();
+      case PARSING_VIDEO_OBJECT_PLANE:
+        return parseVideoObjectPlane();
+      case PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE:
+        return parseVisualObjectSequenceEndCode();
+      default:
+        return 0; // shouldn't happen
+    }
+  } catch (int /*e*/) {
+#ifdef DEBUG
+    fprintf(stderr, "MPEG4VideoStreamParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+    return 0; // the parsing got interrupted
+  }
+}
+
+#define VISUAL_OBJECT_SEQUENCE_START_CODE 0x000001B0
+#define VISUAL_OBJECT_SEQUENCE_END_CODE 0x000001B1
+#define GROUP_VOP_START_CODE 0x000001B3
+#define VISUAL_OBJECT_START_CODE 0x000001B5
+#define VOP_START_CODE 0x000001B6
+
+// Parse a VisualObjectSequence header, saving its bytes as the start of a
+// new 'configuration' block. If "haveSeenStartCode", the 4-byte
+// VISUAL_OBJECT_SEQUENCE_START_CODE has already been consumed from the input.
+// Returns the number of bytes saved to the output frame.
+unsigned MPEG4VideoStreamParser
+::parseVisualObjectSequence(Boolean haveSeenStartCode) {
+#ifdef DEBUG
+  fprintf(stderr, "parsing VisualObjectSequence\n");
+#endif
+  usingSource()->startNewConfig();
+  u_int32_t first4Bytes;
+  if (!haveSeenStartCode) {
+    // Scan forward, one byte at a time, until the start code appears:
+    while ((first4Bytes = test4Bytes()) != VISUAL_OBJECT_SEQUENCE_START_CODE) {
+#ifdef DEBUG
+      fprintf(stderr, "ignoring non VS header: 0x%08x\n", first4Bytes);
+#endif
+      get1Byte(); setParseState(PARSING_VISUAL_OBJECT_SEQUENCE);
+      // ensures we progress over bad data
+    }
+    first4Bytes = get4Bytes();
+  } else {
+    // We've already seen the start code
+    first4Bytes = VISUAL_OBJECT_SEQUENCE_START_CODE;
+  }
+  save4Bytes(first4Bytes);
+
+  // The next byte is the "profile_and_level_indication":
+  u_int8_t pali = get1Byte();
+#ifdef DEBUG
+  fprintf(stderr, "profile_and_level_indication: %02x\n", pali);
+#endif
+  saveByte(pali);
+  usingSource()->fProfileAndLevelIndication = pali;
+
+  // Now, copy all bytes that we see, up until we reach
+  // a VISUAL_OBJECT_START_CODE:
+  u_int32_t next4Bytes = get4Bytes();
+  while (next4Bytes != VISUAL_OBJECT_START_CODE) {
+    saveToNextCode(next4Bytes);
+  }
+
+  setParseState(PARSING_VISUAL_OBJECT);
+
+  // Compute this frame's presentation time:
+  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
+
+  // This header forms part of the 'configuration' information:
+  usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
+
+  return curFrameSize();
+}
+
+// A video_object_start_code is any code in the range 0x00000100..0x0000011F;
+// i.e., the low 5 bits select one of 32 'video object' ids.
+static inline Boolean isVideoObjectStartCode(u_int32_t code) {
+  return (code&~0x1F) == 0x00000100;
+}
+
+// Parse a VisualObject header (we have already consumed its start code),
+// saving its bytes as part of the 'configuration' information, and checking
+// that its "visual_object_type" is one that we can handle.
+unsigned MPEG4VideoStreamParser::parseVisualObject() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing VisualObject\n");
+#endif
+  // Note that we've already read the VISUAL_OBJECT_START_CODE
+  save4Bytes(VISUAL_OBJECT_START_CODE);
+
+  // Next, extract the "visual_object_type" from the next 1 or 2 bytes:
+  u_int8_t nextByte = get1Byte(); saveByte(nextByte);
+  Boolean is_visual_object_identifier = (nextByte&0x80) != 0;
+  u_int8_t visual_object_type;
+  if (is_visual_object_identifier) {
+    // An extra byte is present; the type moves to the second byte:
+#ifdef DEBUG
+    fprintf(stderr, "visual_object_verid: 0x%x; visual_object_priority: 0x%x\n", (nextByte&0x78)>>3, (nextByte&0x07));
+#endif
+    nextByte = get1Byte(); saveByte(nextByte);
+    visual_object_type = (nextByte&0xF0)>>4;
+  } else {
+    visual_object_type = (nextByte&0x78)>>3;
+  }
+#ifdef DEBUG
+  fprintf(stderr, "visual_object_type: 0x%x\n", visual_object_type);
+#endif
+  // At present, we support only the "Video ID" "visual_object_type" (1)
+  if (visual_object_type != 1) {
+    usingSource()->envir() << "MPEG4VideoStreamParser::parseVisualObject(): Warning: We don't handle visual_object_type " << visual_object_type << "\n";
+  }
+
+  // Now, copy all bytes that we see, up until we reach
+  // a video_object_start_code
+  u_int32_t next4Bytes = get4Bytes();
+  while (!isVideoObjectStartCode(next4Bytes)) {
+    saveToNextCode(next4Bytes);
+  }
+  save4Bytes(next4Bytes);
+#ifdef DEBUG
+  fprintf(stderr, "saw a video_object_start_code: 0x%08x\n", next4Bytes);
+#endif
+
+  setParseState(PARSING_VIDEO_OBJECT_LAYER);
+
+  // Compute this frame's presentation time:
+  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
+
+  // This header forms part of the 'configuration' information:
+  usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
+
+  return curFrameSize();
+}
+
+// A video_object_layer_start_code is any code in the range
+// 0x00000120..0x0000012F; i.e., the low 4 bits select one of 16 layers.
+static inline Boolean isVideoObjectLayerStartCode(u_int32_t code) {
+  return (code&~0x0F) == 0x00000120;
+}
+
+// Read the next single bit (MSB-first within each byte) from the frame data
+// saved so far, advancing "fNumBitsSeenSoFar". Returns False - leaving
+// "result" unchanged - if no saved bits remain.
+Boolean MPEG4VideoStreamParser::getNextFrameBit(u_int8_t& result) {
+  unsigned byteIndex = fNumBitsSeenSoFar/8;
+  if (byteIndex >= curFrameSize()) return False;
+
+  unsigned bitIndex = fNumBitsSeenSoFar%8; // 0 => most-significant bit
+  result = (fStartOfFrame[byteIndex]>>(7-bitIndex))&1;
+  ++fNumBitsSeenSoFar;
+  return True;
+}
+
+// Read the next "numBits" bits (MSB first) from the saved frame data,
+// accumulating them into "result". Returns False if the data ran out
+// partway through (in which case "result" holds the bits read so far).
+Boolean MPEG4VideoStreamParser::getNextFrameBits(unsigned numBits,
+						 u_int32_t& result) {
+  result = 0;
+  unsigned bitsRemaining = numBits;
+  while (bitsRemaining-- > 0) {
+    u_int8_t bit;
+    if (!getNextFrameBit(bit)) return False;
+    result = (result<<1)|bit;
+  }
+  return True;
+}
+
+// Extract timing information - in particular "vop_time_increment_resolution",
+// "fixed_vop_rate", and "fixed_vop_time_increment" - from the VOL-header
+// bytes that have just been saved in the current output frame.
+// On any parse failure we 'break' out of the do/while, leaving the timing
+// fields at their prior values, and report a diagnostic if we simply ran
+// out of saved bits.
+void MPEG4VideoStreamParser::analyzeVOLHeader() {
+  // Extract timing information (in particular,
+  // "vop_time_increment_resolution") from the VOL Header:
+  // Skip the 32-bit start code plus the next 9 fixed bits
+  // ("random_accessible_vol"; "video_object_type_indication"):
+  fNumBitsSeenSoFar = 41;
+  do {
+    u_int8_t is_object_layer_identifier;
+    if (!getNextFrameBit(is_object_layer_identifier)) break;
+    if (is_object_layer_identifier) fNumBitsSeenSoFar += 7; // verid + priority
+
+    u_int32_t aspect_ratio_info;
+    if (!getNextFrameBits(4, aspect_ratio_info)) break;
+    if (aspect_ratio_info == 15 /*extended_PAR*/) fNumBitsSeenSoFar += 16;
+
+    u_int8_t vol_control_parameters;
+    if (!getNextFrameBit(vol_control_parameters)) break;
+    if (vol_control_parameters) {
+      fNumBitsSeenSoFar += 3; // chroma_format; low_delay
+      u_int8_t vbw_parameters;
+      if (!getNextFrameBit(vbw_parameters)) break;
+      if (vbw_parameters) fNumBitsSeenSoFar += 79; // vbv_parameters fields
+    }
+
+    fNumBitsSeenSoFar += 2; // video_object_layer_shape
+    u_int8_t marker_bit;
+    if (!getNextFrameBit(marker_bit)) break;
+    if (marker_bit != 1) { // sanity check
+      usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): marker_bit 1 not set!\n";
+      break;
+    }
+
+    if (!getNextFrameBits(16, vop_time_increment_resolution)) break;
+#ifdef DEBUG
+    fprintf(stderr, "vop_time_increment_resolution: %u\n", vop_time_increment_resolution);
+#endif
+    if (vop_time_increment_resolution == 0) {
+      usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): vop_time_increment_resolution is zero!\n";
+      break;
+    }
+    // Compute how many bits are necessary to represent this:
+    fNumVTIRBits = 0;
+    for (unsigned test = vop_time_increment_resolution; test>0; test /= 2) {
+      ++fNumVTIRBits;
+    }
+
+    if (!getNextFrameBit(marker_bit)) break;
+    if (marker_bit != 1) { // sanity check
+      usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): marker_bit 2 not set!\n";
+      break;
+    }
+
+    if (!getNextFrameBit(fixed_vop_rate)) break;
+    if (fixed_vop_rate) {
+      // Get the following "fixed_vop_time_increment":
+      if (!getNextFrameBits(fNumVTIRBits, fixed_vop_time_increment)) break;
+#ifdef DEBUG
+      fprintf(stderr, "fixed_vop_time_increment: %u\n", fixed_vop_time_increment);
+      if (fixed_vop_time_increment == 0) {
+	usingSource()->envir() << "MPEG4VideoStreamParser::analyzeVOLHeader(): fixed_vop_time_increment is zero!\n";
+      }
+#endif
+    }
+    // Use "vop_time_increment_resolution" as the 'frame rate'
+    // (really, 'tick rate'):
+    usingSource()->fFrameRate = (double)vop_time_increment_resolution;
+#ifdef DEBUG
+    fprintf(stderr, "fixed_vop_rate: %d; 'frame' (really tick) rate: %f\n", fixed_vop_rate, usingSource()->fFrameRate);
+#endif
+
+    return;
+  } while (0);
+
+  // We failed partway through; if it was because the saved header data ran
+  // out of bits, say so:
+  if (fNumBitsSeenSoFar/8 >= curFrameSize()) {
+    char errMsg[200];
+    // Use snprintf() - not sprintf() - so the message can never overrun
+    // "errMsg", and "%u" to match the unsigned arguments:
+    snprintf(errMsg, sizeof errMsg, "Not enough bits in VOL header: %u/8 >= %u\n", fNumBitsSeenSoFar, curFrameSize());
+    usingSource()->envir() << errMsg;
+  }
+}
+
+// Parse a VideoObjectLayer header. This is the final piece of the
+// 'configuration' information; it also carries the stream's timing
+// parameters, which "analyzeVOLHeader()" extracts from the saved bytes.
+unsigned MPEG4VideoStreamParser::parseVideoObjectLayer() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing VideoObjectLayer\n");
+#endif
+  // The first 4 bytes must be a "video_object_layer_start_code".
+  // If not, this is a 'short video header', which we currently
+  // don't support:
+  u_int32_t next4Bytes = get4Bytes();
+  if (!isVideoObjectLayerStartCode(next4Bytes)) {
+    usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectLayer(): This appears to be a 'short video header', which we currently don't support\n";
+  }
+
+  // Now, copy all bytes that we see, up until we reach
+  // a GROUP_VOP_START_CODE or a VOP_START_CODE:
+  do {
+    saveToNextCode(next4Bytes);
+  } while (next4Bytes != GROUP_VOP_START_CODE
+	   && next4Bytes != VOP_START_CODE);
+
+  // Extract "vop_time_increment_resolution" etc. from the bytes just saved:
+  analyzeVOLHeader();
+
+  setParseState((next4Bytes == GROUP_VOP_START_CODE)
+		? PARSING_GROUP_OF_VIDEO_OBJECT_PLANE
+		: PARSING_VIDEO_OBJECT_PLANE);
+
+  // Compute this frame's presentation time:
+  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
+
+  // This header ends the 'configuration' information:
+  usingSource()->appendToNewConfig(fStartOfFrame, curFrameSize());
+  usingSource()->completeNewConfig();
+
+  return curFrameSize();
+}
+
+// Parse a GroupOfVideoObjectPlane (GOV) header, recording its 18-bit
+// time code (hours/minutes/seconds) for use in computing presentation times.
+unsigned MPEG4VideoStreamParser::parseGroupOfVideoObjectPlane() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing GroupOfVideoObjectPlane\n");
+#endif
+  // Note that we've already read the GROUP_VOP_START_CODE
+  save4Bytes(GROUP_VOP_START_CODE);
+
+  // Next, extract the (18-bit) time code from the next 3 bytes:
+  u_int8_t next3Bytes[3];
+  getBytes(next3Bytes, 3);
+  saveByte(next3Bytes[0]);saveByte(next3Bytes[1]);saveByte(next3Bytes[2]);
+  unsigned time_code
+    = (next3Bytes[0]<<10)|(next3Bytes[1]<<2)|(next3Bytes[2]>>6);
+  // Bit layout: 5 bits hours, 6 bits minutes, 1 marker bit, 6 bits seconds:
+  unsigned time_code_hours = (time_code&0x0003E000)>>13;
+  unsigned time_code_minutes = (time_code&0x00001F80)>>7;
+#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
+  Boolean marker_bit = (time_code&0x00000040) != 0;
+#endif
+  unsigned time_code_seconds = (time_code&0x0000003F);
+#if defined(DEBUG) || defined(DEBUG_TIMESTAMPS)
+  fprintf(stderr, "time_code: 0x%05x, hours %d, minutes %d, marker_bit %d, seconds %d\n", time_code, time_code_hours, time_code_minutes, marker_bit, time_code_seconds);
+#endif
+  fJustSawTimeCode = True;
+
+  // Now, copy all bytes that we see, up until we reach a VOP_START_CODE:
+  u_int32_t next4Bytes = get4Bytes();
+  while (next4Bytes != VOP_START_CODE) {
+    saveToNextCode(next4Bytes);
+  }
+
+  // Compute this frame's presentation time:
+  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
+
+  // Record the time code:
+  usingSource()->setTimeCode(time_code_hours, time_code_minutes,
+			     time_code_seconds, 0, 0);
+  // Note: Because the GOV header can appear anywhere (not just at a 1s point), we
+  // don't pass "fTotalTicksSinceLastTimeCode" as the "picturesSinceLastGOP" parameter.
+  fSecondsSinceLastTimeCode = 0;
+  if (fixed_vop_rate) fTotalTicksSinceLastTimeCode = 0;
+
+  setParseState(PARSING_VIDEO_OBJECT_PLANE);
+
+  return curFrameSize();
+}
+
+// Parse a VideoObjectPlane (a coded picture), extract its timing fields
+// ("modulo_time_base", "vop_time_increment"), update the tick/picture
+// counters - working around two known classes of buggy encoder output -
+// and choose the next parse state from the code that follows the VOP.
+unsigned MPEG4VideoStreamParser::parseVideoObjectPlane() {
+#ifdef DEBUG
+  fprintf(stderr, "#parsing VideoObjectPlane\n");
+#endif
+  // Note that we've already read the VOP_START_CODE
+  save4Bytes(VOP_START_CODE);
+
+  // Get the "vop_coding_type" from the next byte:
+  u_int8_t nextByte = get1Byte(); saveByte(nextByte);
+  u_int8_t vop_coding_type = nextByte>>6;
+
+  // Next, get the "modulo_time_base" by counting the '1' bits that follow.
+  // We look at the next 32-bits only. This should be enough in most cases.
+  u_int32_t next4Bytes = get4Bytes();
+  u_int32_t timeInfo = (nextByte<<(32-6))|(next4Bytes>>6);
+  unsigned modulo_time_base = 0;
+  u_int32_t mask = 0x80000000;
+  while ((timeInfo&mask) != 0) {
+    ++modulo_time_base;
+    mask >>= 1;
+  }
+  mask >>= 1; // skip the '0' bit that terminated "modulo_time_base"
+
+  // Check the following marker bit:
+  if ((timeInfo&mask) == 0) {
+    usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): marker bit not set!\n";
+  }
+  mask >>= 1;
+
+  // Then, get the "vop_time_increment".
+  // First, make sure we have enough bits left for this:
+  // NOTE(review): if "fNumVTIRBits" is still 0 (VOL header never parsed),
+  // the shift below is by an out-of-range amount - confirm upstream
+  // guarantees a VOL header precedes any VOP.
+  if ((mask>>(fNumVTIRBits-1)) == 0) {
+    usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): 32-bits are not enough to get \"vop_time_increment\"!\n";
+  }
+  // Collect the increment's bits in place, then shift them down to the
+  // low-order end:
+  unsigned vop_time_increment = 0;
+  for (unsigned i = 0; i < fNumVTIRBits; ++i) {
+    vop_time_increment |= timeInfo&mask;
+    mask >>= 1;
+  }
+  while (mask != 0) {
+    vop_time_increment >>= 1;
+    mask >>= 1;
+  }
+#ifdef DEBUG
+  fprintf(stderr, "vop_coding_type: %d(%c), modulo_time_base: %d, vop_time_increment: %d\n", vop_coding_type, "IPBS"[vop_coding_type], modulo_time_base, vop_time_increment);
+#endif
+
+  // Now, copy all bytes that we see, up until we reach a code of some sort:
+  saveToNextCode(next4Bytes);
+
+  // Update our counters based on the frame timing information that we saw:
+  if (fixed_vop_time_increment > 0) {
+    // This is a 'fixed_vop_rate' stream. Use 'fixed_vop_time_increment':
+    usingSource()->fPictureCount += fixed_vop_time_increment;
+    if (vop_time_increment > 0 || modulo_time_base > 0) {
+      fTotalTicksSinceLastTimeCode += fixed_vop_time_increment;
+      // Note: "fSecondsSinceLastTimeCode" and "fPrevNewTotalTicks" are not used.
+    }
+  } else {
+    // Use 'vop_time_increment':
+    unsigned newTotalTicks
+      = (fSecondsSinceLastTimeCode + modulo_time_base)*vop_time_increment_resolution
+      + vop_time_increment;
+    if (newTotalTicks == fPrevNewTotalTicks && fPrevNewTotalTicks > 0) {
+      // This is apparently a buggy MPEG-4 video stream, because
+      // "vop_time_increment" did not change. Overcome this error,
+      // by pretending that it did change.
+#ifdef DEBUG
+      fprintf(stderr, "Buggy MPEG-4 video stream: \"vop_time_increment\" did not change!\n");
+#endif
+      // The following assumes that we don't have 'B' frames. If we do, then TARFU!
+      usingSource()->fPictureCount += vop_time_increment;
+      fTotalTicksSinceLastTimeCode += vop_time_increment;
+      fSecondsSinceLastTimeCode += modulo_time_base;
+    } else {
+      if (newTotalTicks < fPrevNewTotalTicks && vop_coding_type != 2/*B*/
+	  && modulo_time_base == 0 && vop_time_increment == 0 && !fJustSawTimeCode) {
+	// This is another kind of buggy MPEG-4 video stream, in which
+	// "vop_time_increment" wraps around, but without
+	// "modulo_time_base" changing (or just having had a new time code).
+	// Overcome this by pretending that "vop_time_increment" *did* wrap around:
+#ifdef DEBUG
+	fprintf(stderr, "Buggy MPEG-4 video stream: \"vop_time_increment\" wrapped around, but without \"modulo_time_base\" changing!\n");
+#endif
+	++fSecondsSinceLastTimeCode;
+	newTotalTicks += vop_time_increment_resolution;
+      }
+      fPrevNewTotalTicks = newTotalTicks;
+      if (vop_coding_type != 2/*B*/) {
+	int pictureCountDelta = newTotalTicks - fTotalTicksSinceLastTimeCode;
+	if (pictureCountDelta <= 0) pictureCountDelta = fPrevPictureCountDelta;
+	// ensures that the picture count is always increasing
+	usingSource()->fPictureCount += pictureCountDelta;
+	fPrevPictureCountDelta = pictureCountDelta;
+	fTotalTicksSinceLastTimeCode = newTotalTicks;
+	fSecondsSinceLastTimeCode += modulo_time_base;
+      }
+    }
+  }
+  fJustSawTimeCode = False; // for next time
+
+  // The next thing to parse depends on the code that we just saw,
+  // but we are assumed to have ended the current picture:
+  usingSource()->fPictureEndMarker = True; // HACK #####
+  switch (next4Bytes) {
+  case VISUAL_OBJECT_SEQUENCE_END_CODE: {
+    setParseState(PARSING_VISUAL_OBJECT_SEQUENCE_END_CODE);
+    break;
+  }
+  case VISUAL_OBJECT_SEQUENCE_START_CODE: {
+    setParseState(PARSING_VISUAL_OBJECT_SEQUENCE_SEEN_CODE);
+    break;
+  }
+  case VISUAL_OBJECT_START_CODE: {
+    setParseState(PARSING_VISUAL_OBJECT);
+    break;
+  }
+  case GROUP_VOP_START_CODE: {
+    setParseState(PARSING_GROUP_OF_VIDEO_OBJECT_PLANE);
+    break;
+  }
+  case VOP_START_CODE: {
+    setParseState(PARSING_VIDEO_OBJECT_PLANE);
+    break;
+  }
+  default: {
+    if (isVideoObjectStartCode(next4Bytes)) {
+      setParseState(PARSING_VIDEO_OBJECT_LAYER);
+    } else if (isVideoObjectLayerStartCode(next4Bytes)){
+      // copy all bytes that we see, up until we reach a VOP_START_CODE:
+      // NOTE(review): this declaration shadows the outer "next4Bytes" -
+      // apparently intentional (the outer value is no longer needed), but
+      // worth confirming.
+      u_int32_t next4Bytes = get4Bytes();
+      while (next4Bytes != VOP_START_CODE) {
+	saveToNextCode(next4Bytes);
+      }
+      setParseState(PARSING_VIDEO_OBJECT_PLANE);
+    } else {
+      usingSource()->envir() << "MPEG4VideoStreamParser::parseVideoObjectPlane(): Saw unexpected code "
+			     << (void*)next4Bytes << "\n";
+      setParseState(PARSING_VIDEO_OBJECT_PLANE); // the safest way to recover...
+    }
+    break;
+  }
+  }
+
+  // Compute this frame's presentation time:
+  usingSource()->computePresentationTime(fTotalTicksSinceLastTimeCode);
+
+  return curFrameSize();
+}
+
+// Handle a VISUAL_OBJECT_SEQUENCE_END_CODE: save it, return to the initial
+// parse state, and mark the end of a picture.
+unsigned MPEG4VideoStreamParser::parseVisualObjectSequenceEndCode() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing VISUAL_OBJECT_SEQUENCE_END_CODE\n");
+#endif
+  // Note that we've already read the VISUAL_OBJECT_SEQUENCE_END_CODE
+  save4Bytes(VISUAL_OBJECT_SEQUENCE_END_CODE);
+
+  setParseState(PARSING_VISUAL_OBJECT_SEQUENCE);
+
+  // Treat this as if we had ended a picture:
+  usingSource()->fPictureEndMarker = True; // HACK #####
+
+  return curFrameSize();
+}
diff --git a/liveMedia/MPEGVideoStreamFramer.cpp b/liveMedia/MPEGVideoStreamFramer.cpp
new file mode 100644
index 0000000..9baba76
--- /dev/null
+++ b/liveMedia/MPEGVideoStreamFramer.cpp
@@ -0,0 +1,187 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG video elementary stream into
+// headers and frames
+// Implementation
+
+#include "MPEGVideoStreamParser.hh"
+#include <GroupsockHelper.hh>
+
+////////// TimeCode implementation //////////
+
+// A time code, initialized to zero (day 0, 00:00:00, picture 0).
+TimeCode::TimeCode()
+  : days(0), hours(0), minutes(0), seconds(0), pictures(0) {
+}
+
+// Destructor: nothing to release.
+TimeCode::~TimeCode() {
+}
+
+// Two TimeCodes are equal iff all five fields match.
+int TimeCode::operator==(TimeCode const& arg2) {
+  return days == arg2.days && hours == arg2.hours && minutes == arg2.minutes
+    && seconds == arg2.seconds && pictures == arg2.pictures;
+}
+
+////////// MPEGVideoStreamFramer implementation //////////
+
+// Construct the framer; the frame rate is unknown (0.0) until a parser
+// subclass learns it from the stream. The parser itself is created by
+// the subclass, after this base constructor runs.
+MPEGVideoStreamFramer::MPEGVideoStreamFramer(UsageEnvironment& env,
+					     FramedSource* inputSource)
+  : FramedFilter(env, inputSource),
+    fFrameRate(0.0) /* until we learn otherwise */,
+    fParser(NULL) {
+  reset();
+}
+
+// Destructor: we own the parser that our subclass created.
+MPEGVideoStreamFramer::~MPEGVideoStreamFramer() {
+  delete fParser;
+}
+
+// Discard all buffered input and timing state (e.g., on stop or seek).
+void MPEGVideoStreamFramer::flushInput() {
+  reset();
+  if (fParser != NULL) fParser->flushInput();
+}
+
+// Return all picture/time-code bookkeeping to its initial state.
+void MPEGVideoStreamFramer::reset() {
+  fPictureCount = 0;
+  fPictureEndMarker = True; // So that we start looking as if we'd just ended an 'access unit'
+  fPicturesAdjustment = 0;
+  fPictureTimeBase = 0.0;
+  fTcSecsBase = 0;
+  fHaveSeenFirstTimeCode = False;
+
+  // Clear the 'presentation time base', as a signal for subclasses
+  // to reset it (to our current time) when we start (or resume) streaming:
+  fPresentationTimeBase.tv_sec = 0;
+  fPresentationTimeBase.tv_usec = 0;
+}
+
+#ifdef DEBUG
+// The first presentation time computed, kept only so DEBUG output can show
+// times relative to it:
+static struct timeval firstPT;
+#endif
+// Compute "fPresentationTime" = presentation-time base + seconds elapsed
+// since the last GOP time code + the picture offset within that second.
+void MPEGVideoStreamFramer
+::computePresentationTime(unsigned numAdditionalPictures) {
+  // Computes "fPresentationTime" from the most recent GOP's
+  // time_code, along with the "numAdditionalPictures" parameter:
+  TimeCode& tc = fCurGOPTimeCode;
+
+  unsigned tcSecs
+    = (((tc.days*24)+tc.hours)*60+tc.minutes)*60+tc.seconds - fTcSecsBase;
+  // Picture offset (in seconds) within the current time-code second:
+  double pictureTime = fFrameRate == 0.0 ? 0.0
+    : (tc.pictures + fPicturesAdjustment + numAdditionalPictures)/fFrameRate;
+  while (pictureTime < fPictureTimeBase) { // "if" should be enough, but just in case
+    if (tcSecs > 0) tcSecs -= 1;
+    pictureTime += 1.0;
+  }
+  pictureTime -= fPictureTimeBase;
+  if (pictureTime < 0.0) pictureTime = 0.0; // sanity check
+  unsigned pictureSeconds = (unsigned)pictureTime;
+  double pictureFractionOfSecond = pictureTime - (double)pictureSeconds;
+
+  fPresentationTime = fPresentationTimeBase;
+  fPresentationTime.tv_sec += tcSecs + pictureSeconds;
+  fPresentationTime.tv_usec += (long)(pictureFractionOfSecond*1000000.0);
+  // Normalize, in case the microseconds overflowed into the next second:
+  if (fPresentationTime.tv_usec >= 1000000) {
+    fPresentationTime.tv_usec -= 1000000;
+    ++fPresentationTime.tv_sec;
+  }
+#ifdef DEBUG
+  if (firstPT.tv_sec == 0 && firstPT.tv_usec == 0) firstPT = fPresentationTime;
+  struct timeval diffPT;
+  diffPT.tv_sec = fPresentationTime.tv_sec - firstPT.tv_sec;
+  diffPT.tv_usec = fPresentationTime.tv_usec - firstPT.tv_usec;
+  if (fPresentationTime.tv_usec < firstPT.tv_usec) {
+    --diffPT.tv_sec;
+    diffPT.tv_usec += 1000000;
+  }
+  fprintf(stderr, "MPEGVideoStreamFramer::computePresentationTime(%d) -> %lu.%06ld [%lu.%06ld]\n", numAdditionalPictures, fPresentationTime.tv_sec, fPresentationTime.tv_usec, diffPT.tv_sec, diffPT.tv_usec);
+#endif
+}
+
+// Record a newly-seen GOP time code. The first one establishes the time
+// bases; a repeated (unchanged) time code is compensated for by advancing
+// "fPicturesAdjustment" instead.
+void MPEGVideoStreamFramer
+::setTimeCode(unsigned hours, unsigned minutes, unsigned seconds,
+	      unsigned pictures, unsigned picturesSinceLastGOP) {
+  TimeCode& tc = fCurGOPTimeCode; // abbrev
+  unsigned days = tc.days;
+  if (hours < tc.hours) {
+    // Assume that the 'day' has wrapped around:
+    ++days;
+  }
+  tc.days = days;
+  tc.hours = hours;
+  tc.minutes = minutes;
+  tc.seconds = seconds;
+  tc.pictures = pictures;
+  if (!fHaveSeenFirstTimeCode) {
+    // First time code: establish the picture- and seconds- bases:
+    fPictureTimeBase = fFrameRate == 0.0 ? 0.0 : tc.pictures/fFrameRate;
+    fTcSecsBase = (((tc.days*24)+tc.hours)*60+tc.minutes)*60+tc.seconds;
+    fHaveSeenFirstTimeCode = True;
+  } else if (fCurGOPTimeCode == fPrevGOPTimeCode) {
+    // The time code has not changed since last time. Adjust for this:
+    fPicturesAdjustment += picturesSinceLastGOP;
+  } else {
+    // Normal case: The time code changed since last time.
+    fPrevGOPTimeCode = tc;
+    fPicturesAdjustment = 0;
+  }
+}
+
+// Deliver the next frame: tell the parser where to put it, then try to
+// parse. (If the parser needs more input, delivery completes later via
+// the "continueReadProcessing" callback.)
+void MPEGVideoStreamFramer::doGetNextFrame() {
+  fParser->registerReadInterest(fTo, fMaxSize);
+  continueReadProcessing();
+}
+
+// Stop streaming: discard buffered data so a later restart begins cleanly.
+void MPEGVideoStreamFramer::doStopGettingFrames() {
+  flushInput();
+  FramedFilter::doStopGettingFrames();
+}
+
+// Static trampoline: the StreamParser invokes this - with "clientData"
+// pointing at the framer - once more input data has arrived; the data
+// pointer/size/time arguments are unused here.
+void MPEGVideoStreamFramer
+::continueReadProcessing(void* clientData,
+			 unsigned char* /*ptr*/, unsigned /*size*/,
+			 struct timeval /*presentationTime*/) {
+  ((MPEGVideoStreamFramer*)clientData)->continueReadProcessing();
+}
+
+// Run the parser; if it produced a complete frame, fill in the frame's
+// metadata (size, truncation, duration) and deliver it to our reader.
+void MPEGVideoStreamFramer::continueReadProcessing() {
+  unsigned acquiredFrameSize = fParser->parse();
+  if (acquiredFrameSize > 0) {
+    // We were able to acquire a frame from the input.
+    // It has already been copied to the reader's space.
+    fFrameSize = acquiredFrameSize;
+    fNumTruncatedBytes = fParser->numTruncatedBytes();
+
+    // "fPresentationTime" should have already been computed.
+
+    // Compute "fDurationInMicroseconds" now:
+    // (guard against an unknown frame rate, and against "fPictureCount"
+    // having gone negative when reinterpreted as signed)
+    fDurationInMicroseconds
+      = (fFrameRate == 0.0 || ((int)fPictureCount) < 0) ? 0
+      : (unsigned)((fPictureCount*1000000)/fFrameRate);
+#ifdef DEBUG
+    fprintf(stderr, "%d bytes @%u.%06d, fDurationInMicroseconds: %d ((%d*1000000)/%f)\n", acquiredFrameSize, fPresentationTime.tv_sec, fPresentationTime.tv_usec, fDurationInMicroseconds, fPictureCount, fFrameRate);
+#endif
+    fPictureCount = 0;
+
+    // Call our own 'after getting' function.  Because we're not a 'leaf'
+    // source, we can call this directly, without risking infinite recursion.
+    afterGetting(this);
+  } else {
+    // We were unable to parse a complete frame from the input, because:
+    // - we had to read more data from the source stream, or
+    // - the source stream has ended.
+  }
+}
diff --git a/liveMedia/MPEGVideoStreamParser.cpp b/liveMedia/MPEGVideoStreamParser.cpp
new file mode 100644
index 0000000..60c54fb
--- /dev/null
+++ b/liveMedia/MPEGVideoStreamParser.cpp
@@ -0,0 +1,45 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An abstract parser for MPEG video streams
+// Implementation
+
+#include "MPEGVideoStreamParser.hh"
+
+// Hook ourselves up to the input stream: on input exhaustion,
+// "FramedSource::handleClosure" is called on "usingSource"; when new data
+// arrives, "MPEGVideoStreamFramer::continueReadProcessing" resumes us.
+MPEGVideoStreamParser
+::MPEGVideoStreamParser(MPEGVideoStreamFramer* usingSource,
+			FramedSource* inputSource)
+  : StreamParser(inputSource, FramedSource::handleClosure, usingSource,
+		 &MPEGVideoStreamFramer::continueReadProcessing, usingSource),
+  fUsingSource(usingSource) {
+}
+
+// Destructor: no state of our own to release.
+MPEGVideoStreamParser::~MPEGVideoStreamParser() {
+}
+
+// Rewind both the input position (base class) and our output position to
+// the last checkpoint made by "setParseState()" - used when parsing was
+// interrupted mid-frame by running out of input data.
+void MPEGVideoStreamParser::restoreSavedParserState() {
+  StreamParser::restoreSavedParserState();
+  fTo = fSavedTo;
+  fNumTruncatedBytes = fSavedNumTruncatedBytes;
+}
+
+// Tell the parser where the next frame should be delivered ("to"), and how
+// much space is available there ("maxSize"). Also resets the truncation
+// count and the saved-checkpoint state to the start of the buffer.
+void MPEGVideoStreamParser::registerReadInterest(unsigned char* to,
+						 unsigned maxSize) {
+  fStartOfFrame = to;
+  fTo = to;
+  fSavedTo = to;
+  fLimit = to + maxSize;
+  fNumTruncatedBytes = 0;
+  fSavedNumTruncatedBytes = 0;
+}
diff --git a/liveMedia/MPEGVideoStreamParser.hh b/liveMedia/MPEGVideoStreamParser.hh
new file mode 100644
index 0000000..5236cf4
--- /dev/null
+++ b/liveMedia/MPEGVideoStreamParser.hh
@@ -0,0 +1,122 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An abstract parser for MPEG video streams
+// C++ header
+
+#ifndef _MPEG_VIDEO_STREAM_PARSER_HH
+#define _MPEG_VIDEO_STREAM_PARSER_HH
+
+#ifndef _STREAM_PARSER_HH
+#include "StreamParser.hh"
+#endif
+#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
+#include "MPEGVideoStreamFramer.hh"
+#endif
+
+////////// MPEGVideoStreamParser definition //////////
+
+// Abstract parser for MPEG video elementary streams. It reads from the
+// input via the StreamParser base class, and writes the acquired frame
+// directly into the reader's buffer ("fTo"), tracking any truncation.
+class MPEGVideoStreamParser: public StreamParser {
+public:
+  MPEGVideoStreamParser(MPEGVideoStreamFramer* usingSource,
+			FramedSource* inputSource);
+  virtual ~MPEGVideoStreamParser();
+
+public:
+  // Set the destination buffer for the next frame:
+  void registerReadInterest(unsigned char* to, unsigned maxSize);
+
+  virtual unsigned parse() = 0;
+  // returns the size of the frame that was acquired, or 0 if none was
+  // The number of truncated bytes (if any) is given by:
+  unsigned numTruncatedBytes() const { return fNumTruncatedBytes; }
+
+protected:
+  // Checkpoint both the output position and (via the base class) the input
+  // position, so an interrupted parse can resume from here:
+  void setParseState() {
+    fSavedTo = fTo;
+    fSavedNumTruncatedBytes = fNumTruncatedBytes;
+    saveParserState();
+  }
+
+  // Record "byte" in the current output frame:
+  void saveByte(u_int8_t byte) {
+    if (fTo >= fLimit) { // there's no space left
+      ++fNumTruncatedBytes;
+      return;
+    }
+
+    *fTo++ = byte;
+  }
+
+  // Record "word" (big-endian) in the current output frame:
+  void save4Bytes(u_int32_t word) {
+    if (fTo+4 > fLimit) { // there's no space left
+      fNumTruncatedBytes += 4;
+      return;
+    }
+
+    *fTo++ = word>>24; *fTo++ = word>>16; *fTo++ = word>>8; *fTo++ = word;
+  }
+
+  // Save data until we see a sync word (0x000001xx):
+  // On return, "curWord" holds the complete 4-byte sync word (unsaved).
+  void saveToNextCode(u_int32_t& curWord) {
+    saveByte(curWord>>24);
+    curWord = (curWord<<8)|get1Byte();
+    while ((curWord&0xFFFFFF00) != 0x00000100) {
+      if ((unsigned)(curWord&0xFF) > 1) {
+	// a sync word definitely doesn't begin anywhere in "curWord"
+	save4Bytes(curWord);
+	curWord = get4Bytes();
+      } else {
+	// a sync word might begin in "curWord", although not at its start
+	saveByte(curWord>>24);
+	unsigned char newByte = get1Byte();
+	curWord = (curWord<<8)|newByte;
+      }
+    }
+  }
+
+  // Skip data until we see a sync word (0x000001xx):
+  // Same scan as "saveToNextCode()", but discards the bytes instead.
+  void skipToNextCode(u_int32_t& curWord) {
+    curWord = (curWord<<8)|get1Byte();
+    while ((curWord&0xFFFFFF00) != 0x00000100) {
+      if ((unsigned)(curWord&0xFF) > 1) {
+	// a sync word definitely doesn't begin anywhere in "curWord"
+	curWord = get4Bytes();
+      } else {
+	// a sync word might begin in "curWord", although not at its start
+	unsigned char newByte = get1Byte();
+	curWord = (curWord<<8)|newByte;
+      }
+    }
+  }
+
+protected:
+  MPEGVideoStreamFramer* fUsingSource; // the framer we deliver frames to
+
+  // state of the frame that's currently being read:
+  unsigned char* fStartOfFrame; // start of the reader's buffer
+  unsigned char* fTo;           // next write position within the buffer
+  unsigned char* fLimit;        // one past the end of the buffer
+  unsigned fNumTruncatedBytes;  // bytes that didn't fit in the buffer
+  unsigned curFrameSize() { return fTo - fStartOfFrame; }
+  // checkpoint of the above, for restoring after an interrupted parse:
+  unsigned char* fSavedTo;
+  unsigned fSavedNumTruncatedBytes;
+
+private: // redefined virtual functions
+  virtual void restoreSavedParserState();
+};
+
+#endif
diff --git a/liveMedia/Makefile.head b/liveMedia/Makefile.head
new file mode 100644
index 0000000..f4e4414
--- /dev/null
+++ b/liveMedia/Makefile.head
@@ -0,0 +1,4 @@
+INCLUDES = -Iinclude -I../UsageEnvironment/include -I../groupsock/include
+PREFIX = /usr/local
+LIBDIR = $(PREFIX)/lib
+##### Change the following for your environment:
diff --git a/liveMedia/Makefile.tail b/liveMedia/Makefile.tail
new file mode 100644
index 0000000..1740c74
--- /dev/null
+++ b/liveMedia/Makefile.tail
@@ -0,0 +1,447 @@
+##### End of variables to change
+
+NAME = libliveMedia
+LIVEMEDIA_LIB = $(NAME).$(LIB_SUFFIX)
+ALL = $(LIVEMEDIA_LIB)
+all: $(ALL)
+
+.$(C).$(OBJ):
+ $(C_COMPILER) -c $(C_FLAGS) $<
+.$(CPP).$(OBJ):
+ $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+MP3_SOURCE_OBJS = MP3FileSource.$(OBJ) MP3Transcoder.$(OBJ) MP3ADU.$(OBJ) MP3ADUdescriptor.$(OBJ) MP3ADUinterleaving.$(OBJ) MP3ADUTranscoder.$(OBJ) MP3StreamState.$(OBJ) MP3Internals.$(OBJ) MP3InternalsHuffman.$(OBJ) MP3InternalsHuffmanTable.$(OBJ) MP3ADURTPSource.$(OBJ)
+MPEG_SOURCE_OBJS = MPEG1or2Demux.$(OBJ) MPEG1or2DemuxedElementaryStream.$(OBJ) MPEGVideoStreamFramer.$(OBJ) MPEG1or2VideoStreamFramer.$(OBJ) MPEG1or2VideoStreamDiscreteFramer.$(OBJ) MPEG4VideoStreamFramer.$(OBJ) MPEG4VideoStreamDiscreteFramer.$(OBJ) H264or5VideoStreamFramer.$(OBJ) H264or5VideoStreamDiscreteFramer.$(OBJ) H264VideoStreamFramer.$(OBJ) H264VideoStreamDiscreteFramer.$(OBJ) H265VideoStreamFramer.$(OBJ) H265VideoStreamDiscreteFramer.$(OBJ) MPEGVideoStreamParser.$(OBJ) MPEG1or2AudioStreamFramer.$(OBJ) MPEG1or2AudioRTPSource.$(OBJ) MPEG4LATMAudioRTPSource.$(OBJ) MPEG4ESVideoRTPSource.$(OBJ) MPEG4GenericRTPSource.$(OBJ) $(MP3_SOURCE_OBJS) MPEG1or2VideoRTPSource.$(OBJ) MPEG2TransportStreamMultiplexor.$(OBJ) MPEG2TransportStreamFromPESSource.$(OBJ) MPEG2TransportStreamFromESSource.$(OBJ) MPEG2TransportStreamFramer.$(OBJ) MPEG2TransportStreamAccumulator.$(OBJ) ADTSAudioFileSource.$(OBJ)
+#JPEG_SOURCE_OBJS = JPEGVideoSource.$(OBJ) JPEGVideoRTPSource.$(OBJ) JPEG2000VideoStreamFramer.$(OBJ) JPEG2000VideoStreamParser.$(OBJ) JPEG2000VideoRTPSource.$(OBJ)
+JPEG_SOURCE_OBJS = JPEGVideoSource.$(OBJ) JPEGVideoRTPSource.$(OBJ) JPEG2000VideoRTPSource.$(OBJ)
+H263_SOURCE_OBJS = H263plusVideoRTPSource.$(OBJ) H263plusVideoStreamFramer.$(OBJ) H263plusVideoStreamParser.$(OBJ)
+AC3_SOURCE_OBJS = AC3AudioStreamFramer.$(OBJ) AC3AudioRTPSource.$(OBJ)
+DV_SOURCE_OBJS = DVVideoStreamFramer.$(OBJ) DVVideoRTPSource.$(OBJ)
+MP3_SINK_OBJS = MP3ADURTPSink.$(OBJ)
+MPEG_SINK_OBJS = MPEG1or2AudioRTPSink.$(OBJ) $(MP3_SINK_OBJS) MPEG1or2VideoRTPSink.$(OBJ) MPEG4LATMAudioRTPSink.$(OBJ) MPEG4GenericRTPSink.$(OBJ) MPEG4ESVideoRTPSink.$(OBJ)
+JPEG_SINK_OBJS = JPEGVideoRTPSink.$(OBJ) JPEG2000VideoRTPSink.$(OBJ)
+H263_SINK_OBJS = H263plusVideoRTPSink.$(OBJ)
+H264_OR_5_SINK_OBJS = H264or5VideoRTPSink.$(OBJ) H264VideoRTPSink.$(OBJ) H265VideoRTPSink.$(OBJ)
+DV_SINK_OBJS = DVVideoRTPSink.$(OBJ)
+AC3_SINK_OBJS = AC3AudioRTPSink.$(OBJ)
+
+MISC_SOURCE_OBJS = MediaSource.$(OBJ) FramedSource.$(OBJ) FramedFileSource.$(OBJ) FramedFilter.$(OBJ) ByteStreamFileSource.$(OBJ) ByteStreamMultiFileSource.$(OBJ) ByteStreamMemoryBufferSource.$(OBJ) BasicUDPSource.$(OBJ) DeviceSource.$(OBJ) AudioInputDevice.$(OBJ) WAVAudioFileSource.$(OBJ) $(MPEG_SOURCE_OBJS) $(JPEG_SOURCE_OBJS) $(H263_SOURCE_OBJS) $(AC3_SOURCE_OBJS) $(DV_SOURCE_OBJS) AMRAudioSource.$(OBJ) AMRAudioFileSource.$(OBJ) InputFile.$(OBJ) StreamReplicator.$(OBJ)
+MISC_SINK_OBJS = MediaSink.$(OBJ) FileSink.$(OBJ) BasicUDPSink.$(OBJ) AMRAudioFileSink.$(OBJ) H264or5VideoFileSink.$(OBJ) H264VideoFileSink.$(OBJ) H265VideoFileSink.$(OBJ) OggFileSink.$(OBJ) $(MPEG_SINK_OBJS) $(JPEG_SINK_OBJS) $(H263_SINK_OBJS) $(H264_OR_5_SINK_OBJS) $(DV_SINK_OBJS) $(AC3_SINK_OBJS) VorbisAudioRTPSink.$(OBJ) TheoraVideoRTPSink.$(OBJ) VP8VideoRTPSink.$(OBJ) VP9VideoRTPSink.$(OBJ) GSMAudioRTPSink.$(OBJ) SimpleRTPSink.$(OBJ) AMRAudioRTPSink.$(OBJ) T140TextRTPSink.$(OBJ) TCPStreamSink.$(OBJ) OutputFile.$(OBJ) RawVideoRTPSink.$(OBJ)
+MISC_FILTER_OBJS = uLawAudioFilter.$(OBJ)
+TRANSPORT_STREAM_TRICK_PLAY_OBJS = MPEG2IndexFromTransportStream.$(OBJ) MPEG2TransportStreamIndexFile.$(OBJ) MPEG2TransportStreamTrickModeFilter.$(OBJ)
+
+RTP_SOURCE_OBJS = RTPSource.$(OBJ) MultiFramedRTPSource.$(OBJ) SimpleRTPSource.$(OBJ) H261VideoRTPSource.$(OBJ) H264VideoRTPSource.$(OBJ) H265VideoRTPSource.$(OBJ) QCELPAudioRTPSource.$(OBJ) AMRAudioRTPSource.$(OBJ) VorbisAudioRTPSource.$(OBJ) TheoraVideoRTPSource.$(OBJ) VP8VideoRTPSource.$(OBJ) VP9VideoRTPSource.$(OBJ) RawVideoRTPSource.$(OBJ)
+RTP_SINK_OBJS = RTPSink.$(OBJ) MultiFramedRTPSink.$(OBJ) AudioRTPSink.$(OBJ) VideoRTPSink.$(OBJ) TextRTPSink.$(OBJ)
+RTP_INTERFACE_OBJS = RTPInterface.$(OBJ)
+RTP_OBJS = $(RTP_SOURCE_OBJS) $(RTP_SINK_OBJS) $(RTP_INTERFACE_OBJS)
+
+RTCP_OBJS = RTCP.$(OBJ) rtcp_from_spec.$(OBJ)
+GENERIC_MEDIA_SERVER_OBJS = GenericMediaServer.$(OBJ)
+RTSP_OBJS = RTSPServer.$(OBJ) RTSPServerRegister.$(OBJ) RTSPClient.$(OBJ) RTSPCommon.$(OBJ) RTSPServerSupportingHTTPStreaming.$(OBJ) RTSPRegisterSender.$(OBJ)
+SIP_OBJS = SIPClient.$(OBJ)
+
+SESSION_OBJS = MediaSession.$(OBJ) ServerMediaSession.$(OBJ) PassiveServerMediaSubsession.$(OBJ) OnDemandServerMediaSubsession.$(OBJ) FileServerMediaSubsession.$(OBJ) MPEG4VideoFileServerMediaSubsession.$(OBJ) H264VideoFileServerMediaSubsession.$(OBJ) H265VideoFileServerMediaSubsession.$(OBJ) H263plusVideoFileServerMediaSubsession.$(OBJ) WAVAudioFileServerMediaSubsession.$(OBJ) AMRAudioFileServerMediaSubsession.$(OBJ) MP3AudioFileServerMediaSubsession.$(OBJ) MPEG1or2VideoFileServerMediaSubsession.$(OBJ) MPEG1or2FileServerDemux.$(OBJ) MPEG1or2DemuxedServerMediaSubsession.$(OBJ) MPEG2TransportFileServerMediaSubsession.$(OBJ) ADTSAudioFileServerMediaSubsession.$(OBJ) DVVideoFileServerMediaSubsession.$(OBJ) AC3AudioFileServerMediaSubsession.$(OBJ) MPEG2TransportUDPServerMediaSubsession.$(OBJ) ProxyServerMediaSession.$(OBJ)
+
+QUICKTIME_OBJS = QuickTimeFileSink.$(OBJ) QuickTimeGenericRTPSource.$(OBJ)
+AVI_OBJS = AVIFileSink.$(OBJ)
+
+MATROSKA_FILE_OBJS = MatroskaFile.$(OBJ) MatroskaFileParser.$(OBJ) EBMLNumber.$(OBJ) MatroskaDemuxedTrack.$(OBJ)
+MATROSKA_SERVER_MEDIA_SUBSESSION_OBJS = MatroskaFileServerMediaSubsession.$(OBJ) MP3AudioMatroskaFileServerMediaSubsession.$(OBJ)
+MATROSKA_RTSP_SERVER_OBJS = MatroskaFileServerDemux.$(OBJ) $(MATROSKA_SERVER_MEDIA_SUBSESSION_OBJS)
+MATROSKA_OBJS = $(MATROSKA_FILE_OBJS) $(MATROSKA_RTSP_SERVER_OBJS)
+
+OGG_FILE_OBJS = OggFile.$(OBJ) OggFileParser.$(OBJ) OggDemuxedTrack.$(OBJ)
+OGG_SERVER_MEDIA_SUBSESSION_OBJS = OggFileServerMediaSubsession.$(OBJ)
+OGG_RTSP_SERVER_OBJS = OggFileServerDemux.$(OBJ) $(OGG_SERVER_MEDIA_SUBSESSION_OBJS)
+OGG_OBJS = $(OGG_FILE_OBJS) $(OGG_RTSP_SERVER_OBJS)
+
+TRANSPORT_STREAM_DEMUX_OBJS = MPEG2TransportStreamDemux.$(OBJ) MPEG2TransportStreamDemuxedTrack.$(OBJ) MPEG2TransportStreamParser.$(OBJ) MPEG2TransportStreamParser_PAT.$(OBJ) MPEG2TransportStreamParser_PMT.$(OBJ) MPEG2TransportStreamParser_STREAM.$(OBJ)
+
+HLS_OBJS = HLSSegmenter.$(OBJ)
+
+SECURITY_OBJS = TLSState.$(OBJ) MIKEY.$(OBJ) SRTPCryptographicContext.$(OBJ) HMAC_SHA1.$(OBJ)
+
+MISC_OBJS = BitVector.$(OBJ) StreamParser.$(OBJ) DigestAuthentication.$(OBJ) ourMD5.$(OBJ) Base64.$(OBJ) Locale.$(OBJ)
+
+LIVEMEDIA_LIB_OBJS = Media.$(OBJ) $(MISC_SOURCE_OBJS) $(MISC_SINK_OBJS) $(MISC_FILTER_OBJS) $(RTP_OBJS) $(RTCP_OBJS) $(GENERIC_MEDIA_SERVER_OBJS) $(RTSP_OBJS) $(SIP_OBJS) $(SESSION_OBJS) $(QUICKTIME_OBJS) $(AVI_OBJS) $(TRANSPORT_STREAM_TRICK_PLAY_OBJS) $(MATROSKA_OBJS) $(OGG_OBJS) $(TRANSPORT_STREAM_DEMUX_OBJS) $(HLS_OBJS) $(SECURITY_OBJS) $(MISC_OBJS)
+
+$(LIVEMEDIA_LIB): $(LIVEMEDIA_LIB_OBJS) \
+ $(PLATFORM_SPECIFIC_LIB_OBJS)
+ $(LIBRARY_LINK)$@ $(LIBRARY_LINK_OPTS) \
+ $(LIVEMEDIA_LIB_OBJS)
+
+Media.$(CPP): include/Media.hh
+include/Media.hh: include/liveMedia_version.hh
+MediaSource.$(CPP): include/MediaSource.hh
+include/MediaSource.hh: include/Media.hh
+FramedSource.$(CPP): include/FramedSource.hh
+include/FramedSource.hh: include/MediaSource.hh
+FramedFileSource.$(CPP): include/FramedFileSource.hh
+include/FramedFileSource.hh: include/FramedSource.hh
+FramedFilter.$(CPP): include/FramedFilter.hh
+include/FramedFilter.hh: include/FramedSource.hh
+RTPSource.$(CPP): include/RTPSource.hh
+include/RTPSource.hh: include/FramedSource.hh include/RTPInterface.hh include/SRTPCryptographicContext.hh
+include/RTPInterface.hh: include/Media.hh
+MultiFramedRTPSource.$(CPP): include/MultiFramedRTPSource.hh include/RTCP.hh
+include/MultiFramedRTPSource.hh: include/RTPSource.hh
+SimpleRTPSource.$(CPP): include/SimpleRTPSource.hh
+include/SimpleRTPSource.hh: include/MultiFramedRTPSource.hh
+H261VideoRTPSource.$(CPP): include/H261VideoRTPSource.hh
+include/H261VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+H264VideoRTPSource.$(CPP): include/H264VideoRTPSource.hh include/Base64.hh
+include/H264VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+H265VideoRTPSource.$(CPP): include/H265VideoRTPSource.hh
+include/H265VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+QCELPAudioRTPSource.$(CPP): include/QCELPAudioRTPSource.hh include/MultiFramedRTPSource.hh include/FramedFilter.hh
+include/QCELPAudioRTPSource.hh: include/RTPSource.hh
+AMRAudioRTPSource.$(CPP): include/AMRAudioRTPSource.hh include/MultiFramedRTPSource.hh
+include/AMRAudioRTPSource.hh: include/RTPSource.hh include/AMRAudioSource.hh
+VorbisAudioRTPSource.$(CPP): include/VorbisAudioRTPSource.hh include/Base64.hh
+include/VorbisAudioRTPSource.hh: include/MultiFramedRTPSource.hh
+TheoraVideoRTPSource.$(CPP): include/TheoraVideoRTPSource.hh
+include/TheoraVideoRTPSource.hh: include/MultiFramedRTPSource.hh
+VP8VideoRTPSource.$(CPP): include/VP8VideoRTPSource.hh
+include/VP8VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+VP9VideoRTPSource.$(CPP): include/VP9VideoRTPSource.hh
+include/VP9VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+RawVideoRTPSource.$(CPP): include/RawVideoRTPSource.hh
+include/RawVideoRTPSource.hh: include/MultiFramedRTPSource.hh
+ByteStreamFileSource.$(CPP): include/ByteStreamFileSource.hh include/InputFile.hh
+include/ByteStreamFileSource.hh: include/FramedFileSource.hh
+ByteStreamMultiFileSource.$(CPP): include/ByteStreamMultiFileSource.hh
+include/ByteStreamMultiFileSource.hh: include/ByteStreamFileSource.hh
+ByteStreamMemoryBufferSource.$(CPP): include/ByteStreamMemoryBufferSource.hh
+include/ByteStreamMemoryBufferSource.hh: include/FramedSource.hh
+BasicUDPSource.$(CPP): include/BasicUDPSource.hh
+include/BasicUDPSource.hh: include/FramedSource.hh
+DeviceSource.$(CPP): include/DeviceSource.hh
+include/DeviceSource.hh: include/FramedSource.hh
+AudioInputDevice.$(CPP): include/AudioInputDevice.hh
+include/AudioInputDevice.hh: include/FramedSource.hh
+WAVAudioFileSource.$(CPP): include/WAVAudioFileSource.hh include/InputFile.hh
+include/WAVAudioFileSource.hh: include/AudioInputDevice.hh
+MPEG1or2Demux.$(CPP): include/MPEG1or2Demux.hh include/MPEG1or2DemuxedElementaryStream.hh StreamParser.hh
+include/MPEG1or2Demux.hh: include/FramedSource.hh
+include/MPEG1or2DemuxedElementaryStream.hh: include/MPEG1or2Demux.hh
+StreamParser.hh: include/FramedSource.hh
+MPEG1or2DemuxedElementaryStream.$(CPP): include/MPEG1or2DemuxedElementaryStream.hh
+MPEGVideoStreamFramer.$(CPP): MPEGVideoStreamParser.hh
+MPEGVideoStreamParser.hh: StreamParser.hh include/MPEGVideoStreamFramer.hh
+include/MPEGVideoStreamFramer.hh: include/FramedFilter.hh
+MPEG1or2VideoStreamFramer.$(CPP): include/MPEG1or2VideoStreamFramer.hh MPEGVideoStreamParser.hh
+include/MPEG1or2VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh
+MPEG1or2VideoStreamDiscreteFramer.$(CPP): include/MPEG1or2VideoStreamDiscreteFramer.hh
+include/MPEG1or2VideoStreamDiscreteFramer.hh: include/MPEG1or2VideoStreamFramer.hh
+MPEG4VideoStreamFramer.$(CPP): include/MPEG4VideoStreamFramer.hh MPEGVideoStreamParser.hh include/MPEG4LATMAudioRTPSource.hh
+include/MPEG4VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh
+MPEG4VideoStreamDiscreteFramer.$(CPP): include/MPEG4VideoStreamDiscreteFramer.hh
+include/MPEG4VideoStreamDiscreteFramer.hh: include/MPEG4VideoStreamFramer.hh
+H264or5VideoStreamFramer.$(CPP): include/H264or5VideoStreamFramer.hh MPEGVideoStreamParser.hh include/BitVector.hh
+include/H264or5VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh
+H264or5VideoStreamDiscreteFramer.$(CPP): include/H264or5VideoStreamDiscreteFramer.hh
+include/H264or5VideoStreamDiscreteFramer.hh: include/H264or5VideoStreamFramer.hh
+H264VideoStreamFramer.$(CPP): include/H264VideoStreamFramer.hh
+include/H264VideoStreamFramer.hh: include/H264or5VideoStreamFramer.hh
+H264VideoStreamDiscreteFramer.$(CPP): include/H264VideoStreamDiscreteFramer.hh
+include/H264VideoStreamDiscreteFramer.hh: include/H264VideoStreamFramer.hh
+H265VideoStreamFramer.$(CPP): include/H265VideoStreamFramer.hh
+include/H265VideoStreamFramer.hh: include/H264or5VideoStreamFramer.hh
+H265VideoStreamDiscreteFramer.$(CPP): include/H265VideoStreamDiscreteFramer.hh
+include/H265VideoStreamDiscreteFramer.hh: include/H265VideoStreamFramer.hh
+MPEGVideoStreamParser.$(CPP): MPEGVideoStreamParser.hh
+MPEG1or2AudioStreamFramer.$(CPP): include/MPEG1or2AudioStreamFramer.hh StreamParser.hh MP3Internals.hh
+include/MPEG1or2AudioStreamFramer.hh: include/FramedFilter.hh
+MPEG1or2AudioRTPSource.$(CPP): include/MPEG1or2AudioRTPSource.hh
+include/MPEG1or2AudioRTPSource.hh: include/MultiFramedRTPSource.hh
+MPEG4LATMAudioRTPSource.$(CPP): include/MPEG4LATMAudioRTPSource.hh
+include/MPEG4LATMAudioRTPSource.hh: include/MultiFramedRTPSource.hh
+MPEG4ESVideoRTPSource.$(CPP): include/MPEG4ESVideoRTPSource.hh
+include/MPEG4ESVideoRTPSource.hh: include/MultiFramedRTPSource.hh
+MPEG4GenericRTPSource.$(CPP): include/MPEG4GenericRTPSource.hh include/BitVector.hh include/MPEG4LATMAudioRTPSource.hh
+include/MPEG4GenericRTPSource.hh: include/MultiFramedRTPSource.hh
+MP3FileSource.$(CPP): include/MP3FileSource.hh MP3StreamState.hh include/InputFile.hh
+include/MP3FileSource.hh: include/FramedFileSource.hh
+MP3StreamState.hh: MP3Internals.hh
+MP3Internals.hh: include/BitVector.hh
+MP3Transcoder.$(CPP): include/MP3ADU.hh include/MP3Transcoder.hh
+include/MP3ADU.hh: include/FramedFilter.hh
+include/MP3Transcoder.hh: include/MP3ADU.hh include/MP3ADUTranscoder.hh
+include/MP3ADUTranscoder.hh: include/FramedFilter.hh
+MP3ADU.$(CPP): include/MP3ADU.hh MP3ADUdescriptor.hh MP3Internals.hh
+MP3ADUdescriptor.$(CPP): MP3ADUdescriptor.hh
+MP3ADUinterleaving.$(CPP): include/MP3ADUinterleaving.hh MP3ADUdescriptor.hh
+include/MP3ADUinterleaving.hh: include/FramedFilter.hh
+MP3ADUTranscoder.$(CPP): include/MP3ADUTranscoder.hh MP3Internals.hh
+MP3StreamState.$(CPP): MP3StreamState.hh include/InputFile.hh
+MP3Internals.$(CPP): MP3InternalsHuffman.hh
+MP3InternalsHuffman.hh: MP3Internals.hh
+MP3InternalsHuffman.$(CPP): MP3InternalsHuffman.hh
+MP3InternalsHuffmanTable.$(CPP): MP3InternalsHuffman.hh
+MP3ADURTPSource.$(CPP): include/MP3ADURTPSource.hh MP3ADUdescriptor.hh
+include/MP3ADURTPSource.hh: include/MultiFramedRTPSource.hh
+MPEG1or2VideoRTPSource.$(CPP): include/MPEG1or2VideoRTPSource.hh
+include/MPEG1or2VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+MPEG2TransportStreamMultiplexor.$(CPP): include/MPEG2TransportStreamMultiplexor.hh
+include/MPEG2TransportStreamMultiplexor.hh: include/FramedSource.hh include/MPEG1or2Demux.hh
+MPEG2TransportStreamFromPESSource.$(CPP): include/MPEG2TransportStreamFromPESSource.hh
+include/MPEG2TransportStreamFromPESSource.hh: include/MPEG2TransportStreamMultiplexor.hh include/MPEG1or2DemuxedElementaryStream.hh
+MPEG2TransportStreamFromESSource.$(CPP): include/MPEG2TransportStreamFromESSource.hh
+include/MPEG2TransportStreamFromESSource.hh: include/MPEG2TransportStreamMultiplexor.hh
+MPEG2TransportStreamFramer.$(CPP): include/MPEG2TransportStreamFramer.hh
+include/MPEG2TransportStreamFramer.hh: include/FramedFilter.hh include/MPEG2TransportStreamIndexFile.hh
+MPEG2TransportStreamAccumulator.$(CPP): include/MPEG2TransportStreamAccumulator.hh
+include/MPEG2TransportStreamAccumulator.hh: include/FramedFilter.hh
+ADTSAudioFileSource.$(CPP): include/ADTSAudioFileSource.hh include/InputFile.hh
+JPEGVideoSource.$(CPP): include/JPEGVideoSource.hh
+include/JPEGVideoSource.hh: include/FramedSource.hh
+JPEGVideoRTPSource.$(CPP): include/JPEGVideoRTPSource.hh
+include/JPEGVideoRTPSource.hh: include/MultiFramedRTPSource.hh
+#JPEG2000VideoStreamFramer.$(CPP): include/JPEG2000VideoStreamFramer.hh JPEG2000VideoStreamParser.hh
+#include/JPEG2000VideoStreamFramer.hh: include/MPEGVideoStreamFramer.hh
+#JPEG2000VideoStreamParser.hh: StreamParser.hh include/MPEGVideoStreamFramer.hh
+#JPEG2000VideoStreamParser.$(CPP): JPEG2000VideoStreamParser.hh
+JPEG2000VideoRTPSource.$(CPP): include/JPEG2000VideoRTPSource.hh
+include/JPEG2000VideoRTPSource.hh: include/MultiFramedRTPSource.hh
+include/ADTSAudioFileSource.hh: include/FramedFileSource.hh
+H263plusVideoRTPSource.$(CPP): include/H263plusVideoRTPSource.hh
+include/H263plusVideoRTPSource.hh: include/MultiFramedRTPSource.hh
+H263plusVideoStreamFramer.$(CPP): include/H263plusVideoStreamFramer.hh H263plusVideoStreamParser.hh
+include/H263plusVideoStreamFramer.hh: include/FramedFilter.hh
+H263plusVideoStreamParser.hh: StreamParser.hh
+H263plusVideoStreamParser.$(CPP): H263plusVideoStreamParser.hh include/H263plusVideoStreamFramer.hh
+AC3AudioStreamFramer.$(CPP): include/AC3AudioStreamFramer.hh StreamParser.hh
+include/AC3AudioStreamFramer.hh: include/FramedFilter.hh
+AC3AudioRTPSource.$(CPP): include/AC3AudioRTPSource.hh
+include/AC3AudioRTPSource.hh: include/MultiFramedRTPSource.hh
+DVVideoRTPSource.$(CPP): include/DVVideoRTPSource.hh
+include/DVVideoRTPSource.hh: include/MultiFramedRTPSource.hh
+AMRAudioSource.$(CPP): include/AMRAudioSource.hh
+include/AMRAudioSource.hh: include/FramedSource.hh
+AMRAudioFileSource.$(CPP): include/AMRAudioFileSource.hh include/InputFile.hh
+include/AMRAudioFileSource.hh: include/AMRAudioSource.hh
+InputFile.$(CPP): include/InputFile.hh
+StreamReplicator.$(CPP): include/StreamReplicator.hh
+include/StreamReplicator.hh: include/FramedSource.hh
+MediaSink.$(CPP): include/MediaSink.hh
+include/MediaSink.hh: include/FramedSource.hh
+FileSink.$(CPP): include/FileSink.hh include/OutputFile.hh
+include/FileSink.hh: include/MediaSink.hh
+BasicUDPSink.$(CPP): include/BasicUDPSink.hh
+include/BasicUDPSink.hh: include/MediaSink.hh
+AMRAudioFileSink.$(CPP): include/AMRAudioFileSink.hh include/AMRAudioSource.hh include/OutputFile.hh
+include/AMRAudioFileSink.hh: include/FileSink.hh
+H264or5VideoFileSink.$(CPP): include/H264or5VideoFileSink.hh include/H264VideoRTPSource.hh
+include/H264or5VideoFileSink.hh: include/FileSink.hh
+H264VideoFileSink.$(CPP): include/H264VideoFileSink.hh include/OutputFile.hh
+include/H264VideoFileSink.hh: include/H264or5VideoFileSink.hh
+H265VideoFileSink.$(CPP): include/H265VideoFileSink.hh include/OutputFile.hh
+include/H265VideoFileSink.hh: include/H264or5VideoFileSink.hh
+OggFileSink.$(CPP): include/OggFileSink.hh include/OutputFile.hh include/VorbisAudioRTPSource.hh include/MPEG2TransportStreamMultiplexor.hh include/FramedSource.hh
+include/OggFileSink.hh: include/FileSink.hh
+RTPSink.$(CPP): include/RTPSink.hh
+include/RTPSink.hh: include/MediaSink.hh include/RTPInterface.hh
+MultiFramedRTPSink.$(CPP): include/MultiFramedRTPSink.hh
+include/MultiFramedRTPSink.hh: include/RTPSink.hh
+AudioRTPSink.$(CPP): include/AudioRTPSink.hh
+include/AudioRTPSink.hh: include/MultiFramedRTPSink.hh
+VideoRTPSink.$(CPP): include/VideoRTPSink.hh
+include/VideoRTPSink.hh: include/MultiFramedRTPSink.hh
+TextRTPSink.$(CPP): include/TextRTPSink.hh
+include/TextRTPSink.hh: include/MultiFramedRTPSink.hh
+RTPInterface.$(CPP): include/RTPInterface.hh
+MPEG1or2AudioRTPSink.$(CPP): include/MPEG1or2AudioRTPSink.hh
+include/MPEG1or2AudioRTPSink.hh: include/AudioRTPSink.hh
+MP3ADURTPSink.$(CPP): include/MP3ADURTPSink.hh
+include/MP3ADURTPSink.hh: include/AudioRTPSink.hh
+MPEG1or2VideoRTPSink.$(CPP): include/MPEG1or2VideoRTPSink.hh include/MPEG1or2VideoStreamFramer.hh
+include/MPEG1or2VideoRTPSink.hh: include/VideoRTPSink.hh
+MPEG4LATMAudioRTPSink.$(CPP): include/MPEG4LATMAudioRTPSink.hh
+include/MPEG4LATMAudioRTPSink.hh: include/AudioRTPSink.hh
+MPEG4GenericRTPSink.$(CPP): include/MPEG4GenericRTPSink.hh include/Locale.hh
+include/MPEG4GenericRTPSink.hh: include/MultiFramedRTPSink.hh
+MPEG4ESVideoRTPSink.$(CPP): include/MPEG4ESVideoRTPSink.hh include/MPEG4VideoStreamFramer.hh include/MPEG4LATMAudioRTPSource.hh
+include/MPEG4ESVideoRTPSink.hh: include/VideoRTPSink.hh
+JPEGVideoRTPSink.$(CPP): include/JPEGVideoRTPSink.hh include/JPEGVideoSource.hh
+include/JPEGVideoRTPSink.hh: include/VideoRTPSink.hh
+JPEG2000VideoRTPSink.$(CPP): include/JPEG2000VideoRTPSink.hh
+include/JPEG2000VideoRTPSink.hh: include/VideoRTPSink.hh
+H263plusVideoRTPSink.$(CPP): include/H263plusVideoRTPSink.hh
+include/H263plusVideoRTPSink.hh: include/VideoRTPSink.hh
+H264or5VideoRTPSink.$(CPP): include/H264or5VideoRTPSink.hh include/H264or5VideoStreamFramer.hh
+include/H264or5VideoRTPSink.hh: include/VideoRTPSink.hh include/FramedFilter.hh
+H264VideoRTPSink.$(CPP): include/H264VideoRTPSink.hh include/H264VideoStreamFramer.hh include/Base64.hh include/H264VideoRTPSource.hh
+include/H264VideoRTPSink.hh: include/H264or5VideoRTPSink.hh
+H265VideoRTPSink.$(CPP): include/H265VideoRTPSink.hh include/H265VideoStreamFramer.hh include/Base64.hh include/BitVector.hh include/H264VideoRTPSource.hh
+include/H265VideoRTPSink.hh: include/H264or5VideoRTPSink.hh
+DVVideoRTPSink.$(CPP): include/DVVideoRTPSink.hh
+include/DVVideoRTPSink.hh: include/VideoRTPSink.hh include/DVVideoStreamFramer.hh
+include/DVVideoStreamFramer.hh: include/FramedFilter.hh
+AC3AudioRTPSink.$(CPP): include/AC3AudioRTPSink.hh
+include/AC3AudioRTPSink.hh: include/AudioRTPSink.hh
+VorbisAudioRTPSink.$(CPP): include/VorbisAudioRTPSink.hh include/Base64.hh include/VorbisAudioRTPSource.hh
+include/VorbisAudioRTPSink.hh: include/AudioRTPSink.hh
+TheoraVideoRTPSink.$(CPP): include/TheoraVideoRTPSink.hh include/Base64.hh include/VorbisAudioRTPSource.hh include/VorbisAudioRTPSink.hh
+include/TheoraVideoRTPSink.hh: include/VideoRTPSink.hh
+RawVideoRTPSink.$(CPP): include/RawVideoRTPSink.hh
+include/RawVideoRTPSink.hh: include/VideoRTPSink.hh
+VP8VideoRTPSink.$(CPP): include/VP8VideoRTPSink.hh
+include/VP8VideoRTPSink.hh: include/VideoRTPSink.hh
+VP9VideoRTPSink.$(CPP): include/VP9VideoRTPSink.hh
+include/VP9VideoRTPSink.hh: include/VideoRTPSink.hh
+GSMAudioRTPSink.$(CPP): include/GSMAudioRTPSink.hh
+include/GSMAudioRTPSink.hh: include/AudioRTPSink.hh
+SimpleRTPSink.$(CPP): include/SimpleRTPSink.hh
+include/SimpleRTPSink.hh: include/MultiFramedRTPSink.hh
+AMRAudioRTPSink.$(CPP): include/AMRAudioRTPSink.hh include/AMRAudioSource.hh
+include/AMRAudioRTPSink.hh: include/AudioRTPSink.hh
+T140TextRTPSink.$(CPP): include/T140TextRTPSink.hh
+include/T140TextRTPSink.hh: include/TextRTPSink.hh include/FramedFilter.hh
+TCPStreamSink.$(CPP): include/TCPStreamSink.hh
+include/TCPStreamSink.hh: include/MediaSink.hh
+OutputFile.$(CPP): include/OutputFile.hh
+uLawAudioFilter.$(CPP): include/uLawAudioFilter.hh
+include/uLawAudioFilter.hh: include/FramedFilter.hh
+MPEG2IndexFromTransportStream.$(CPP): include/MPEG2IndexFromTransportStream.hh
+include/MPEG2IndexFromTransportStream.hh: include/FramedFilter.hh
+MPEG2TransportStreamIndexFile.$(CPP): include/MPEG2TransportStreamIndexFile.hh include/InputFile.hh
+include/MPEG2TransportStreamIndexFile.hh: include/Media.hh
+MPEG2TransportStreamTrickModeFilter.$(CPP): include/MPEG2TransportStreamTrickModeFilter.hh include/ByteStreamFileSource.hh
+include/MPEG2TransportStreamTrickModeFilter.hh: include/FramedFilter.hh include/MPEG2TransportStreamIndexFile.hh
+RTCP.$(CPP): include/RTCP.hh rtcp_from_spec.h
+include/RTCP.hh: include/RTPSink.hh include/RTPSource.hh include/SRTPCryptographicContext.hh
+rtcp_from_spec.$(C): rtcp_from_spec.h
+GenericMediaServer.$(CPP): include/GenericMediaServer.hh
+include/GenericMediaServer.hh: include/ServerMediaSession.hh
+RTSPServer.$(CPP): include/RTSPServer.hh include/RTSPCommon.hh include/RTSPRegisterSender.hh include/ProxyServerMediaSession.hh include/Base64.hh
+include/RTSPServer.hh: include/GenericMediaServer.hh include/DigestAuthentication.hh
+RTSPServerRegister.$(CPP): include/RTSPServer.hh
+include/ServerMediaSession.hh: include/RTCP.hh
+RTSPClient.$(CPP): include/RTSPClient.hh include/RTSPCommon.hh include/Base64.hh include/Locale.hh include/ourMD5.hh
+include/RTSPClient.hh: include/MediaSession.hh include/DigestAuthentication.hh
+RTSPCommon.$(CPP): include/RTSPCommon.hh include/Locale.hh
+RTSPServerSupportingHTTPStreaming.$(CPP): include/RTSPServerSupportingHTTPStreaming.hh include/RTSPCommon.hh
+include/RTSPServerSupportingHTTPStreaming.hh: include/RTSPServer.hh include/ByteStreamMemoryBufferSource.hh include/TCPStreamSink.hh
+RTSPRegisterSender.$(CPP): include/RTSPRegisterSender.hh
+include/RTSPRegisterSender.hh: include/RTSPClient.hh
+SIPClient.$(CPP): include/SIPClient.hh
+include/SIPClient.hh: include/MediaSession.hh include/DigestAuthentication.hh
+MediaSession.$(CPP): include/liveMedia.hh include/Locale.hh include/Base64.hh
+include/MediaSession.hh: include/RTCP.hh include/FramedFilter.hh include/SRTPCryptographicContext.hh
+ServerMediaSession.$(CPP): include/ServerMediaSession.hh
+PassiveServerMediaSubsession.$(CPP): include/PassiveServerMediaSubsession.hh
+include/PassiveServerMediaSubsession.hh: include/ServerMediaSession.hh include/RTPSink.hh include/RTCP.hh
+OnDemandServerMediaSubsession.$(CPP): include/OnDemandServerMediaSubsession.hh
+include/OnDemandServerMediaSubsession.hh: include/ServerMediaSession.hh include/RTPSink.hh include/BasicUDPSink.hh include/RTCP.hh
+FileServerMediaSubsession.$(CPP): include/FileServerMediaSubsession.hh
+include/FileServerMediaSubsession.hh: include/OnDemandServerMediaSubsession.hh
+MPEG4VideoFileServerMediaSubsession.$(CPP): include/MPEG4VideoFileServerMediaSubsession.hh include/MPEG4ESVideoRTPSink.hh include/ByteStreamFileSource.hh include/MPEG4VideoStreamFramer.hh
+include/MPEG4VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+H264VideoFileServerMediaSubsession.$(CPP): include/H264VideoFileServerMediaSubsession.hh include/H264VideoRTPSink.hh include/ByteStreamFileSource.hh include/H264VideoStreamFramer.hh
+include/H264VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+H265VideoFileServerMediaSubsession.$(CPP): include/H265VideoFileServerMediaSubsession.hh include/H265VideoRTPSink.hh include/ByteStreamFileSource.hh include/H265VideoStreamFramer.hh
+include/H265VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+H263plusVideoFileServerMediaSubsession.$(CPP): include/H263plusVideoFileServerMediaSubsession.hh include/H263plusVideoRTPSink.hh include/ByteStreamFileSource.hh include/H263plusVideoStreamFramer.hh
+include/H263plusVideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+WAVAudioFileServerMediaSubsession.$(CPP): include/WAVAudioFileServerMediaSubsession.hh include/WAVAudioFileSource.hh include/uLawAudioFilter.hh include/SimpleRTPSink.hh
+include/WAVAudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+AMRAudioFileServerMediaSubsession.$(CPP): include/AMRAudioFileServerMediaSubsession.hh include/AMRAudioRTPSink.hh include/AMRAudioFileSource.hh
+include/AMRAudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+MP3AudioFileServerMediaSubsession.$(CPP): include/MP3AudioFileServerMediaSubsession.hh include/MPEG1or2AudioRTPSink.hh include/MP3ADURTPSink.hh include/MP3FileSource.hh include/MP3ADU.hh
+include/MP3AudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MP3ADUinterleaving.hh
+MPEG1or2VideoFileServerMediaSubsession.$(CPP): include/MPEG1or2VideoFileServerMediaSubsession.hh include/MPEG1or2VideoRTPSink.hh include/ByteStreamFileSource.hh include/MPEG1or2VideoStreamFramer.hh
+include/MPEG1or2VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+MPEG1or2FileServerDemux.$(CPP): include/MPEG1or2FileServerDemux.hh include/MPEG1or2DemuxedServerMediaSubsession.hh include/ByteStreamFileSource.hh
+include/MPEG1or2FileServerDemux.hh: include/ServerMediaSession.hh include/MPEG1or2DemuxedElementaryStream.hh
+MPEG1or2DemuxedServerMediaSubsession.$(CPP): include/MPEG1or2DemuxedServerMediaSubsession.hh include/MPEG1or2AudioStreamFramer.hh include/MPEG1or2AudioRTPSink.hh include/MPEG1or2VideoStreamFramer.hh include/MPEG1or2VideoRTPSink.hh include/AC3AudioStreamFramer.hh include/AC3AudioRTPSink.hh include/ByteStreamFileSource.hh
+include/MPEG1or2DemuxedServerMediaSubsession.hh: include/OnDemandServerMediaSubsession.hh include/MPEG1or2FileServerDemux.hh
+MPEG2TransportFileServerMediaSubsession.$(CPP): include/MPEG2TransportFileServerMediaSubsession.hh include/SimpleRTPSink.hh
+include/MPEG2TransportFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MPEG2TransportStreamFramer.hh include/ByteStreamFileSource.hh include/MPEG2TransportStreamTrickModeFilter.hh include/MPEG2TransportStreamFromESSource.hh
+ADTSAudioFileServerMediaSubsession.$(CPP): include/ADTSAudioFileServerMediaSubsession.hh include/ADTSAudioFileSource.hh include/MPEG4GenericRTPSink.hh
+include/ADTSAudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+DVVideoFileServerMediaSubsession.$(CPP): include/DVVideoFileServerMediaSubsession.hh include/DVVideoRTPSink.hh include/ByteStreamFileSource.hh include/DVVideoStreamFramer.hh
+include/DVVideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+AC3AudioFileServerMediaSubsession.$(CPP): include/AC3AudioFileServerMediaSubsession.hh include/AC3AudioRTPSink.hh include/ByteStreamFileSource.hh include/AC3AudioStreamFramer.hh
+include/AC3AudioFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+#JPEG2000VideoFileServerMediaSubsession.$(CPP): include/JPEG2000VideoFileServerMediaSubsession.hh
+#include/JPEG2000VideoFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh
+MPEG2TransportUDPServerMediaSubsession.$(CPP): include/MPEG2TransportUDPServerMediaSubsession.hh include/BasicUDPSource.hh include/SimpleRTPSource.hh include/MPEG2TransportStreamFramer.hh include/SimpleRTPSink.hh
+include/MPEG2TransportUDPServerMediaSubsession.hh: include/OnDemandServerMediaSubsession.hh
+ProxyServerMediaSession.$(CPP): include/liveMedia.hh include/RTSPCommon.hh
+include/ProxyServerMediaSession.hh: include/ServerMediaSession.hh include/MediaSession.hh include/RTSPClient.hh include/MediaTranscodingTable.hh
+include/MediaTranscodingTable.hh: include/FramedFilter.hh include/MediaSession.hh
+QuickTimeFileSink.$(CPP): include/QuickTimeFileSink.hh include/InputFile.hh include/OutputFile.hh include/QuickTimeGenericRTPSource.hh include/H263plusVideoRTPSource.hh include/MPEG4GenericRTPSource.hh include/MPEG4LATMAudioRTPSource.hh
+include/QuickTimeFileSink.hh: include/MediaSession.hh
+QuickTimeGenericRTPSource.$(CPP): include/QuickTimeGenericRTPSource.hh
+include/QuickTimeGenericRTPSource.hh: include/MultiFramedRTPSource.hh
+AVIFileSink.$(CPP): include/AVIFileSink.hh include/InputFile.hh include/OutputFile.hh
+include/AVIFileSink.hh: include/MediaSession.hh
+MatroskaFile.$(CPP): MatroskaFileParser.hh MatroskaDemuxedTrack.hh include/ByteStreamFileSource.hh include/H264VideoStreamDiscreteFramer.hh include/H265VideoStreamDiscreteFramer.hh include/MPEG1or2AudioRTPSink.hh include/MPEG4GenericRTPSink.hh include/AC3AudioRTPSink.hh include/SimpleRTPSink.hh include/VorbisAudioRTPSink.hh include/H264VideoRTPSink.hh include/H265VideoRTPSink.hh include/VP8VideoRTPSink.hh include/VP9VideoRTPSink.hh include/T140TextRTPSink.hh include/Base64.hh include/H264VideoFileSink.hh include/H265VideoFileSink.hh include/AMRAudioFileSink.hh include/OggFileSink.hh
+MatroskaFileParser.hh: StreamParser.hh include/MatroskaFile.hh EBMLNumber.hh
+include/MatroskaFile.hh: include/RTPSink.hh include/FileSink.hh
+MatroskaDemuxedTrack.hh: include/FramedSource.hh
+MatroskaFileParser.$(CPP): MatroskaFileParser.hh MatroskaDemuxedTrack.hh include/ByteStreamFileSource.hh
+EBMLNumber.$(CPP): EBMLNumber.hh
+MatroskaDemuxedTrack.$(CPP): MatroskaDemuxedTrack.hh include/MatroskaFile.hh
+MatroskaFileServerMediaSubsession.$(CPP): MatroskaFileServerMediaSubsession.hh MatroskaDemuxedTrack.hh include/FramedFilter.hh
+MatroskaFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh
+MP3AudioMatroskaFileServerMediaSubsession.$(CPP): MP3AudioMatroskaFileServerMediaSubsession.hh MatroskaDemuxedTrack.hh
+MP3AudioMatroskaFileServerMediaSubsession.hh: include/MP3AudioFileServerMediaSubsession.hh include/MatroskaFileServerDemux.hh
+MatroskaFileServerDemux.$(CPP): include/MatroskaFileServerDemux.hh MP3AudioMatroskaFileServerMediaSubsession.hh MatroskaFileServerMediaSubsession.hh
+include/MatroskaFileServerDemux.hh: include/ServerMediaSession.hh include/MatroskaFile.hh
+OggFile.$(CPP): OggFileParser.hh OggDemuxedTrack.hh include/ByteStreamFileSource.hh include/VorbisAudioRTPSink.hh include/SimpleRTPSink.hh include/TheoraVideoRTPSink.hh
+OggFileParser.hh: StreamParser.hh include/OggFile.hh
+include/OggFile.hh: include/RTPSink.hh
+OggDemuxedTrack.hh: include/FramedSource.hh
+OggFileParser.$(CPP): OggFileParser.hh OggDemuxedTrack.hh
+OggDemuxedTrack.$(CPP): OggDemuxedTrack.hh include/OggFile.hh
+OggFileServerMediaSubsession.$(CPP): OggFileServerMediaSubsession.hh OggDemuxedTrack.hh include/FramedFilter.hh
+OggFileServerMediaSubsession.hh: include/FileServerMediaSubsession.hh include/OggFileServerDemux.hh
+OggFileServerDemux.$(CPP): include/OggFileServerDemux.hh OggFileServerMediaSubsession.hh
+include/OggFileServerDemux.hh: include/ServerMediaSession.hh include/OggFile.hh
+MPEG2TransportStreamDemux.$(CPP): include/MPEG2TransportStreamDemux.hh MPEG2TransportStreamParser.hh
+include/MPEG2TransportStreamDemux.hh: include/FramedSource.hh
+MPEG2TransportStreamParser.hh: StreamParser.hh MPEG2TransportStreamDemuxedTrack.hh include/MediaSink.hh
+MPEG2TransportStreamDemuxedTrack.hh: include/MPEG2TransportStreamDemux.hh
+MPEG2TransportStreamDemuxedTrack.$(CPP): MPEG2TransportStreamParser.hh
+MPEG2TransportStreamParser.$(CPP): MPEG2TransportStreamParser.hh
+MPEG2TransportStreamParser_PAT.$(CPP): MPEG2TransportStreamParser.hh
+MPEG2TransportStreamParser_PMT.$(CPP): MPEG2TransportStreamParser.hh
+MPEG2TransportStreamParser_STREAM.$(CPP): MPEG2TransportStreamParser.hh include/FileSink.hh
+HLSSegmenter.$(CPP): include/HLSSegmenter.hh include/OutputFile.hh include/MPEG2TransportStreamMultiplexor.hh
+include/HLSSegmenter.hh: include/MediaSink.hh
+TLSState.$(CPP): include/TLSState.hh include/RTSPClient.hh
+MIKEY.$(CPP): include/MIKEY.hh
+HMAC_SHA1.$(CPP): include/HMAC_SHA1.hh
+include/SRTPCryptographicContext.hh: include/MIKEY.hh
+SRTPCryptographicContext.$(CPP): include/SRTPCryptographicContext.hh include/HMAC_SHA1.hh
+include/HMAC_SHA1.hh: include/HMAC_hash.hh
+BitVector.$(CPP): include/BitVector.hh
+StreamParser.$(CPP): StreamParser.hh
+DigestAuthentication.$(CPP): include/DigestAuthentication.hh include/ourMD5.hh
+ourMD5.$(CPP): include/ourMD5.hh
+Base64.$(CPP): include/Base64.hh
+Locale.$(CPP): include/Locale.hh
+
+include/liveMedia.hh:: include/JPEG2000VideoRTPSource.hh include/JPEG2000VideoRTPSink.hh
+#include/liveMedia.hh:: include/JPEG2000VideoStreamFramer.hh include/JPEG2000VideoFileServerMediaSubsession.hh
+
+include/liveMedia.hh:: include/MPEG1or2AudioRTPSink.hh include/MP3ADURTPSink.hh include/MPEG1or2VideoRTPSink.hh include/MPEG4ESVideoRTPSink.hh include/BasicUDPSink.hh include/AMRAudioFileSink.hh include/H264VideoFileSink.hh include/H265VideoFileSink.hh include/OggFileSink.hh include/GSMAudioRTPSink.hh include/H263plusVideoRTPSink.hh include/H264VideoRTPSink.hh include/H265VideoRTPSink.hh include/DVVideoRTPSource.hh include/DVVideoRTPSink.hh include/DVVideoStreamFramer.hh include/H264VideoStreamFramer.hh include/H265VideoStreamFramer.hh include/H264VideoStreamDiscreteFramer.hh include/H265VideoStreamDiscreteFramer.hh include/JPEGVideoRTPSink.hh include/SimpleRTPSink.hh include/uLawAudioFilter.hh include/MPEG2IndexFromTransportStream.hh include/MPEG2TransportStreamTrickModeFilter.hh include/ByteStreamMultiFileSource.hh include/ByteStreamMemoryBufferSource.hh include/BasicUDPSource.hh include/SimpleRTPSource.hh include/MPEG1or2AudioRTPSource.hh include/MPEG4LATMAudioRTPSource.hh include/MPEG4LATMAudioRTPSink.hh include/MPEG4ESVideoRTPSource.hh include/MPEG4GenericRTPSource.hh include/MP3ADURTPSource.hh include/QCELPAudioRTPSource.hh include/AMRAudioRTPSource.hh include/JPEGVideoRTPSource.hh include/JPEGVideoSource.hh include/MPEG1or2VideoRTPSource.hh include/VorbisAudioRTPSource.hh include/TheoraVideoRTPSource.hh include/VP8VideoRTPSource.hh include/VP9VideoRTPSource.hh include/RawVideoRTPSource.hh
+
+include/liveMedia.hh:: include/MPEG2TransportStreamFromPESSource.hh include/MPEG2TransportStreamFromESSource.hh include/MPEG2TransportStreamFramer.hh include/ADTSAudioFileSource.hh include/H261VideoRTPSource.hh include/H263plusVideoRTPSource.hh include/H264VideoRTPSource.hh include/H265VideoRTPSource.hh include/MP3FileSource.hh include/MP3ADU.hh include/MP3ADUinterleaving.hh include/MP3Transcoder.hh include/MPEG1or2DemuxedElementaryStream.hh include/MPEG1or2AudioStreamFramer.hh include/MPEG1or2VideoStreamDiscreteFramer.hh include/MPEG4VideoStreamDiscreteFramer.hh include/H263plusVideoStreamFramer.hh include/AC3AudioStreamFramer.hh include/AC3AudioRTPSource.hh include/AC3AudioRTPSink.hh include/VorbisAudioRTPSink.hh include/TheoraVideoRTPSink.hh include/VP8VideoRTPSink.hh include/VP9VideoRTPSink.hh include/MPEG4GenericRTPSink.hh include/DeviceSource.hh include/AudioInputDevice.hh include/WAVAudioFileSource.hh include/StreamReplicator.hh include/RTSPRegisterSender.hh
+
+include/liveMedia.hh:: include/RTSPServerSupportingHTTPStreaming.hh include/RTSPClient.hh include/SIPClient.hh include/QuickTimeFileSink.hh include/QuickTimeGenericRTPSource.hh include/AVIFileSink.hh include/PassiveServerMediaSubsession.hh include/MPEG4VideoFileServerMediaSubsession.hh include/H264VideoFileServerMediaSubsession.hh include/H265VideoFileServerMediaSubsession.hh include/WAVAudioFileServerMediaSubsession.hh include/AMRAudioFileServerMediaSubsession.hh include/AMRAudioFileSource.hh include/AMRAudioRTPSink.hh include/T140TextRTPSink.hh include/TCPStreamSink.hh include/MP3AudioFileServerMediaSubsession.hh include/MPEG1or2VideoFileServerMediaSubsession.hh include/MPEG1or2FileServerDemux.hh include/MPEG2TransportFileServerMediaSubsession.hh include/H263plusVideoFileServerMediaSubsession.hh include/ADTSAudioFileServerMediaSubsession.hh include/DVVideoFileServerMediaSubsession.hh include/AC3AudioFileServerMediaSubsession.hh include/MPEG2TransportUDPServerMediaSubsession.hh include/MatroskaFileServerDemux.hh include/OggFileServerDemux.hh include/ProxyServerMediaSession.hh include/HLSSegmenter.hh
+
+clean:
+ -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+install: install1 $(INSTALL2)
+install1: $(LIVEMEDIA_LIB)
+ install -d $(DESTDIR)$(PREFIX)/include/liveMedia $(DESTDIR)$(LIBDIR)
+ install -m 644 include/*.hh $(DESTDIR)$(PREFIX)/include/liveMedia
+ install -m 644 $(LIVEMEDIA_LIB) $(DESTDIR)$(LIBDIR)
+install_shared_libraries: $(LIVEMEDIA_LIB)
+ ln -fs $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).$(SHORT_LIB_SUFFIX)
+ ln -fs $(NAME).$(LIB_SUFFIX) $(DESTDIR)$(LIBDIR)/$(NAME).so
+
+##### Any additional, platform-specific rules come here:
diff --git a/liveMedia/MatroskaDemuxedTrack.cpp b/liveMedia/MatroskaDemuxedTrack.cpp
new file mode 100644
index 0000000..7e40edf
--- /dev/null
+++ b/liveMedia/MatroskaDemuxedTrack.cpp
@@ -0,0 +1,47 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media track, demultiplexed from a Matroska file
+// Implementation
+
+#include "MatroskaDemuxedTrack.hh"
+#include "MatroskaFile.hh"
+
+// Seek this track by delegating to our demultiplexor.  "seekNPT" (normal play
+// time, in seconds) is passed by reference, so the demux may adjust it to the
+// actual position seeked to.
+void MatroskaDemuxedTrack::seekToTime(double& seekNPT) {
+  fOurSourceDemux.seekToTime(seekNPT);
+}
+
+// Private constructor: we are created only by a "MatroskaDemux" (a friend).
+// Records which track number (within the demux) we represent, and zeroes the
+// bookkeeping used for presentation-time/duration adjustment.
+MatroskaDemuxedTrack::MatroskaDemuxedTrack(UsageEnvironment& env, unsigned trackNumber, MatroskaDemux& sourceDemux)
+  : FramedSource(env),
+    fOurTrackNumber(trackNumber), fOurSourceDemux(sourceDemux), fDurationImbalance(0),
+    fOpusTrackNumber(0) {
+  fPrevPresentationTime.tv_sec = 0; fPrevPresentationTime.tv_usec = 0; // no frame delivered yet
+}
+
+MatroskaDemuxedTrack::~MatroskaDemuxedTrack() {
+  // Unregister ourselves from our demultiplexor, so it no longer delivers to us:
+  fOurSourceDemux.removeTrack(fOurTrackNumber);
+}
+
+// FramedSource virtual: we don't read the file ourselves; instead we ask the
+// demux to continue reading.  Frame delivery presumably happens later, via the
+// demux/parser writing into our buffer accessors -- see MatroskaDemuxedTrack.hh.
+void MatroskaDemuxedTrack::doGetNextFrame() {
+  fOurSourceDemux.continueReading();
+}
+
+// Returns the MIME type of this track, by looking up our track record in the
+// demux's underlying "MatroskaFile".
+char const* MatroskaDemuxedTrack::MIMEtype() const {
+  MatroskaTrack* track = fOurSourceDemux.fOurFile.lookup(fOurTrackNumber);
+  if (track == NULL) return "(unknown)"; // shouldn't happen
+  return track->mimeType;
+}
diff --git a/liveMedia/MatroskaDemuxedTrack.hh b/liveMedia/MatroskaDemuxedTrack.hh
new file mode 100644
index 0000000..c94b3a5
--- /dev/null
+++ b/liveMedia/MatroskaDemuxedTrack.hh
@@ -0,0 +1,64 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media track, demultiplexed from a Matroska file
+// C++ header
+
+#ifndef _MATROSKA_DEMUXED_TRACK_HH
+#define _MATROSKA_DEMUXED_TRACK_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class MatroskaDemux; // forward
+
+// A single media track, demultiplexed from a Matroska file.  Instances are
+// created/destroyed only by "MatroskaDemux"; the parser delivers frame data
+// directly into our (inherited) delivery buffer via the accessors below.
+class MatroskaDemuxedTrack: public FramedSource {
+public:
+  void seekToTime(double& seekNPT); // delegates to our demux; may adjust "seekNPT"
+
+private: // We are created only by a MatroskaDemux (a friend)
+  friend class MatroskaDemux;
+  MatroskaDemuxedTrack(UsageEnvironment& env, unsigned trackNumber, MatroskaDemux& sourceDemux);
+  virtual ~MatroskaDemuxedTrack();
+
+private:
+  // redefined virtual functions:
+  virtual void doGetNextFrame();
+  virtual char const* MIMEtype() const;
+
+private: // We are accessed only by MatroskaDemux and by MatroskaFileParser (a friend)
+  friend class MatroskaFileParser;
+  // Accessors exposing the delivery state inherited from "FramedSource",
+  // so that the parser can fill in our next frame in place:
+  unsigned char* to() { return fTo; }
+  unsigned maxSize() { return fMaxSize; }
+  unsigned& frameSize() { return fFrameSize; }
+  unsigned& numTruncatedBytes() { return fNumTruncatedBytes; }
+  struct timeval& presentationTime() { return fPresentationTime; }
+  unsigned& durationInMicroseconds() { return fDurationInMicroseconds; }
+
+  // Bookkeeping used by the parser for timestamp/duration adjustment:
+  struct timeval& prevPresentationTime() { return fPrevPresentationTime; }
+  int& durationImbalance() { return fDurationImbalance; }
+
+private:
+  unsigned fOurTrackNumber;        // our track number within "fOurSourceDemux"
+  MatroskaDemux& fOurSourceDemux;  // the demux that created (and owns) us
+  struct timeval fPrevPresentationTime; // presentation time of the previous frame
+  int fDurationImbalance;
+  unsigned fOpusTrackNumber; // hack for Opus audio
+};
+
+#endif
diff --git a/liveMedia/MatroskaFile.cpp b/liveMedia/MatroskaFile.cpp
new file mode 100644
index 0000000..9e046fd
--- /dev/null
+++ b/liveMedia/MatroskaFile.cpp
@@ -0,0 +1,1045 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that encapsulates a Matroska file.
+// Implementation
+
+#include "MatroskaFileParser.hh"
+#include "MatroskaDemuxedTrack.hh"
+#include <ByteStreamFileSource.hh>
+#include <H264VideoStreamDiscreteFramer.hh>
+#include <H265VideoStreamDiscreteFramer.hh>
+#include <MPEG1or2AudioRTPSink.hh>
+#include <MPEG4GenericRTPSink.hh>
+#include <AC3AudioRTPSink.hh>
+#include <SimpleRTPSink.hh>
+#include <VorbisAudioRTPSink.hh>
+#include <H264VideoRTPSink.hh>
+#include <H265VideoRTPSink.hh>
+#include <VP8VideoRTPSink.hh>
+#include <VP9VideoRTPSink.hh>
+#include <TheoraVideoRTPSink.hh>
+#include <RawVideoRTPSink.hh>
+#include <T140TextRTPSink.hh>
+#include <Base64.hh>
+#include <H264VideoFileSink.hh>
+#include <H265VideoFileSink.hh>
+#include <AMRAudioFileSink.hh>
+#include <OggFileSink.hh>
+
+////////// CuePoint definition //////////
+
+// A seek-index entry: maps a cue time to a 'Cluster' file offset and a block
+// number within that cluster.  Cue points are kept in an AVL tree for
+// efficient, balanced lookup.
+class CuePoint {
+public:
+  CuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster/* 1-based */);
+  virtual ~CuePoint();
+
+  static void addCuePoint(CuePoint*& root, double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster/* 1-based */,
+                          Boolean& needToReviseBalanceOfParent);
+  // If "cueTime" == "root.fCueTime", replace the existing data, otherwise add to the left or right subtree.
+  // (Note that this is a static member function because - as a result of tree rotation - "root" might change.)
+
+  Boolean lookup(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster);
+
+  static void fprintf(FILE* fid, CuePoint* cuePoint); // used for debugging; it's static to allow for "cuePoint == NULL"
+
+private:
+  // The "CuePoint" tree is implemented as an AVL Tree, to keep it balanced (for efficient lookup).
+  CuePoint* fSubTree[2]; // 0 => left; 1 => right
+  CuePoint* left() const { return fSubTree[0]; }
+  CuePoint* right() const { return fSubTree[1]; }
+  char fBalance; // height of right subtree - height of left subtree
+
+  static void rotate(unsigned direction/*0 => left; 1 => right*/, CuePoint*& root); // used to keep the tree in balance
+
+  double fCueTime;
+  u_int64_t fClusterOffsetInFile;
+  unsigned fBlockNumWithinCluster; // 0-based (note: the constructor takes a 1-based number)
+};
+
+UsageEnvironment& operator<<(UsageEnvironment& env, const CuePoint* cuePoint); // used for debugging
+
+
+////////// MatroskaTrackTable definition /////////
+
+// For looking up and iterating over the file's tracks:
+class MatroskaTrackTable {
+public:
+ MatroskaTrackTable();
+ virtual ~MatroskaTrackTable();
+
+ void add(MatroskaTrack* newTrack, unsigned trackNumber);
+ MatroskaTrack* lookup(unsigned trackNumber);
+
+ unsigned numTracks() const;
+
+ class Iterator {
+ public:
+ Iterator(MatroskaTrackTable& ourTable);
+ virtual ~Iterator();
+ MatroskaTrack* next();
+ private:
+ HashTable::Iterator* fIter;
+ };
+
+private:
+ friend class Iterator;
+ HashTable* fTable;
+};
+
+
+
+////////// MatroskaFile implementation //////////
+
+// Factory function.  Note that creation is asynchronous: the new object parses
+// the file's 'Track' headers first, and is handed to the caller via the
+// "onCreation" callback (which is why nothing is returned here).
+void MatroskaFile
+::createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData,
+	    char const* preferredLanguage) {
+  new MatroskaFile(env, fileName, onCreation, onCreationClientData, preferredLanguage);
+}
+
+// Private constructor (use "createNew()").  Sets up the track and demux
+// tables, then kicks off asynchronous parsing of the file's 'Track' headers;
+// "handleEndOfTrackHeaderParsing()" completes initialization.
+MatroskaFile::MatroskaFile(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData,
+			   char const* preferredLanguage)
+  : Medium(env),
+    fFileName(strDup(fileName)), fOnCreation(onCreation), fOnCreationClientData(onCreationClientData),
+    fPreferredLanguage(strDup(preferredLanguage)),
+    fTimecodeScale(1000000), fSegmentDuration(0.0), fSegmentDataOffset(0), fClusterOffset(0), fCuesOffset(0), fCuePoints(NULL),
+    fChosenVideoTrackNumber(0), fChosenAudioTrackNumber(0), fChosenSubtitleTrackNumber(0) {
+  fTrackTable = new MatroskaTrackTable;
+  fDemuxesTable = HashTable::create(ONE_WORD_HASH_KEYS);
+
+  FramedSource* inputSource = ByteStreamFileSource::createNew(envir(), fileName);
+  if (inputSource == NULL) {
+    // The specified input file does not exist!
+    fParserForInitialization = NULL;
+    handleEndOfTrackHeaderParsing(); // we have no file, and thus no tracks, but we still need to signal this
+  } else {
+    // Initialize ourselves by parsing the file's 'Track' headers:
+    fParserForInitialization = new MatroskaFileParser(*this, inputSource, handleEndOfTrackHeaderParsing, this, NULL);
+  }
+}
+
+MatroskaFile::~MatroskaFile() {
+  delete fParserForInitialization; // NULL if initialization already completed
+  delete fCuePoints;               // deletes the whole cue-point (AVL) tree
+
+  // Delete any outstanding "MatroskaDemux"s, and the table for them:
+  MatroskaDemux* demux;
+  while ((demux = (MatroskaDemux*)fDemuxesTable->RemoveNext()) != NULL) {
+    delete demux;
+  }
+  delete fDemuxesTable;
+  delete fTrackTable;
+
+  // These were copied with "strDup()", so are heap-allocated char arrays:
+  delete[] (char*)fPreferredLanguage;
+  delete[] (char*)fFileName;
+}
+
+// Static trampoline, used as the parser's completion callback; "clientData"
+// is the "MatroskaFile" being initialized.
+void MatroskaFile::handleEndOfTrackHeaderParsing(void* clientData) {
+  ((MatroskaFile*)clientData)->handleEndOfTrackHeaderParsing();
+}
+
+// A scratch record used (only) by "handleEndOfTrackHeaderParsing()" below,
+// for choosing the preferred track of each type:
+class TrackChoiceRecord {
+public:
+  unsigned trackNumber;
+  u_int8_t trackType;
+  unsigned choiceFlags; // bit flags; larger value => more preferred (see below)
+};
+
+// Called when the 'Track' headers have all been parsed (or when there is no
+// file at all).  Chooses the video, audio, and subtitle tracks to play,
+// deletes the initialization parser, and signals the creation callback.
+void MatroskaFile::handleEndOfTrackHeaderParsing() {
+  // Having parsed all of our track headers, iterate through the tracks to figure out which ones should be played.
+  // The Matroska 'specification' is rather imprecise about this (as usual). However, we use the following algorithm:
+  // - Use one (but no more) enabled track of each type (video, audio, subtitle). (Ignore all tracks that are not 'enabled'.)
+  // - For each track type, choose the one that's 'forced'.
+  // - If more than one is 'forced', choose the first one that matches our preferred language, or the first if none matches.
+  // - If none is 'forced', choose the one that's 'default'.
+  // - If more than one is 'default', choose the first one that matches our preferred language, or the first if none matches.
+  // - If none is 'default', choose the first one that matches our preferred language, or the first if none matches.
+  unsigned numTracks = fTrackTable->numTracks();
+  if (numTracks > 0) {
+    TrackChoiceRecord* trackChoice = new TrackChoiceRecord[numTracks];
+    unsigned numEnabledTracks = 0;
+    MatroskaTrackTable::Iterator iter(*fTrackTable);
+    MatroskaTrack* track;
+    while ((track = iter.next()) != NULL) {
+      if (!track->isEnabled || track->trackType == 0 || track->mimeType[0] == '\0') continue; // track not enabled, or not fully-defined
+
+      trackChoice[numEnabledTracks].trackNumber = track->trackNumber;
+      trackChoice[numEnabledTracks].trackType = track->trackType;
+
+      // Assign flags for this track so that, when sorted, the largest value becomes our choice:
+      // 'forced' (4) outranks 'default' (2), which outranks a language match (1):
+      unsigned choiceFlags = 0;
+      if (fPreferredLanguage != NULL && track->language != NULL && strcmp(fPreferredLanguage, track->language) == 0) {
+        // This track matches our preferred language:
+        choiceFlags |= 1;
+      }
+      if (track->isForced) {
+        choiceFlags |= 4;
+      } else if (track->isDefault) {
+        choiceFlags |= 2;
+      }
+      trackChoice[numEnabledTracks].choiceFlags = choiceFlags;
+
+      ++numEnabledTracks;
+    }
+
+    // Choose the desired track for each track type:
+    // (track types are bit flags, iterated from video up to - but not including - 'other')
+    for (u_int8_t trackType = 0x01; trackType != MATROSKA_TRACK_TYPE_OTHER; trackType <<= 1) {
+      int bestNum = -1;
+      int bestChoiceFlags = -1;
+      for (unsigned i = 0; i < numEnabledTracks; ++i) {
+        if (trackChoice[i].trackType == trackType && (int)trackChoice[i].choiceFlags > bestChoiceFlags) {
+          bestNum = i;
+          bestChoiceFlags = (int)trackChoice[i].choiceFlags;
+        }
+      }
+      if (bestChoiceFlags >= 0) { // There is a track for this track type
+        if (trackType == MATROSKA_TRACK_TYPE_VIDEO) fChosenVideoTrackNumber = trackChoice[bestNum].trackNumber;
+        else if (trackType == MATROSKA_TRACK_TYPE_AUDIO) fChosenAudioTrackNumber = trackChoice[bestNum].trackNumber;
+        else fChosenSubtitleTrackNumber = trackChoice[bestNum].trackNumber;
+      }
+    }
+
+    delete[] trackChoice;
+  }
+
+#ifdef DEBUG
+  if (fChosenVideoTrackNumber > 0) fprintf(stderr, "Chosen video track: #%d\n", fChosenVideoTrackNumber); else fprintf(stderr, "No chosen video track\n");
+  if (fChosenAudioTrackNumber > 0) fprintf(stderr, "Chosen audio track: #%d\n", fChosenAudioTrackNumber); else fprintf(stderr, "No chosen audio track\n");
+  if (fChosenSubtitleTrackNumber > 0) fprintf(stderr, "Chosen subtitle track: #%d\n", fChosenSubtitleTrackNumber); else fprintf(stderr, "No chosen subtitle track\n");
+#endif
+
+  // Delete our parser, because it's done its job now:
+  delete fParserForInitialization; fParserForInitialization = NULL;
+
+  // Finally, signal our caller that we've been created and initialized:
+  if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData);
+}
+
+// Looks up a track by number; returns NULL if no such track exists.
+MatroskaTrack* MatroskaFile::lookup(unsigned trackNumber) const {
+  return fTrackTable->lookup(trackNumber);
+}
+
+// Creates a new demultiplexor for this file, and registers it (keyed by its
+// own pointer) so that our destructor can clean it up.
+MatroskaDemux* MatroskaFile::newDemux() {
+  MatroskaDemux* demux = new MatroskaDemux(*this);
+  fDemuxesTable->Add((char const*)demux, demux);
+
+  return demux;
+}
+
+// Unregisters a demux (it is not deleted here; we merely stop tracking it).
+void MatroskaFile::removeDemux(MatroskaDemux* demux) {
+  fDemuxesTable->Remove((char const*)demux);
+}
+
+#define getPrivByte(b) if (n == 0) break; else do {b = *p++; --n;} while (0) /* Vorbis/Theora configuration header parsing */
+#define CHECK_PTR if (ptr >= limit) break /* H.264/H.265 parsing */
+#define NUM_BYTES_REMAINING (unsigned)(limit - ptr) /* H.264/H.265 parsing */
+
+// Extracts one SPS and one PPS NAL unit from an H.264 track's 'Codec Private'
+// data (which uses the avcC-style layout; the parameter-set records start at
+// byte 5).  On success, "sps"/"pps" point to newly-allocated (new[]) copies
+// that the caller must delete[]; on error, both are NULL with size 0.
+// "CHECK_PTR" (defined above) breaks out of the enclosing loop/do-while if we
+// would read past "limit".
+void MatroskaFile::getH264ConfigData(MatroskaTrack const* track,
+                                     u_int8_t*& sps, unsigned& spsSize,
+                                     u_int8_t*& pps, unsigned& ppsSize) {
+  sps = pps = NULL;
+  spsSize = ppsSize = 0;
+
+  do {
+    if (track == NULL) break;
+
+    // Use our track's 'Codec Private' data: Bytes 5 and beyond contain SPS and PPSs:
+    if (track->codecPrivateSize < 6) break;
+    u_int8_t* SPSandPPSBytes = &track->codecPrivate[5];
+    unsigned numSPSandPPSBytes = track->codecPrivateSize - 5;
+
+    // Extract, from "SPSandPPSBytes", one SPS NAL unit, and one PPS NAL unit.
+    // (I hope one is all we need of each.)
+    unsigned i;
+    u_int8_t* ptr = SPSandPPSBytes;
+    u_int8_t* limit = &SPSandPPSBytes[numSPSandPPSBytes];
+
+    unsigned numSPSs = (*ptr++)&0x1F; CHECK_PTR;
+    for (i = 0; i < numSPSs; ++i) {
+      unsigned spsSize1 = (*ptr++)<<8; CHECK_PTR; // 2-byte big-endian NAL-unit length
+      spsSize1 |= *ptr++; CHECK_PTR;
+
+      if (spsSize1 > NUM_BYTES_REMAINING) break;
+      u_int8_t nal_unit_type = ptr[0]&0x1F;
+      if (sps == NULL && nal_unit_type == 7/*sanity check*/) { // save the first one
+        spsSize = spsSize1;
+        sps = new u_int8_t[spsSize];
+        memmove(sps, ptr, spsSize);
+      }
+      ptr += spsSize1;
+    }
+
+    unsigned numPPSs = (*ptr++)&0x1F; CHECK_PTR;
+    for (i = 0; i < numPPSs; ++i) {
+      unsigned ppsSize1 = (*ptr++)<<8; CHECK_PTR; // 2-byte big-endian NAL-unit length
+      ppsSize1 |= *ptr++; CHECK_PTR;
+
+      if (ppsSize1 > NUM_BYTES_REMAINING) break;
+      u_int8_t nal_unit_type = ptr[0]&0x1F;
+      if (pps == NULL && nal_unit_type == 8/*sanity check*/) { // save the first one
+        ppsSize = ppsSize1;
+        pps = new u_int8_t[ppsSize];
+        memmove(pps, ptr, ppsSize);
+      }
+      ptr += ppsSize1;
+    }
+
+    return;
+  } while (0);
+
+  // An error occurred:
+  delete[] sps; sps = NULL; spsSize = 0;
+  delete[] pps; pps = NULL; ppsSize = 0;
+}
+
+// Extracts one VPS, one SPS, and one PPS NAL unit from an H.265 track's
+// 'Codec Private' data.  Two layouts are handled: the proper hvcC-style
+// format (parameter-set arrays start at byte 22), and - for files that
+// misuse it - the H.264 avcC-style format (starting at byte 5).  On success,
+// "vps"/"sps"/"pps" point to newly-allocated (new[]) copies that the caller
+// must delete[]; on error, all three are NULL with size 0.
+void MatroskaFile::getH265ConfigData(MatroskaTrack const* track,
+                                     u_int8_t*& vps, unsigned& vpsSize,
+                                     u_int8_t*& sps, unsigned& spsSize,
+                                     u_int8_t*& pps, unsigned& ppsSize) {
+  vps = sps = pps = NULL;
+  vpsSize = spsSize = ppsSize = 0;
+
+  do {
+    if (track == NULL) break;
+
+    u_int8_t* VPS_SPS_PPSBytes = NULL; unsigned numVPS_SPS_PPSBytes = 0;
+    unsigned i;
+
+    if (track->codecPrivateUsesH264FormatForH265) {
+      // The data uses the H.264-style format (but including VPS NAL unit(s)).
+      // The VPS,SPS,PPS NAL unit information starts at byte #5:
+      if (track->codecPrivateSize >= 6) {
+        numVPS_SPS_PPSBytes = track->codecPrivateSize - 5;
+        VPS_SPS_PPSBytes = &track->codecPrivate[5];
+      }
+    } else {
+      // The data uses the proper H.265-style format.
+      // The VPS,SPS,PPS NAL unit information starts at byte #22:
+      if (track->codecPrivateSize >= 23) {
+        numVPS_SPS_PPSBytes = track->codecPrivateSize - 22;
+        VPS_SPS_PPSBytes = &track->codecPrivate[22];
+      }
+    }
+    if (VPS_SPS_PPSBytes == NULL) break; // no VPS,SPS,PPS NAL unit information was present
+
+    // Extract, from "VPS_SPS_PPSBytes", one VPS NAL unit, one SPS NAL unit, and one PPS NAL unit.
+    // (I hope one is all we need of each.)
+    u_int8_t* ptr = VPS_SPS_PPSBytes;
+    u_int8_t* limit = &VPS_SPS_PPSBytes[numVPS_SPS_PPSBytes];
+
+    if (track->codecPrivateUsesH264FormatForH265) {
+      // The data uses the H.264-style format (but including VPS NAL unit(s)).
+      while (NUM_BYTES_REMAINING > 0) {
+        unsigned numNALUnits = (*ptr++)&0x1F; CHECK_PTR;
+        for (i = 0; i < numNALUnits; ++i) {
+          unsigned nalUnitLength = (*ptr++)<<8; CHECK_PTR; // 2-byte big-endian length
+          nalUnitLength |= *ptr++; CHECK_PTR;
+
+          if (nalUnitLength > NUM_BYTES_REMAINING) break;
+          // H.265 NAL-unit type is bits 1-6 of the first header byte:
+          u_int8_t nal_unit_type = (ptr[0]&0x7E)>>1;
+          if (nal_unit_type == 32) { // VPS
+            vpsSize = nalUnitLength;
+            delete[] vps; vps = new u_int8_t[nalUnitLength]; // keep only the last one seen
+            memmove(vps, ptr, nalUnitLength);
+          } else if (nal_unit_type == 33) { // SPS
+            spsSize = nalUnitLength;
+            delete[] sps; sps = new u_int8_t[nalUnitLength];
+            memmove(sps, ptr, nalUnitLength);
+          } else if (nal_unit_type == 34) { // PPS
+            ppsSize = nalUnitLength;
+            delete[] pps; pps = new u_int8_t[nalUnitLength];
+            memmove(pps, ptr, nalUnitLength);
+          }
+          ptr += nalUnitLength;
+        }
+      }
+    } else {
+      // The data uses the proper H.265-style format.
+      unsigned numOfArrays = *ptr++; CHECK_PTR;
+      for (unsigned j = 0; j < numOfArrays; ++j) {
+        ++ptr; CHECK_PTR; // skip the 'array_completeness'|'reserved'|'NAL_unit_type' byte
+
+        unsigned numNalus = (*ptr++)<<8; CHECK_PTR;
+        numNalus |= *ptr++; CHECK_PTR;
+
+        for (i = 0; i < numNalus; ++i) {
+          unsigned nalUnitLength = (*ptr++)<<8; CHECK_PTR;
+          nalUnitLength |= *ptr++; CHECK_PTR;
+
+          if (nalUnitLength > NUM_BYTES_REMAINING) break;
+          u_int8_t nal_unit_type = (ptr[0]&0x7E)>>1;
+          if (nal_unit_type == 32) { // VPS
+            vpsSize = nalUnitLength;
+            delete[] vps; vps = new u_int8_t[nalUnitLength];
+            memmove(vps, ptr, nalUnitLength);
+          } else if (nal_unit_type == 33) { // SPS
+            spsSize = nalUnitLength;
+            delete[] sps; sps = new u_int8_t[nalUnitLength];
+            memmove(sps, ptr, nalUnitLength);
+          } else if (nal_unit_type == 34) { // PPS
+            ppsSize = nalUnitLength;
+            delete[] pps; pps = new u_int8_t[nalUnitLength];
+            memmove(pps, ptr, nalUnitLength);
+          }
+          ptr += nalUnitLength;
+        }
+      }
+    }
+
+    return;
+  } while (0);
+
+  // An error occurred:
+  delete[] vps; vps = NULL; vpsSize = 0;
+  delete[] sps; sps = NULL; spsSize = 0;
+  delete[] pps; pps = NULL; ppsSize = 0;
+}
+
+// Extracts the three Xiph codec headers ("identification", "comment",
+// "setup") from a Vorbis or Theora track's 'Codec Private' data, which packs
+// them using Xiph-style lacing (a header count byte, then 255-terminated
+// length bytes, with the final header's size implicit).  On success, each
+// out-pointer holds a newly-allocated (new[]) copy that the caller must
+// delete[]; on error, all three are NULL with size 0.  "getPrivByte"
+// (defined above) breaks out of the enclosing loop if the data is exhausted.
+void MatroskaFile
+::getVorbisOrTheoraConfigData(MatroskaTrack const* track,
+			      u_int8_t*& identificationHeader, unsigned& identificationHeaderSize,
+			      u_int8_t*& commentHeader, unsigned& commentHeaderSize,
+			      u_int8_t*& setupHeader, unsigned& setupHeaderSize) {
+  identificationHeader = commentHeader = setupHeader = NULL;
+  identificationHeaderSize = commentHeaderSize = setupHeaderSize = 0;
+
+  do {
+    if (track == NULL) break;
+
+    // The Matroska file's 'Codec Private' data is assumed to be the codec configuration
+    // information, containing the "Identification", "Comment", and "Setup" headers.
+    // Extract these headers now:
+    Boolean isTheora = strcmp(track->mimeType, "video/THEORA") == 0; // otherwise, Vorbis
+    u_int8_t* p = track->codecPrivate;
+    unsigned n = track->codecPrivateSize;
+    if (n == 0 || p == NULL) break; // we have no 'Codec Private' data
+
+    u_int8_t numHeaders;
+    getPrivByte(numHeaders);
+    unsigned headerSize[3]; // we don't handle any more than 2+1 headers
+
+    // Extract the sizes of each of these headers:
+    // (each size is encoded as a run of 255-valued bytes plus a final byte)
+    unsigned sizesSum = 0;
+    Boolean success = True;
+    unsigned i;
+    for (i = 0; i < numHeaders && i < 3; ++i) {
+      unsigned len = 0;
+      u_int8_t c;
+
+      do {
+	success = False;
+	getPrivByte(c);
+	success = True;
+
+	len += c;
+      } while (c == 255);
+      // NOTE(review): if "len == 0" here we break with "success" still True,
+      // leaving later "headerSize[]" entries unset - looks reachable only with
+      // malformed data; confirm before relying on this path.
+      if (!success || len == 0) break;
+
+      headerSize[i] = len;
+      sizesSum += len;
+    }
+    if (!success) break;
+
+    // Compute the implicit size of the final header:
+    if (numHeaders < 3) {
+      int finalHeaderSize = n - sizesSum;
+      if (finalHeaderSize <= 0) break; // error in data; give up
+
+      headerSize[numHeaders] = (unsigned)finalHeaderSize;
+      ++numHeaders; // include the final header now
+    } else {
+      numHeaders = 3; // The maximum number of headers that we handle
+    }
+
+    // Then, extract and classify each header:
+    for (i = 0; i < numHeaders; ++i) {
+      success = False;
+      unsigned newHeaderSize = headerSize[i];
+      u_int8_t* newHeader = new u_int8_t[newHeaderSize];
+      if (newHeader == NULL) break;
+
+      u_int8_t* hdr = newHeader;
+      while (newHeaderSize-- > 0) {
+	success = False;
+	getPrivByte(*hdr++);
+	success = True;
+      }
+      if (!success) {
+	delete[] newHeader;
+	break;
+      }
+
+      // Classify the header by its first byte (Theora uses 0x80/0x81/0x82;
+      // Vorbis uses 1/3/5):
+      u_int8_t headerType = newHeader[0];
+      if (headerType == 1 || (isTheora && headerType == 0x80)) { // "identification" header
+	delete[] identificationHeader; identificationHeader = newHeader;
+	identificationHeaderSize = headerSize[i];
+      } else if (headerType == 3 || (isTheora && headerType == 0x81)) { // "comment" header
+	delete[] commentHeader; commentHeader = newHeader;
+	commentHeaderSize = headerSize[i];
+      } else if (headerType == 5 || (isTheora && headerType == 0x82)) { // "setup" header
+	delete[] setupHeader; setupHeader = newHeader;
+	setupHeaderSize = headerSize[i];
+      } else {
+	delete[] newHeader; // because it was a header type that we don't understand
+      }
+    }
+    if (!success) break;
+
+    return;
+  } while (0);
+
+  // An error occurred:
+  delete[] identificationHeader; identificationHeader = NULL; identificationHeaderSize = 0;
+  delete[] commentHeader; commentHeader = NULL; commentHeaderSize = 0;
+  delete[] setupHeader; setupHeader = NULL; setupHeaderSize = 0;
+}
+
+// Returns the file's duration in seconds, computed from the segment duration
+// and timecode scale (which is in nanoseconds per timecode unit).
+float MatroskaFile::fileDuration() {
+  if (fCuePoints == NULL) return 0.0; // Hack, because the RTSP server code assumes that duration > 0 => seekable. (fix this) #####
+
+  return segmentDuration()*(timecodeScale()/1000000000.0f);
+}
+
+// The size of the largest key frame that we expect. This determines our buffer sizes:
+#define MAX_KEY_FRAME_SIZE 300000
+
+// Wraps "baseSource" with whatever filter(s) the track's codec needs for
+// streaming (currently a discrete framer for H.264/H.265), and sets an
+// estimated bitrate (kbps, for RTCP).  Returns the (possibly wrapped) source;
+// "numFiltersInFrontOfTrack" reports how many filters were added.
+FramedSource* MatroskaFile
+::createSourceForStreaming(FramedSource* baseSource, unsigned trackNumber,
+			   unsigned& estBitrate, unsigned& numFiltersInFrontOfTrack) {
+  if (baseSource == NULL) return NULL;
+
+  FramedSource* result = baseSource; // by default
+  estBitrate = 100; // by default
+  numFiltersInFrontOfTrack = 0; // by default
+
+  // Look at the track's MIME type to set its estimated bitrate (for use by RTCP).
+  // (Later, try to be smarter about figuring out the bitrate.) #####
+  // Some MIME types also require adding a special 'framer' in front of the source:
+  MatroskaTrack* track = lookup(trackNumber);
+  if (track != NULL) { // should always be true
+    if (strcmp(track->mimeType, "audio/MPEG") == 0) {
+      estBitrate = 128;
+    } else if (strcmp(track->mimeType, "audio/AAC") == 0) {
+      estBitrate = 96;
+    } else if (strcmp(track->mimeType, "audio/AC3") == 0) {
+      estBitrate = 48;
+    } else if (strcmp(track->mimeType, "audio/VORBIS") == 0) {
+      estBitrate = 96;
+    } else if (strcmp(track->mimeType, "video/H264") == 0) {
+      estBitrate = 500;
+      // Allow for the possibility of very large NAL units being fed to the sink object:
+      OutPacketBuffer::increaseMaxSizeTo(MAX_KEY_FRAME_SIZE); // bytes
+
+      // Add a framer in front of the source:
+      result = H264VideoStreamDiscreteFramer::createNew(envir(), result);
+      ++numFiltersInFrontOfTrack;
+    } else if (strcmp(track->mimeType, "video/H265") == 0) {
+      estBitrate = 500;
+      // Allow for the possibility of very large NAL units being fed to the sink object:
+      OutPacketBuffer::increaseMaxSizeTo(MAX_KEY_FRAME_SIZE); // bytes
+
+      // Add a framer in front of the source:
+      result = H265VideoStreamDiscreteFramer::createNew(envir(), result);
+      ++numFiltersInFrontOfTrack;
+    } else if (strcmp(track->mimeType, "video/VP8") == 0) {
+      estBitrate = 500;
+    } else if (strcmp(track->mimeType, "video/VP9") == 0) {
+      estBitrate = 500;
+    } else if (strcmp(track->mimeType, "video/THEORA") == 0) {
+      estBitrate = 500;
+    } else if (strcmp(track->mimeType, "text/T140") == 0) {
+      estBitrate = 48;
+    }
+  }
+
+  return result;
+}
+
+// Return the MIME type string of the given track, or NULL if no such track exists.
+char const* MatroskaFile::trackMIMEType(unsigned trackNumber) const {
+  MatroskaTrack* const track = lookup(trackNumber);
+  return track == NULL ? NULL : track->mimeType;
+}
+
+// Create and return a "RTPSink" (subclass) object appropriate for streaming the given
+// track over RTP, based on the track's MIME type.  Codec configuration data
+// (AAC 'config' string; Vorbis/Theora headers; H.264 SPS/PPS; H.265 VPS/SPS/PPS) is
+// extracted from the track's 'Codec Private' data and handed to the new sink.
+// Returns NULL if the track doesn't exist or has an unsupported MIME type.
+// NOTE(review): the locally-allocated config buffers are deleted immediately after the
+// sink is created — presumably each sink copies what it needs; confirm against the
+// individual "createNew()" implementations.
+RTPSink* MatroskaFile
+::createRTPSinkForTrackNumber(unsigned trackNumber, Groupsock* rtpGroupsock,
+			      unsigned char rtpPayloadTypeIfDynamic) {
+  RTPSink* result = NULL; // default value, if an error occurs
+
+  do {
+    MatroskaTrack* track = lookup(trackNumber);
+    if (track == NULL) break;
+
+    if (strcmp(track->mimeType, "audio/L16") == 0) {
+      result = SimpleRTPSink::createNew(envir(), rtpGroupsock,rtpPayloadTypeIfDynamic, track->samplingFrequency, "audio", "L16", track->numChannels);
+    } else if (strcmp(track->mimeType, "audio/MPEG") == 0) {
+      result = MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock);
+    } else if (strcmp(track->mimeType, "audio/AAC") == 0) {
+      // The Matroska file's 'Codec Private' data is assumed to be the AAC configuration
+      // information.  Use this to generate a hexadecimal 'config' string for the new RTP sink:
+      char* configStr = new char[2*track->codecPrivateSize + 1]; if (configStr == NULL) break;
+      // 2 hex digits per byte, plus the trailing '\0'
+      for (unsigned i = 0; i < track->codecPrivateSize; ++i) {
+	sprintf(&configStr[2*i], "%02X", track->codecPrivate[i]);
+      }
+
+      result = MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock,
+					      rtpPayloadTypeIfDynamic,
+					      track->samplingFrequency,
+					      "audio", "AAC-hbr", configStr,
+					      track->numChannels);
+      delete[] configStr;
+    } else if (strcmp(track->mimeType, "audio/AC3") == 0) {
+      result = AC3AudioRTPSink
+	::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic, track->samplingFrequency);
+    } else if (strcmp(track->mimeType, "audio/OPUS") == 0) {
+      // Opus is always streamed at a 48 kHz RTP clock rate and 2 channels, per RFC 7587:
+      result = SimpleRTPSink
+	::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+		    48000, "audio", "OPUS", 2, False/*only 1 Opus 'packet' in each RTP packet*/);
+    } else if (strcmp(track->mimeType, "audio/VORBIS") == 0 || strcmp(track->mimeType, "video/THEORA") == 0) {
+      // Both codecs carry the same triple of config headers (identification/comment/setup):
+      u_int8_t* identificationHeader; unsigned identificationHeaderSize;
+      u_int8_t* commentHeader; unsigned commentHeaderSize;
+      u_int8_t* setupHeader; unsigned setupHeaderSize;
+      getVorbisOrTheoraConfigData(track,
+				  identificationHeader, identificationHeaderSize,
+				  commentHeader, commentHeaderSize,
+				  setupHeader, setupHeaderSize);
+
+      if (strcmp(track->mimeType, "video/THEORA") == 0) {
+	result = TheoraVideoRTPSink
+	  ::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+		      identificationHeader, identificationHeaderSize,
+		      commentHeader, commentHeaderSize,
+		      setupHeader, setupHeaderSize);
+      } else { // Vorbis
+	result = VorbisAudioRTPSink
+	  ::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+		      track->samplingFrequency, track->numChannels,
+		      identificationHeader, identificationHeaderSize,
+		      commentHeader, commentHeaderSize,
+		      setupHeader, setupHeaderSize);
+      }
+      delete[] identificationHeader; delete[] commentHeader; delete[] setupHeader;
+    } else if (strcmp(track->mimeType, "video/RAW") == 0) {
+      result = RawVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+                                          track->pixelHeight, track->pixelWidth, track->bitDepth, track->colorSampling, track->colorimetry);
+    } else if (strcmp(track->mimeType, "video/H264") == 0) {
+      u_int8_t* sps; unsigned spsSize;
+      u_int8_t* pps; unsigned ppsSize;
+
+      getH264ConfigData(track, sps, spsSize, pps, ppsSize);
+      result = H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+					   sps, spsSize, pps, ppsSize);
+      delete[] sps; delete[] pps;
+    } else if (strcmp(track->mimeType, "video/H265") == 0) {
+      u_int8_t* vps; unsigned vpsSize;
+      u_int8_t* sps; unsigned spsSize;
+      u_int8_t* pps; unsigned ppsSize;
+
+      getH265ConfigData(track, vps, vpsSize, sps, spsSize, pps, ppsSize);
+      result = H265VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+					   vps, vpsSize, sps, spsSize, pps, ppsSize);
+      delete[] vps; delete[] sps; delete[] pps;
+    } else if (strcmp(track->mimeType, "video/VP8") == 0) {
+      result = VP8VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+    } else if (strcmp(track->mimeType, "video/VP9") == 0) {
+      result = VP9VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+    } else if (strcmp(track->mimeType, "text/T140") == 0) {
+      result = T140TextRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+    }
+  } while (0);
+
+  return result;
+}
+
+// Create a "FileSink" (subclass) object suitable for recording the given track to
+// "fileName", based on the track's MIME type:
+//  - H.264/H.265: a video file sink seeded with the codec's parameter sets (extracted
+//    from the track's 'Codec Private' data, then base64-encoded);
+//  - Vorbis/Opus/Theora: an Ogg-format file sink (with a 'config' string for the
+//    codecs that need one);
+//  - AMR(-WB): a special sink that inserts AMR frame headers;
+//  - anything else: a plain "FileSink".
+// Returns NULL if the track doesn't exist.
+FileSink* MatroskaFile::createFileSinkForTrackNumber(unsigned trackNumber, char const* fileName) {
+  FileSink* result = NULL; // default value, if an error occurs
+  Boolean createOggFileSink = False; // by default
+
+  do {
+    MatroskaTrack* track = lookup(trackNumber);
+    if (track == NULL) break;
+
+    if (strcmp(track->mimeType, "video/H264") == 0) {
+      u_int8_t* sps; unsigned spsSize;
+      u_int8_t* pps; unsigned ppsSize;
+
+      getH264ConfigData(track, sps, spsSize, pps, ppsSize);
+
+      char* sps_base64 = base64Encode((char*)sps, spsSize);
+      char* pps_base64 = base64Encode((char*)pps, ppsSize);
+      delete[] sps; delete[] pps;
+
+      char* sPropParameterSetsStr
+	= new char[(sps_base64 == NULL ? 0 : strlen(sps_base64)) +
+		   (pps_base64 == NULL ? 0 : strlen(pps_base64)) +
+		   10 /*more than enough space*/];
+      // Fix: "base64Encode()" can return NULL; passing NULL to a "%s" conversion is
+      // undefined behavior, so substitute an empty string in that case:
+      sprintf(sPropParameterSetsStr, "%s,%s",
+	      sps_base64 == NULL ? "" : sps_base64,
+	      pps_base64 == NULL ? "" : pps_base64);
+      delete[] sps_base64; delete[] pps_base64;
+
+      result = H264VideoFileSink::createNew(envir(), fileName,
+					    sPropParameterSetsStr,
+					    MAX_KEY_FRAME_SIZE); // extra large buffer size for large key frames
+      delete[] sPropParameterSetsStr;
+    } else if (strcmp(track->mimeType, "video/H265") == 0) {
+      u_int8_t* vps; unsigned vpsSize;
+      u_int8_t* sps; unsigned spsSize;
+      u_int8_t* pps; unsigned ppsSize;
+
+      getH265ConfigData(track, vps, vpsSize, sps, spsSize, pps, ppsSize);
+
+      char* vps_base64 = base64Encode((char*)vps, vpsSize);
+      char* sps_base64 = base64Encode((char*)sps, spsSize);
+      char* pps_base64 = base64Encode((char*)pps, ppsSize);
+      delete[] vps; delete[] sps; delete[] pps;
+
+      result = H265VideoFileSink::createNew(envir(), fileName,
+					    vps_base64, sps_base64, pps_base64,
+					    MAX_KEY_FRAME_SIZE); // extra large buffer size for large key frames
+      delete[] vps_base64; delete[] sps_base64; delete[] pps_base64;
+    } else if (strcmp(track->mimeType, "video/THEORA") == 0) {
+      createOggFileSink = True;
+    } else if (strcmp(track->mimeType, "audio/AMR") == 0 ||
+	       strcmp(track->mimeType, "audio/AMR-WB") == 0) {
+      // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
+      result = AMRAudioFileSink::createNew(envir(), fileName);
+    } else if (strcmp(track->mimeType, "audio/VORBIS") == 0 ||
+	       strcmp(track->mimeType, "audio/OPUS") == 0) {
+      createOggFileSink = True;
+    }
+
+    if (createOggFileSink) {
+      char* configStr = NULL; // by default
+
+      // Vorbis and Theora need a 'config' string built from their three codec headers:
+      if (strcmp(track->mimeType, "audio/VORBIS") == 0 || strcmp(track->mimeType, "video/THEORA") == 0) {
+	u_int8_t* identificationHeader; unsigned identificationHeaderSize;
+	u_int8_t* commentHeader; unsigned commentHeaderSize;
+	u_int8_t* setupHeader; unsigned setupHeaderSize;
+	getVorbisOrTheoraConfigData(track,
+				    identificationHeader, identificationHeaderSize,
+				    commentHeader, commentHeaderSize,
+				    setupHeader, setupHeaderSize);
+	u_int32_t identField = 0xFACADE; // Can we get a real value from the file somehow?
+	configStr = generateVorbisOrTheoraConfigStr(identificationHeader, identificationHeaderSize,
+						    commentHeader, commentHeaderSize,
+						    setupHeader, setupHeaderSize,
+						    identField);
+	delete[] identificationHeader; delete[] commentHeader; delete[] setupHeader;
+      }
+
+      result = OggFileSink::createNew(envir(), fileName, track->samplingFrequency, configStr, MAX_KEY_FRAME_SIZE);
+      delete[] configStr;
+    } else if (result == NULL) {
+      // By default, just create a regular "FileSink":
+      result = FileSink::createNew(envir(), fileName, MAX_KEY_FRAME_SIZE);
+    }
+  } while (0);
+
+  return result;
+}
+
+// Register "newTrack" in our track table, keyed by "trackNumber".
+void MatroskaFile::addTrack(MatroskaTrack* newTrack, unsigned trackNumber) {
+  fTrackTable->add(newTrack, trackNumber);
+}
+
+// Insert a 'Cue Point' (a seek-index entry mapping a time to a file position)
+// into our tree of cue points.
+void MatroskaFile::addCuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster) {
+  Boolean dummy = False; // not used
+  CuePoint::addCuePoint(fCuePoints, cueTime, clusterOffsetInFile, blockNumWithinCluster, dummy);
+}
+
+// Look up the cue point for "cueTime"; on success, "cueTime" is updated to the cue
+// point's actual time, and the cluster position is returned via the result parameters.
+// Returns False (i.e., seeking unsupported) if the file has no cue points at all.
+Boolean MatroskaFile::lookupCuePoint(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster) {
+  if (fCuePoints == NULL) return False;
+
+  (void)fCuePoints->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster);
+  return True;
+}
+
+// Debugging aid: print the entire cue-point tree to "fid".
+void MatroskaFile::printCuePoints(FILE* fid) {
+  CuePoint::fprintf(fid, fCuePoints);
+}
+
+
+////////// MatroskaTrackTable implementation //////////
+
+// A table of "MatroskaTrack" descriptors, keyed by track number.
+// Track numbers are stored directly as one-word hash keys (cast to "char const*").
+MatroskaTrackTable::MatroskaTrackTable()
+  : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
+}
+
+MatroskaTrackTable::~MatroskaTrackTable() {
+  // Remove and delete all of our "MatroskaTrack" descriptors, and the hash table itself:
+  MatroskaTrack* track;
+  while ((track = (MatroskaTrack*)fTable->RemoveNext()) != NULL) {
+    delete track;
+  }
+  delete fTable;
+}
+
+// Add "newTrack" under key "trackNumber", deleting any previous entry under that key.
+// NOTE(review): the Remove() uses "newTrack->trackNumber" while the Add() uses the
+// "trackNumber" parameter; these appear to always be the same value — confirm with callers.
+void MatroskaTrackTable::add(MatroskaTrack* newTrack, unsigned trackNumber) {
+  if (newTrack != NULL && newTrack->trackNumber != 0) fTable->Remove((char const*)newTrack->trackNumber);
+  MatroskaTrack* existingTrack = (MatroskaTrack*)fTable->Add((char const*)trackNumber, newTrack);
+  delete existingTrack; // in case it wasn't NULL
+}
+
+// Return the track with the given number, or NULL if none exists.
+MatroskaTrack* MatroskaTrackTable::lookup(unsigned trackNumber) {
+  return (MatroskaTrack*)fTable->Lookup((char const*)trackNumber);
+}
+
+unsigned MatroskaTrackTable::numTracks() const { return fTable->numEntries(); }
+
+MatroskaTrackTable::Iterator::Iterator(MatroskaTrackTable& ourTable) {
+  fIter = HashTable::Iterator::create(*(ourTable.fTable));
+}
+
+MatroskaTrackTable::Iterator::~Iterator() {
+  delete fIter;
+}
+
+// Return the next track in the table (in unspecified order), or NULL when exhausted.
+MatroskaTrack* MatroskaTrackTable::Iterator::next() {
+  char const* key; // the key (track number) is not needed by our callers
+  return (MatroskaTrack*)fIter->next(key);
+}
+
+
+////////// MatroskaTrack implementation //////////
+
+// Construct a track descriptor with Matroska-appropriate defaults; the real values
+// are filled in later as the file's 'Track Entry' headers are parsed.
+MatroskaTrack::MatroskaTrack()
+  : trackNumber(0/*not set*/), trackType(0/*unknown*/),
+    isEnabled(True), isDefault(True), isForced(False),
+    defaultDuration(0),
+    name(NULL), language(NULL), codecID(NULL),
+    samplingFrequency(0), numChannels(2), mimeType(""),
+    codecPrivateSize(0), codecPrivate(NULL),
+    codecPrivateUsesH264FormatForH265(False), codecIsOpus(False),
+    headerStrippedBytesSize(0), headerStrippedBytes(NULL),
+    colorSampling(""), colorimetry("BT709-2") /*Matroska default value for Primaries */,
+    pixelWidth(0), pixelHeight(0), bitDepth(8), subframeSizeSize(0) {
+}
+
+MatroskaTrack::~MatroskaTrack() {
+  // Free the heap-allocated strings and byte arrays that we own:
+  delete[] name; delete[] language; delete[] codecID;
+  delete[] codecPrivate;
+  delete[] headerStrippedBytes;
+}
+
+
+////////// MatroskaDemux implementation //////////
+
+// A demultiplexor for a Matroska file: owns a parser over the file's byte stream,
+// and a table of the "MatroskaDemuxedTrack"s that clients have created from it.
+MatroskaDemux::MatroskaDemux(MatroskaFile& ourFile)
+  : Medium(ourFile.envir()),
+    fOurFile(ourFile), fDemuxedTracksTable(HashTable::create(ONE_WORD_HASH_KEYS)),
+    fNextTrackTypeToCheck(0x1) {
+  fOurParser = new MatroskaFileParser(ourFile, ByteStreamFileSource::createNew(envir(), ourFile.fileName()),
+				      handleEndOfFile, this, this);
+}
+
+MatroskaDemux::~MatroskaDemux() {
+  // Begin by acting as if we've reached the end of the source file.  This should cause all of our demuxed tracks to get closed.
+  handleEndOfFile();
+
+  // Then delete our table of "MatroskaDemuxedTrack"s
+  // - but not the "MatroskaDemuxedTrack"s themselves; that should have already happened:
+  delete fDemuxedTracksTable;
+
+  delete fOurParser;
+  // Finally, unregister ourself from the file that created us:
+  fOurFile.removeDemux(this);
+}
+
+// Convenience overload: create the next demuxed track, discarding its track number.
+FramedSource* MatroskaDemux::newDemuxedTrack() {
+  unsigned dummyResultTrackNumber;
+  return newDemuxedTrack(dummyResultTrackNumber);
+}
+
+// Create a demuxed track for the next not-yet-handled track type, checking the file's
+// chosen video, then audio, then subtitle track.  Returns NULL once all types have
+// been handed out.  "resultTrackNumber" is set to the new track's number (or 0).
+FramedSource* MatroskaDemux::newDemuxedTrack(unsigned& resultTrackNumber) {
+  FramedSource* result;
+  resultTrackNumber = 0;
+
+  // "fNextTrackTypeToCheck" is a one-bit mask that we shift left through the
+  // MATROSKA_TRACK_TYPE_* values, remembering where we stopped between calls:
+  for (result = NULL; result == NULL && fNextTrackTypeToCheck != MATROSKA_TRACK_TYPE_OTHER;
+       fNextTrackTypeToCheck <<= 1) {
+    if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_VIDEO) resultTrackNumber = fOurFile.chosenVideoTrackNumber();
+    else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_AUDIO) resultTrackNumber = fOurFile.chosenAudioTrackNumber();
+    else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_SUBTITLE) resultTrackNumber = fOurFile.chosenSubtitleTrackNumber();
+
+    result = newDemuxedTrackByTrackNumber(resultTrackNumber);
+  }
+
+  return result;
+}
+
+// Create (and register) a demuxed track for the given track number.
+// A track number of 0 means 'no such track' and yields NULL.
+FramedSource* MatroskaDemux::newDemuxedTrackByTrackNumber(unsigned trackNumber) {
+  if (trackNumber == 0) return NULL;
+
+  FramedSource* trackSource = new MatroskaDemuxedTrack(envir(), trackNumber, *this);
+  fDemuxedTracksTable->Add((char const*)trackNumber, trackSource);
+  return trackSource;
+}
+
+// Return the previously-created demuxed track for "trackNumber", or NULL.
+MatroskaDemuxedTrack* MatroskaDemux::lookupDemuxedTrack(unsigned trackNumber) {
+  return (MatroskaDemuxedTrack*)fDemuxedTracksTable->Lookup((char const*)trackNumber);
+}
+
+// Called when a demuxed track is closed.  When the last one goes away, the demux
+// object deletes itself (so callers must not use "this" afterwards).
+void MatroskaDemux::removeTrack(unsigned trackNumber) {
+  fDemuxedTracksTable->Remove((char const*)trackNumber);
+  if (fDemuxedTracksTable->numEntries() == 0) {
+    // We no longer have any demuxed tracks, so delete ourselves now:
+    Medium::close(this);
+  }
+}
+
+// Resume parsing the file (typically called when a demuxed track wants more data).
+void MatroskaDemux::continueReading() {
+  fOurParser->continueParsing();
+}
+
+// Seek to (approximately) the given normalized play time; "seekNPT" is updated
+// to the actual time that was seeked to.
+void MatroskaDemux::seekToTime(double& seekNPT) {
+  if (fOurParser != NULL) fOurParser->seekToTime(seekNPT);
+}
+
+// Static trampoline: forwards the parser's 'end of file' callback to the member function.
+void MatroskaDemux::handleEndOfFile(void* clientData) {
+  ((MatroskaDemux*)clientData)->handleEndOfFile();
+}
+
+void MatroskaDemux::handleEndOfFile() {
+  // Iterate through all of our 'demuxed tracks', handling 'end of input' on each one.
+  // Hack: Because this can cause the hash table to get modified underneath us, we don't call the handlers until after we've
+  // first iterated through all of the tracks.
+  unsigned numTracks = fDemuxedTracksTable->numEntries();
+  if (numTracks == 0) return;
+  // Snapshot the tracks into a plain array before calling any handler:
+  MatroskaDemuxedTrack** tracks = new MatroskaDemuxedTrack*[numTracks];
+
+  HashTable::Iterator* iter = HashTable::Iterator::create(*fDemuxedTracksTable);
+  unsigned i;
+  char const* trackNumber;
+
+  for (i = 0; i < numTracks; ++i) {
+    tracks[i] = (MatroskaDemuxedTrack*)iter->next(trackNumber);
+  }
+  delete iter;
+
+  // Now it's safe to notify each track (which may remove itself from the table):
+  for (i = 0; i < numTracks; ++i) {
+    if (tracks[i] == NULL) continue; // sanity check; shouldn't happen
+    tracks[i]->handleClosure();
+  }
+
+  delete[] tracks;
+}
+
+
+////////// CuePoint implementation //////////
+
+// A node in an AVL tree of seek-index entries, ordered by cue time.
+// Note that the block number is stored 0-based (the caller passes it 1-based).
+CuePoint::CuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster)
+  : fBalance(0),
+    fCueTime(cueTime), fClusterOffsetInFile(clusterOffsetInFile), fBlockNumWithinCluster(blockNumWithinCluster - 1) {
+  fSubTree[0] = fSubTree[1] = NULL;
+}
+
+CuePoint::~CuePoint() {
+  // Recursively delete both subtrees:
+  delete fSubTree[0]; delete fSubTree[1];
+}
+
+// Insert a cue point into the AVL tree rooted at "root" (which may be reseated by a
+// rotation).  "fBalance" is the usual AVL balance factor (-1/0/+1, negative = left-heavy);
+// "needToReviseBalanceOfParent" is set True iff this subtree's height grew, telling the
+// recursive caller to update its own balance (and possibly rebalance).
+// An insert with an already-present cue time just overwrites that node's data.
+void CuePoint::addCuePoint(CuePoint*& root, double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster,
+			   Boolean& needToReviseBalanceOfParent) {
+  needToReviseBalanceOfParent = False; // by default; may get changed below
+
+  if (root == NULL) {
+    root = new CuePoint(cueTime, clusterOffsetInFile, blockNumWithinCluster);
+    needToReviseBalanceOfParent = True;
+  } else if (cueTime == root->fCueTime) {
+    // Replace existing data:
+    root->fClusterOffsetInFile = clusterOffsetInFile;
+    root->fBlockNumWithinCluster = blockNumWithinCluster - 1;
+  } else {
+    // Add to our left or right subtree:
+    int direction = cueTime > root->fCueTime; // 0 (left) or 1 (right)
+    Boolean needToReviseOurBalance = False;
+    addCuePoint(root->fSubTree[direction], cueTime, clusterOffsetInFile, blockNumWithinCluster, needToReviseOurBalance);
+
+    if (needToReviseOurBalance) {
+      // We need to change our 'balance' number, perhaps while also performing a rotation to bring ourself back into balance:
+      if (root->fBalance == 0) {
+	// We were balanced before, but now we're unbalanced (by 1) on the "direction" side:
+	root->fBalance = -1 + 2*direction; // -1 for "direction" 0; 1 for "direction" 1
+	needToReviseBalanceOfParent = True;
+      } else if (root->fBalance == 1 - 2*direction) { // 1 for "direction" 0; -1 for "direction" 1
+	// We were unbalanced (by 1) on the side opposite to where we added an entry, so now we're balanced:
+	root->fBalance = 0;
+      } else {
+	// We were unbalanced (by 1) on the side where we added an entry, so now we're unbalanced by 2, and have to rebalance:
+	if (root->fSubTree[direction]->fBalance == -1 + 2*direction) { // -1 for "direction" 0; 1 for "direction" 1
+	  // We're 'doubly-unbalanced' on this side, so perform a single rotation in the opposite direction:
+	  root->fBalance = root->fSubTree[direction]->fBalance = 0;
+	  rotate(1-direction, root);
+	} else {
+	  // This is the Left-Right case (for "direction" 0) or the Right-Left case (for "direction" 1); perform two rotations:
+	  char newParentCurBalance = root->fSubTree[direction]->fSubTree[1-direction]->fBalance;
+	  if (newParentCurBalance == 1 - 2*direction) { // 1 for "direction" 0; -1 for "direction" 1
+	    root->fBalance = 0;
+	    root->fSubTree[direction]->fBalance = -1 + 2*direction; // -1 for "direction" 0; 1 for "direction" 1
+	  } else if (newParentCurBalance == 0) {
+	    root->fBalance = 0;
+	    root->fSubTree[direction]->fBalance = 0;
+	  } else {
+	    root->fBalance = 1 - 2*direction; // 1 for "direction" 0; -1 for "direction" 1
+	    root->fSubTree[direction]->fBalance = 0;
+	  }
+	  rotate(direction, root->fSubTree[direction]);
+
+	  root->fSubTree[direction]->fBalance = 0; // the new root will be balanced
+	  rotate(1-direction, root);
+	}
+      }
+    }
+  }
+}
+
+// Find the cue point with the largest cue time <= "cueTime" in this subtree.
+// On success (returns True), "cueTime" is rewritten to that cue point's time and the
+// result parameters are filled in; returns False iff every node here is > "cueTime".
+Boolean CuePoint::lookup(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster) {
+  if (cueTime < fCueTime) {
+    // The answer (if any) lies entirely in our left subtree:
+    if (left() == NULL) {
+      resultClusterOffsetInFile = 0;
+      resultBlockNumWithinCluster = 0;
+      return False;
+    } else {
+      return left()->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster);
+    }
+  } else {
+    // This node qualifies; prefer a larger (but still <= "cueTime") node on our right:
+    if (right() == NULL || !right()->lookup(cueTime, resultClusterOffsetInFile, resultBlockNumWithinCluster)) {
+      // Use this record:
+      cueTime = fCueTime;
+      resultClusterOffsetInFile = fClusterOffsetInFile;
+      resultBlockNumWithinCluster = fBlockNumWithinCluster;
+    }
+    return True;
+  }
+}
+
+// Debugging aid: print the subtree rooted at "cuePoint" as a nested
+// "[left,time{balance},right]" expression.
+void CuePoint::fprintf(FILE* fid, CuePoint* cuePoint) {
+  if (cuePoint != NULL) {
+    ::fprintf(fid, "[");
+    fprintf(fid, cuePoint->left());
+
+    ::fprintf(fid, ",%.1f{%d},", cuePoint->fCueTime, cuePoint->fBalance);
+
+    fprintf(fid, cuePoint->right());
+    ::fprintf(fid, "]");
+  }
+}
+
+// Standard AVL single rotation: the child on the side opposite "direction" becomes
+// the new subtree root.  Does not update balance factors; callers do that.
+void CuePoint::rotate(unsigned direction/*0 => left; 1 => right*/, CuePoint*& root) {
+  CuePoint* pivot = root->fSubTree[1-direction]; // ASSERT: pivot != NULL
+  root->fSubTree[1-direction] = pivot->fSubTree[direction];
+  pivot->fSubTree[direction] = root;
+  root = pivot;
+}
diff --git a/liveMedia/MatroskaFileParser.cpp b/liveMedia/MatroskaFileParser.cpp
new file mode 100644
index 0000000..bac58b9
--- /dev/null
+++ b/liveMedia/MatroskaFileParser.cpp
@@ -0,0 +1,1512 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a Matroska file.
+// Implementation
+
+#include "MatroskaFileParser.hh"
+#include "MatroskaDemuxedTrack.hh"
+#include <ByteStreamFileSource.hh>
+#include <GroupsockHelper.hh> // for "gettimeofday()"
+
+// Construct a parser for "ourFile", reading from "inputSource".
+// A NULL "ourDemux" means this is the initial, structure-discovering parse (headers,
+// tracks, cues), which starts immediately; a non-NULL demux means we will parse track
+// data on demand, starting only when a client reads from a demuxed track.
+MatroskaFileParser::MatroskaFileParser(MatroskaFile& ourFile, FramedSource* inputSource,
+				       FramedSource::onCloseFunc* onEndFunc, void* onEndClientData,
+				       MatroskaDemux* ourDemux)
+  : StreamParser(inputSource, onEndFunc, onEndClientData, continueParsing, this),
+    fOurFile(ourFile), fInputSource(inputSource),
+    fOnEndFunc(onEndFunc), fOnEndClientData(onEndClientData),
+    fOurDemux(ourDemux),
+    fCurOffsetInFile(0), fSavedCurOffsetInFile(0), fLimitOffsetInFile(0),
+    fNumHeaderBytesToSkip(0), fClusterTimecode(0), fBlockTimecode(0),
+    fFrameSizesWithinBlock(NULL),
+    fPresentationTimeOffset(0.0) {
+  if (ourDemux == NULL) {
+    // Initialization
+    fCurrentParseState = PARSING_START_OF_FILE;
+
+    continueParsing();
+  } else {
+    fCurrentParseState = LOOKING_FOR_CLUSTER;
+    // In this case, parsing (of track data) doesn't start until a client starts reading from a track.
+  }
+}
+
+MatroskaFileParser::~MatroskaFileParser() {
+  delete[] fFrameSizesWithinBlock;
+  Medium::close(fInputSource);
+}
+
+// Seek the parser to (approximately) normalized play time "seekNPT", clamping to
+// [0, file duration] and snapping to the nearest preceding cue point.  "seekNPT" is
+// updated to the time actually seeked to.  If the file has no cue points, mid-file
+// seeking is unsupported and this is a no-op.
+void MatroskaFileParser::seekToTime(double& seekNPT) {
+#ifdef DEBUG
+  fprintf(stderr, "seekToTime(%f)\n", seekNPT);
+#endif
+  if (seekNPT <= 0.0) {
+#ifdef DEBUG
+    fprintf(stderr, "\t=> start of file\n");
+#endif
+    seekNPT = 0.0;
+    seekToFilePosition(0);
+  } else if (seekNPT >= fOurFile.fileDuration()) {
+#ifdef DEBUG
+    fprintf(stderr, "\t=> end of file\n");
+#endif
+    seekNPT = fOurFile.fileDuration();
+    seekToEndOfFile();
+  } else {
+    u_int64_t clusterOffsetInFile;
+    unsigned blockNumWithinCluster;
+    if (!fOurFile.lookupCuePoint(seekNPT, clusterOffsetInFile, blockNumWithinCluster)) {
+#ifdef DEBUG
+      fprintf(stderr, "\t=> not supported\n");
+#endif
+      return; // seeking not supported
+    }
+
+#ifdef DEBUG
+    fprintf(stderr, "\t=> seek time %f, file position %llu, block number within cluster %d\n", seekNPT, clusterOffsetInFile, blockNumWithinCluster);
+#endif
+    seekToFilePosition(clusterOffsetInFile);
+    fCurrentParseState = LOOKING_FOR_BLOCK;
+    // LATER handle "blockNumWithinCluster"; for now, we assume that it's 0 #####
+  }
+}
+
+// Static trampoline used as the StreamParser 'client continue' callback.
+void MatroskaFileParser
+::continueParsing(void* clientData, unsigned char* /*ptr*/, unsigned /*size*/, struct timeval /*presentationTime*/) {
+  ((MatroskaFileParser*)clientData)->continueParsing();
+}
+
+// Drive the parser forward.  If parse() completes the whole file, invoke the
+// client's 'done' callback; otherwise we simply return and wait to be re-invoked
+// (after more input arrives, or after a downstream read).
+void MatroskaFileParser::continueParsing() {
+  if (fInputSource != NULL) {
+    if (!parse()) {
+      // We didn't complete the parsing, because we had to read more data from the source, or because we're waiting for
+      // another read from downstream.  Once that happens, we'll get called again.
+      return;
+    }
+  }
+
+  // We successfully parsed the file.  Call our 'done' function now:
+  if (fOnEndFunc != NULL) (*fOnEndFunc)(fOnEndClientData);
+}
+
+// The parser's main state machine.  Returns True when the entire file has been parsed;
+// returns False when parsing must pause (more input needed, or waiting on a downstream
+// read).  NOTE(review): the catch(int) relies on StreamParser's input routines throwing
+// an integer when insufficient data is buffered — confirm against "StreamParser.hh".
+Boolean MatroskaFileParser::parse() {
+  Boolean areDone = False;
+
+  if (fInputSource->isCurrentlyAwaitingData()) return False;
+      // Our input source is currently being read. Wait until that read completes
+  try {
+    skipRemainingHeaderBytes(True); // if any
+    do {
+      if (fInputSource->isCurrentlyAwaitingData()) return False;
+          // Our input source is currently being read. Wait until that read completes
+
+      switch (fCurrentParseState) {
+      case PARSING_START_OF_FILE: {
+	areDone = parseStartOfFile();
+	break;
+      }
+      case LOOKING_FOR_TRACKS: {
+	lookForNextTrack();
+	break;
+      }
+      case PARSING_TRACK: {
+	areDone = parseTrack();
+	if (areDone && fOurFile.fCuesOffset > 0) {
+	  // We've finished parsing the 'Track' information.  There are also 'Cues' in the file, so parse those before finishing:
+	  // Seek to the specified position in the file.  We were already told that the 'Cues' begins there:
+#ifdef DEBUG
+	  fprintf(stderr, "Seeking to file position %llu (the previously-reported location of 'Cues')\n", fOurFile.fCuesOffset);
+#endif
+	  seekToFilePosition(fOurFile.fCuesOffset);
+	  fCurrentParseState = PARSING_CUES;
+	  areDone = False;
+	}
+	break;
+      }
+      case PARSING_CUES: {
+	areDone = parseCues();
+	break;
+      }
+      case LOOKING_FOR_CLUSTER: {
+	if (fOurFile.fClusterOffset > 0) {
+	  // Optimization: Seek to the specified position in the file.  We were already told that the 'Cluster' begins there:
+#ifdef DEBUG
+	  fprintf(stderr, "Optimization: Seeking to file position %llu (the previously-reported location of a 'Cluster')\n", fOurFile.fClusterOffset);
+#endif
+	  seekToFilePosition(fOurFile.fClusterOffset);
+	}
+	fCurrentParseState = LOOKING_FOR_BLOCK;
+	break;
+      }
+      case LOOKING_FOR_BLOCK: {
+	lookForNextBlock();
+	break;
+      }
+      case PARSING_BLOCK: {
+	parseBlock();
+	break;
+      }
+      case DELIVERING_FRAME_WITHIN_BLOCK: {
+	if (!deliverFrameWithinBlock()) return False;
+	break;
+      }
+      case DELIVERING_FRAME_BYTES: {
+	deliverFrameBytes();
+	return False; // Halt parsing for now.  A new 'read' from downstream will cause parsing to resume.
+	break;
+      }
+      }
+    } while (!areDone);
+
+    return True;
+  } catch (int /*e*/) {
+#ifdef DEBUG
+    fprintf(stderr, "MatroskaFileParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+    return False; // the parsing got interrupted
+  }
+}
+
+// Validate that the file starts with a standard EBML header, then skip over it and
+// move on to looking for 'Track's.  Returns True only on a fatal error (invalid file),
+// i.e. 'True' here means "done parsing" rather than "success".
+Boolean MatroskaFileParser::parseStartOfFile() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing start of file\n");
+#endif
+  EBMLId id;
+  EBMLDataSize size;
+
+  // The file must begin with the standard EBML header (which we skip):
+  if (!parseEBMLIdAndSize(id, size) || id != MATROSKA_ID_EBML) {
+    fOurFile.envir() << "ERROR: File does not begin with an EBML header\n";
+    return True; // We're done with the file, because it's not valid
+  }
+#ifdef DEBUG
+  fprintf(stderr, "MatroskaFileParser::parseStartOfFile(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
+#endif
+
+  fCurrentParseState = LOOKING_FOR_TRACKS;
+  skipHeader(size);
+
+  return False; // because we have more parsing to do - inside the 'Track' header
+}
+
+// Scan top-level Matroska headers until we reach the 'Tracks' element, recording
+// segment-level information along the way (segment data offset, 'Seek Position's for
+// 'Cluster'/'Cues', timecode scale, segment duration).  Container headers are
+// 'entered' (not skipped) so that their children get parsed by this same loop;
+// anything unrecognized is skipped whole.  Exits by switching the parse state to
+// PARSING_TRACK (or by an exception when more input is needed).
+void MatroskaFileParser::lookForNextTrack() {
+#ifdef DEBUG
+  fprintf(stderr, "looking for Track\n");
+#endif
+  EBMLId id;
+  EBMLDataSize size;
+
+  // Read and skip over (or enter) each Matroska header, until we get to a 'Track'.
+  while (fCurrentParseState == LOOKING_FOR_TRACKS) {
+    while (!parseEBMLIdAndSize(id, size)) {}
+#ifdef DEBUG
+    fprintf(stderr, "MatroskaFileParser::lookForNextTrack(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
+#endif
+    switch (id.val()) {
+      case MATROSKA_ID_SEGMENT: { // 'Segment' header: enter this
+	// Remember the position, within the file, of the start of Segment data, because Seek Positions are relative to this:
+	fOurFile.fSegmentDataOffset = fCurOffsetInFile;
+	break;
+      }
+      case MATROSKA_ID_SEEK_HEAD: { // 'Seek Head' header: enter this
+	break;
+      }
+      case MATROSKA_ID_SEEK: { // 'Seek' header: enter this
+	break;
+      }
+      case MATROSKA_ID_SEEK_ID: { // 'Seek ID' header: get this value
+	if (parseEBMLNumber(fLastSeekId)) {
+#ifdef DEBUG
+	  fprintf(stderr, "\tSeek ID 0x%s:\t%s\n", fLastSeekId.hexString(), fLastSeekId.stringName());
+#endif
+	}
+	break;
+      }
+      case MATROSKA_ID_SEEK_POSITION: { // 'Seek Position' header: get this value
+	u_int64_t seekPosition;
+	if (parseEBMLVal_unsigned64(size, seekPosition)) {
+	  // Seek Positions are relative to the start of the Segment data:
+	  u_int64_t offsetInFile = fOurFile.fSegmentDataOffset + seekPosition;
+#ifdef DEBUG
+	  fprintf(stderr, "\tSeek Position %llu (=> offset within the file: %llu (0x%llx))\n", seekPosition, offsetInFile, offsetInFile);
+#endif
+	  // The only 'Seek Position's that we care about are for 'Cluster' and 'Cues':
+	  if (fLastSeekId == MATROSKA_ID_CLUSTER) {
+	    fOurFile.fClusterOffset = offsetInFile;
+	  } else if (fLastSeekId == MATROSKA_ID_CUES) {
+	    fOurFile.fCuesOffset = offsetInFile;
+	  }
+	}
+	break;
+      }
+      case MATROSKA_ID_INFO: { // 'Segment Info' header: enter this
+	break;
+      }
+      case MATROSKA_ID_TIMECODE_SCALE: { // 'Timecode Scale' header: get this value
+	unsigned timecodeScale;
+	if (parseEBMLVal_unsigned(size, timecodeScale) && timecodeScale > 0) {
+	  fOurFile.fTimecodeScale = timecodeScale;
+#ifdef DEBUG
+	  fprintf(stderr, "\tTimecode Scale %u ns (=> Segment Duration == %f seconds)\n",
+		  fOurFile.timecodeScale(), fOurFile.segmentDuration()*(fOurFile.fTimecodeScale/1000000000.0f));
+#endif
+	}
+	break;
+      }
+      case MATROSKA_ID_DURATION: { // 'Segment Duration' header: get this value
+	if (parseEBMLVal_float(size, fOurFile.fSegmentDuration)) {
+#ifdef DEBUG
+	  fprintf(stderr, "\tSegment Duration %f (== %f seconds)\n",
+		  fOurFile.segmentDuration(), fOurFile.segmentDuration()*(fOurFile.fTimecodeScale/1000000000.0f));
+#endif
+	}
+	break;
+      }
+      // This whole 'case' is compiled only in DEBUG builds; otherwise a 'Title'
+      // header falls through to the default case and is simply skipped:
+#ifdef DEBUG
+      case MATROSKA_ID_TITLE: { // 'Segment Title': display this value
+	char* title;
+	if (parseEBMLVal_string(size, title)) {
+#ifdef DEBUG
+	  fprintf(stderr, "\tTitle: %s\n", title);
+#endif
+	  delete[] title;
+	}
+	break;
+      }
+#endif
+      case MATROSKA_ID_TRACKS: { // enter this, and move on to parsing 'Tracks'
+	fLimitOffsetInFile = fCurOffsetInFile + size.val(); // Make sure we don't read past the end of this header
+	fCurrentParseState = PARSING_TRACK;
+	break;
+      }
+      default: { // skip over this header
+	skipHeader(size);
+	break;
+      }
+    }
+    setParseState();
+  }
+}
+
+Boolean MatroskaFileParser::parseTrack() {
+#ifdef DEBUG
+ fprintf(stderr, "parsing Track\n");
+#endif
+ // Read and process each Matroska header, until we get to the end of the Track:
+ MatroskaTrack* track = NULL;
+ EBMLId id;
+ EBMLDataSize size;
+ while (fCurOffsetInFile < fLimitOffsetInFile) {
+ while (!parseEBMLIdAndSize(id, size)) {}
+#ifdef DEBUG
+ if (id == MATROSKA_ID_TRACK_ENTRY) fprintf(stderr, "\n"); // makes debugging output easier to read
+ fprintf(stderr, "MatroskaFileParser::parseTrack(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
+#endif
+ switch (id.val()) {
+ case MATROSKA_ID_TRACK_ENTRY: { // 'Track Entry' header: enter this
+ // Create a new "MatroskaTrack" object for this entry:
+ if (track != NULL && track->trackNumber == 0) delete track; // We had a previous "MatroskaTrack" object that was never used
+ track = new MatroskaTrack;
+ break;
+ }
+ case MATROSKA_ID_TRACK_NUMBER: {
+ unsigned trackNumber;
+ if (parseEBMLVal_unsigned(size, trackNumber)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tTrack Number %d\n", trackNumber);
+#endif
+ if (track != NULL && trackNumber != 0) {
+ track->trackNumber = trackNumber;
+ fOurFile.addTrack(track, trackNumber);
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_TRACK_TYPE: {
+ unsigned trackType;
+ if (parseEBMLVal_unsigned(size, trackType) && track != NULL) {
+ // We convert the Matroska 'track type' code into our own code (which we can use as a bitmap):
+ track->trackType
+ = trackType == 1 ? MATROSKA_TRACK_TYPE_VIDEO : trackType == 2 ? MATROSKA_TRACK_TYPE_AUDIO
+ : trackType == 0x11 ? MATROSKA_TRACK_TYPE_SUBTITLE : MATROSKA_TRACK_TYPE_OTHER;
+#ifdef DEBUG
+ fprintf(stderr, "\tTrack Type 0x%02x (%s)\n", trackType,
+ track->trackType == MATROSKA_TRACK_TYPE_VIDEO ? "video" :
+ track->trackType == MATROSKA_TRACK_TYPE_AUDIO ? "audio" :
+ track->trackType == MATROSKA_TRACK_TYPE_SUBTITLE ? "subtitle" :
+ "<other>");
+#endif
+ }
+ break;
+ }
+ case MATROSKA_ID_FLAG_ENABLED: {
+ unsigned flagEnabled;
+ if (parseEBMLVal_unsigned(size, flagEnabled)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tTrack is Enabled: %d\n", flagEnabled);
+#endif
+ if (track != NULL) track->isEnabled = flagEnabled != 0;
+ }
+ break;
+ }
+ case MATROSKA_ID_FLAG_DEFAULT: {
+ unsigned flagDefault;
+ if (parseEBMLVal_unsigned(size, flagDefault)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tTrack is Default: %d\n", flagDefault);
+#endif
+ if (track != NULL) track->isDefault = flagDefault != 0;
+ }
+ break;
+ }
+ case MATROSKA_ID_FLAG_FORCED: {
+ unsigned flagForced;
+ if (parseEBMLVal_unsigned(size, flagForced)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tTrack is Forced: %d\n", flagForced);
+#endif
+ if (track != NULL) track->isForced = flagForced != 0;
+ }
+ break;
+ }
+ case MATROSKA_ID_DEFAULT_DURATION: {
+ unsigned defaultDuration;
+ if (parseEBMLVal_unsigned(size, defaultDuration)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tDefault duration %f ms\n", defaultDuration/1000000.0);
+#endif
+ if (track != NULL) track->defaultDuration = defaultDuration;
+ }
+ break;
+ }
+ case MATROSKA_ID_MAX_BLOCK_ADDITION_ID: {
+ unsigned maxBlockAdditionID;
+ if (parseEBMLVal_unsigned(size, maxBlockAdditionID)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tMax Block Addition ID: %u\n", maxBlockAdditionID);
+#endif
+ }
+ break;
+ }
+ case MATROSKA_ID_NAME: {
+ char* name;
+ if (parseEBMLVal_string(size, name)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tName: %s\n", name);
+#endif
+ if (track != NULL) {
+ delete[] track->name; track->name = name;
+ } else {
+ delete[] name;
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_LANGUAGE: {
+ char* language;
+ if (parseEBMLVal_string(size, language)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tLanguage: %s\n", language);
+#endif
+ if (track != NULL) {
+ delete[] track->language; track->language = language;
+ } else {
+ delete[] language;
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_CODEC: {
+ char* codecID;
+ if (parseEBMLVal_string(size, codecID)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tCodec ID: %s\n", codecID);
+#endif
+ if (track != NULL) {
+ delete[] track->codecID; track->codecID = codecID;
+
+ // Also set the track's "mimeType" field, if we can deduce it from the "codecID":
+ if (strcmp(codecID, "A_PCM/INT/BIG") == 0) {
+ track->mimeType = "audio/L16";
+ } else if (strncmp(codecID, "A_MPEG", 6) == 0) {
+ track->mimeType = "audio/MPEG";
+ } else if (strncmp(codecID, "A_AAC", 5) == 0) {
+ track->mimeType = "audio/AAC";
+ } else if (strncmp(codecID, "A_AC3", 5) == 0) {
+ track->mimeType = "audio/AC3";
+ } else if (strncmp(codecID, "A_VORBIS", 8) == 0) {
+ track->mimeType = "audio/VORBIS";
+ } else if (strcmp(codecID, "A_OPUS") == 0) {
+ track->mimeType = "audio/OPUS";
+ track->codecIsOpus = True;
+ } else if (strcmp(codecID, "V_MPEG4/ISO/AVC") == 0) {
+ track->mimeType = "video/H264";
+ } else if (strcmp(codecID, "V_MPEGH/ISO/HEVC") == 0) {
+ track->mimeType = "video/H265";
+ } else if (strncmp(codecID, "V_VP8", 5) == 0) {
+ track->mimeType = "video/VP8";
+ } else if (strncmp(codecID, "V_VP9", 5) == 0) {
+ track->mimeType = "video/VP9";
+ } else if (strncmp(codecID, "V_THEORA", 8) == 0) {
+ track->mimeType = "video/THEORA";
+ } else if (strncmp(codecID, "S_TEXT", 6) == 0) {
+ track->mimeType = "text/T140";
+ } else if (strncmp(codecID, "V_MJPEG", 7) == 0) {
+ track->mimeType = "video/JPEG";
+ } else if (strncmp(codecID, "V_UNCOMPRESSED", 14) == 0) {
+ track->mimeType = "video/RAW";
+ }
+ } else {
+ delete[] codecID;
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_CODEC_PRIVATE: {
+ u_int8_t* codecPrivate;
+ unsigned codecPrivateSize;
+ if (parseEBMLVal_binary(size, codecPrivate)) {
+ codecPrivateSize = (unsigned)size.val();
+#ifdef DEBUG
+ fprintf(stderr, "\tCodec Private: ");
+ for (unsigned i = 0; i < codecPrivateSize; ++i) fprintf(stderr, "%02x:", codecPrivate[i]);
+ fprintf(stderr, "\n");
+#endif
+ if (track != NULL) {
+ delete[] track->codecPrivate; track->codecPrivate = codecPrivate;
+ track->codecPrivateSize = codecPrivateSize;
+
+ // Hack for H.264 and H.265: The 'codec private' data contains
+ // the size of NAL unit lengths:
+ if (track->codecID != NULL) {
+ if (strcmp(track->codecID, "V_MPEG4/ISO/AVC") == 0) { // H.264
+ // Byte 4 of the 'codec private' data contains 'lengthSizeMinusOne':
+ if (codecPrivateSize >= 5) track->subframeSizeSize = (codecPrivate[4]&0x3) + 1;
+ } else if (strcmp(track->codecID, "V_MPEGH/ISO/HEVC") == 0) { // H.265
+ // H.265 'codec private' data is *supposed* to use the format that's described in
+ // http://lists.matroska.org/pipermail/matroska-devel/2013-September/004567.html
+ // However, some Matroska files use the same format that was used for H.264.
+ // We check for this here, by checking various fields that are supposed to be
+ // 'all-1' in the 'correct' format:
+ if (codecPrivateSize < 23 || (codecPrivate[13]&0xF0) != 0xF0 ||
+ (codecPrivate[15]&0xFC) != 0xFC || (codecPrivate[16]&0xFC) != 0xFC ||
+ (codecPrivate[17]&0xF8) != 0xF8 || (codecPrivate[18]&0xF8) != 0xF8) {
+ // The 'correct' format isn't being used, so assume the H.264 format instead:
+ track->codecPrivateUsesH264FormatForH265 = True;
+
+ // Byte 4 of the 'codec private' data contains 'lengthSizeMinusOne':
+ if (codecPrivateSize >= 5) track->subframeSizeSize = (codecPrivate[4]&0x3) + 1;
+ } else {
+ // This looks like the 'correct' format:
+ track->codecPrivateUsesH264FormatForH265 = False;
+
+ // Byte 21 of the 'codec private' data contains 'lengthSizeMinusOne':
+ track->subframeSizeSize = (codecPrivate[21]&0x3) + 1;
+ }
+ }
+ }
+ } else {
+ delete[] codecPrivate;
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_VIDEO: { // 'Video settings' header: enter this
+ break;
+ }
+ case MATROSKA_ID_PIXEL_WIDTH: {
+ unsigned pixelWidth;
+ if (parseEBMLVal_unsigned(size, pixelWidth)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tPixel Width %d\n", pixelWidth);
+#endif
+ if (track != NULL) track->pixelWidth = pixelWidth;
+ }
+ break;
+ }
+ case MATROSKA_ID_PIXEL_HEIGHT: {
+ unsigned pixelHeight;
+ if (parseEBMLVal_unsigned(size, pixelHeight)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tPixel Height %d\n", pixelHeight);
+#endif
+ if (track != NULL) track->pixelHeight = pixelHeight;
+ }
+ break;
+ }
+ case MATROSKA_ID_DISPLAY_WIDTH: {
+ unsigned displayWidth;
+ if (parseEBMLVal_unsigned(size, displayWidth)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tDisplay Width %d\n", displayWidth);
+#endif
+ }
+ break;
+ }
+ case MATROSKA_ID_DISPLAY_HEIGHT: {
+ unsigned displayHeight;
+ if (parseEBMLVal_unsigned(size, displayHeight)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tDisplay Height %d\n", displayHeight);
+#endif
+ }
+ break;
+ }
+ case MATROSKA_ID_DISPLAY_UNIT: {
+ unsigned displayUnit;
+ if (parseEBMLVal_unsigned(size, displayUnit)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tDisplay Unit %d\n", displayUnit);
+#endif
+ }
+ break;
+ }
+ case MATROSKA_ID_AUDIO: { // 'Audio settings' header: enter this
+ break;
+ }
+ case MATROSKA_ID_SAMPLING_FREQUENCY: {
+ float samplingFrequency;
+ if (parseEBMLVal_float(size, samplingFrequency)) {
+ if (track != NULL) {
+ track->samplingFrequency = (unsigned)samplingFrequency;
+#ifdef DEBUG
+ fprintf(stderr, "\tSampling frequency %f (->%d)\n", samplingFrequency, track->samplingFrequency);
+#endif
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_OUTPUT_SAMPLING_FREQUENCY: {
+ float outputSamplingFrequency;
+ if (parseEBMLVal_float(size, outputSamplingFrequency)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tOutput sampling frequency %f\n", outputSamplingFrequency);
+#endif
+ }
+ break;
+ }
+ case MATROSKA_ID_CHANNELS: {
+ unsigned numChannels;
+ if (parseEBMLVal_unsigned(size, numChannels)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tChannels %d\n", numChannels);
+#endif
+ if (track != NULL) track->numChannels = numChannels;
+ }
+ break;
+ }
+ case MATROSKA_ID_BIT_DEPTH: {
+ unsigned bitDepth;
+ if (parseEBMLVal_unsigned(size, bitDepth)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tBit Depth %d\n", bitDepth);
+#endif
+ if (track != NULL) track->bitDepth = bitDepth;
+ }
+ break;
+ }
+ case MATROSKA_ID_CONTENT_ENCODINGS:
+ case MATROSKA_ID_CONTENT_ENCODING: { // 'Content Encodings' or 'Content Encoding' header: enter this
+ break;
+ }
+ case MATROSKA_ID_CONTENT_COMPRESSION: { // 'Content Compression' header: enter this
+ // Note: We currently support only 'Header Stripping' compression, not 'zlib' compression (the default algorithm).
+ // Therefore, we disable this track, unless/until we later see that 'Header Stripping' is supported:
+ if (track != NULL) track->isEnabled = False;
+ break;
+ }
+ case MATROSKA_ID_CONTENT_COMP_ALGO: {
+ unsigned contentCompAlgo;
+ if (parseEBMLVal_unsigned(size, contentCompAlgo)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tContent Compression Algorithm %d (%s)\n", contentCompAlgo,
+ contentCompAlgo == 0 ? "zlib" : contentCompAlgo == 3 ? "Header Stripping" : "<unknown>");
+#endif
+ // The only compression algorithm that we support is #3: Header Stripping; disable the track otherwise
+ if (track != NULL) track->isEnabled = contentCompAlgo == 3;
+ }
+ break;
+ }
+ case MATROSKA_ID_CONTENT_COMP_SETTINGS: {
+ u_int8_t* headerStrippedBytes;
+ unsigned headerStrippedBytesSize;
+ if (parseEBMLVal_binary(size, headerStrippedBytes)) {
+ headerStrippedBytesSize = (unsigned)size.val();
+#ifdef DEBUG
+ fprintf(stderr, "\tHeader Stripped Bytes: ");
+ for (unsigned i = 0; i < headerStrippedBytesSize; ++i) fprintf(stderr, "%02x:", headerStrippedBytes[i]);
+ fprintf(stderr, "\n");
+#endif
+ if (track != NULL) {
+ delete[] track->headerStrippedBytes; track->headerStrippedBytes = headerStrippedBytes;
+ track->headerStrippedBytesSize = headerStrippedBytesSize;
+ } else {
+ delete[] headerStrippedBytes;
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_CONTENT_ENCRYPTION: { // 'Content Encrpytion' header: skip this
+ // Note: We don't currently support encryption at all. Therefore, we disable this track:
+ if (track != NULL) track->isEnabled = False;
+ // Fall through to...
+ }
+ case MATROSKA_ID_COLOR_SPACE: {
+ u_int8_t* colourSpace;
+ unsigned colourSpaceSize;
+ if (parseEBMLVal_binary(size, colourSpace)) {
+ colourSpaceSize = (unsigned)size.val();
+#ifdef DEBUG
+ fprintf(stderr, "\tColor space : %02x %02x %02x %02x\n", colourSpace[0], colourSpace[1], colourSpace[2], colourSpace[3]);
+#endif
+ if ((track != NULL) && (colourSpaceSize == 4)) {
+ //convert to sampling value (rfc 4175)
+ if ((strncmp((const char*)colourSpace, "I420", 4) == 0) || (strncmp((const char*)colourSpace, "IYUV", 4) == 0)){
+ track->colorSampling = "YCbCr-4:2:0";
+ }
+ else if ((strncmp((const char*)colourSpace, "YUY2", 4) == 0) || (strncmp((const char*)colourSpace, "UYVY", 4) == 0)){
+ track->colorSampling = "YCbCr-4:2:2";
+ }
+ else if (strncmp((const char*)colourSpace, "AYUV", 4) == 0) {
+ track->colorSampling = "YCbCr-4:4:4";
+ }
+ else if ((strncmp((const char*)colourSpace, "Y41P", 4) == 0) || (strncmp((const char*)colourSpace, "Y41T", 4) == 0)) {
+ track->colorSampling = "YCbCr-4:1:1";
+ }
+ else if (strncmp((const char*)colourSpace, "RGBA", 4) == 0) {
+ track->colorSampling = "RGBA";
+ }
+ else if (strncmp((const char*)colourSpace, "BGRA", 4) == 0) {
+ track->colorSampling = "BGRA";
+ }
+ } else {
+ delete[] colourSpace;
+ }
+ }
+ break;
+ }
+ case MATROSKA_ID_PRIMARIES: {
+ unsigned primaries;
+ if (parseEBMLVal_unsigned(size, primaries)) {
+#ifdef DEBUG
+ fprintf(stderr, "\tPrimaries %u\n", primaries);
+#endif
+ if (track != NULL) {
+ switch (primaries) {
+ case 1: //ITU-R BT.709
+ track->colorimetry = "BT709-2";
+ break;
+ case 7: //SMPTE 240M
+ track->colorimetry = "SMPTE240M";
+ break;
+ case 2: //Unspecified
+ case 3: //Reserved
+ case 4: //ITU-R BT.470M
+ case 5: //ITU-R BT.470BG
+ case 6: //SMPTE 170M
+ case 8: //FILM
+ case 9: //ITU-R BT.2020
+ default:
+#ifdef DEBUG
+ fprintf(stderr, "\tUnsupported color primaries %u\n", primaries);
+#endif
+ break;
+ }
+ }
+ }
+ }
+ default: { // We don't process this header, so just skip over it:
+ skipHeader(size);
+ break;
+ }
+ }
+ setParseState();
+ }
+
+ fLimitOffsetInFile = 0; // reset
+ if (track != NULL && track->trackNumber == 0) delete track; // We had a previous "MatroskaTrack" object that was never used
+ return True; // we're done parsing track entries
+}
+
// Reads successive Matroska/EBML headers until a 'SimpleBlock' or 'Block'
// header is reached; at that point "fBlockSize" is recorded and
// "fCurrentParseState" becomes PARSING_BLOCK, which ends the loop.
// Along the way, each Cluster's timecode is saved in "fClusterTimecode"
// (it is used later to compute frame presentation times).
// "setParseState()" is called after each parsed header so that parsing can
// resume from a clean header boundary if input runs out mid-way.
void MatroskaFileParser::lookForNextBlock() {
#ifdef DEBUG
  fprintf(stderr, "looking for Block\n");
#endif
  // Read and skip over each Matroska header, until we get to a 'Cluster':
  EBMLId id;
  EBMLDataSize size;
  while (fCurrentParseState == LOOKING_FOR_BLOCK) {
    // Retry until a complete id+size pair has been parsed.
    // NOTE(review): "parseEBMLIdAndSize()" presumably suspends internally
    // when it runs out of buffered data (otherwise this would busy-loop) -
    // confirm against the base parser class.
    while (!parseEBMLIdAndSize(id, size)) {}
#ifdef DEBUG
    fprintf(stderr, "MatroskaFileParser::lookForNextBlock(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
    switch (id.val()) {
      case MATROSKA_ID_SEGMENT: { // 'Segment' header: enter this
        break;
      }
      case MATROSKA_ID_CLUSTER: { // 'Cluster' header: enter this
        break;
      }
      case MATROSKA_ID_TIMECODE: { // 'Timecode' header: get this value
        unsigned timecode;
        if (parseEBMLVal_unsigned(size, timecode)) {
          // Remember the cluster's timecode; block timecodes are relative to it:
          fClusterTimecode = timecode;
#ifdef DEBUG
          fprintf(stderr, "\tCluster timecode: %d (== %f seconds)\n", fClusterTimecode, fClusterTimecode*(fOurFile.fTimecodeScale/1000000000.0));
#endif
        }
        break;
      }
      case MATROSKA_ID_BLOCK_GROUP: { // 'Block Group' header: enter this
        break;
      }
      case MATROSKA_ID_SIMPLEBLOCK:
      case MATROSKA_ID_BLOCK: { // 'SimpleBlock' or 'Block' header: enter this (and we're done)
        // Record the block's size, and switch state; the enclosing "while"
        // loop then terminates:
        fBlockSize = (unsigned)size.val();
        fCurrentParseState = PARSING_BLOCK;
        break;
      }
      case MATROSKA_ID_BLOCK_DURATION: { // 'Block Duration' header: get this value (but we currently don't do anything with it)
        unsigned blockDuration;
        if (parseEBMLVal_unsigned(size, blockDuration)) {
#ifdef DEBUG
          fprintf(stderr, "\tblock duration: %d (== %f ms)\n", blockDuration, (float)(blockDuration*fOurFile.fTimecodeScale/1000000.0));
#endif
        }
        break;
      }
      // Attachments are parsed only if we're in DEBUG mode (otherwise we just skip over them):
#ifdef DEBUG
      case MATROSKA_ID_ATTACHMENTS: { // 'Attachments': enter this
        break;
      }
      case MATROSKA_ID_ATTACHED_FILE: { // 'Attached File': enter this
        break;
      }
      case MATROSKA_ID_FILE_DESCRIPTION: { // 'File Description': get this value
        char* fileDescription;
        if (parseEBMLVal_string(size, fileDescription)) {
#ifdef DEBUG
          fprintf(stderr, "\tFile Description: %s\n", fileDescription);
#endif
          delete[] fileDescription;
        }
        break;
      }
      case MATROSKA_ID_FILE_NAME: { // 'File Name': get this value
        char* fileName;
        if (parseEBMLVal_string(size, fileName)) {
#ifdef DEBUG
          fprintf(stderr, "\tFile Name: %s\n", fileName);
#endif
          delete[] fileName;
        }
        break;
      }
      case MATROSKA_ID_FILE_MIME_TYPE: { // 'File MIME Type': get this value
        char* fileMIMEType;
        if (parseEBMLVal_string(size, fileMIMEType)) {
#ifdef DEBUG
          fprintf(stderr, "\tFile MIME Type: %s\n", fileMIMEType);
#endif
          delete[] fileMIMEType;
        }
        break;
      }
      case MATROSKA_ID_FILE_UID: { // 'File UID': get this value
        unsigned fileUID;
        if (parseEBMLVal_unsigned(size, fileUID)) {
#ifdef DEBUG
          fprintf(stderr, "\tFile UID: 0x%x\n", fileUID);
#endif
        }
        break;
      }
#endif
      default: { // skip over this header
        skipHeader(size);
        break;
      }
    }
    // Checkpoint our position, so a later resume restarts at the next header:
    setParseState();
  }
}
+
// Parses the 'Cues' element (if one begins at the current parse position),
// recording each (cue time, cluster file offset, block number) triple - via
// "fOurFile.addCuePoint()" - for later use in seeking.
// Returns True when done (including the case where the next header wasn't
// 'Cues' at all, in which case nothing is recorded).
Boolean MatroskaFileParser::parseCues() {
#if defined(DEBUG) || defined(DEBUG_CUES)
  fprintf(stderr, "parsing Cues\n");
#endif
  EBMLId id;
  EBMLDataSize size;

  // Read the next header, which should be MATROSKA_ID_CUES:
  if (!parseEBMLIdAndSize(id, size) || id != MATROSKA_ID_CUES) return True; // The header wasn't what we expected, so we're done
  fLimitOffsetInFile = fCurOffsetInFile + size.val(); // Make sure we don't read past the end of this header

  // State carried between headers: the most recently seen 'Cue Time' and
  // 'Cue Cluster Position'.  A cue point is recorded when the cluster
  // position is seen, and (possibly) updated by a later 'Cue Block Number':
  double currentCueTime = 0.0;
  u_int64_t currentClusterOffsetInFile = 0;

  while (fCurOffsetInFile < fLimitOffsetInFile) {
    // Retry until a complete id+size pair has been parsed:
    while (!parseEBMLIdAndSize(id, size)) {}
#ifdef DEBUG_CUES
    if (id == MATROSKA_ID_CUE_POINT) fprintf(stderr, "\n"); // makes debugging output easier to read
    fprintf(stderr, "MatroskaFileParser::parseCues(): Parsed id 0x%s (%s), size: %lld\n", id.hexString(), id.stringName(), size.val());
#endif
    switch (id.val()) {
      case MATROSKA_ID_CUE_POINT: { // 'Cue Point' header: enter this
        break;
      }
      case MATROSKA_ID_CUE_TIME: { // 'Cue Time' header: get this value
        unsigned cueTime;
        if (parseEBMLVal_unsigned(size, cueTime)) {
          // Convert the raw timecode into seconds, using the file's timecode scale:
          currentCueTime = cueTime*(fOurFile.fTimecodeScale/1000000000.0);
#ifdef DEBUG_CUES
          fprintf(stderr, "\tCue Time %d (== %f seconds)\n", cueTime, currentCueTime);
#endif
        }
        break;
      }
      case MATROSKA_ID_CUE_TRACK_POSITIONS: { // 'Cue Track Positions' header: enter this
        break;
      }
      case MATROSKA_ID_CUE_TRACK: { // 'Cue Track' header: get this value (but only for debugging; we don't do anything with it)
        unsigned cueTrack;
        if (parseEBMLVal_unsigned(size, cueTrack)) {
#ifdef DEBUG_CUES
          fprintf(stderr, "\tCue Track %d\n", cueTrack);
#endif
        }
        break;
      }
      case MATROSKA_ID_CUE_CLUSTER_POSITION: { // 'Cue Cluster Position' header: get this value
        u_int64_t cueClusterPosition;
        if (parseEBMLVal_unsigned64(size, cueClusterPosition)) {
          // The position is relative to the start of the Segment's data:
          currentClusterOffsetInFile = fOurFile.fSegmentDataOffset + cueClusterPosition;
#ifdef DEBUG_CUES
          fprintf(stderr, "\tCue Cluster Position %llu (=> offset within the file: %llu (0x%llx))\n", cueClusterPosition, currentClusterOffsetInFile, currentClusterOffsetInFile);
#endif
          // Record this cue point:
          fOurFile.addCuePoint(currentCueTime, currentClusterOffsetInFile, 1/*default block number within cluster*/);
        }
        break;
      }
      case MATROSKA_ID_CUE_BLOCK_NUMBER: { // 'Cue Block Number' header: get this value
        unsigned cueBlockNumber;
        if (parseEBMLVal_unsigned(size, cueBlockNumber) && cueBlockNumber != 0) {
#ifdef DEBUG_CUES
          fprintf(stderr, "\tCue Block Number %d\n", cueBlockNumber);
#endif
          // Record this cue point (overwriting any existing entry for this cue time):
          fOurFile.addCuePoint(currentCueTime, currentClusterOffsetInFile, cueBlockNumber);
        }
        break;
      }
      default: { // We don't process this header, so just skip over it:
        skipHeader(size);
        break;
      }
    }
    // Checkpoint our position, so a later resume restarts at the next header:
    setParseState();
  }

  fLimitOffsetInFile = 0; // reset
#if defined(DEBUG) || defined(DEBUG_CUES)
  fprintf(stderr, "done parsing Cues\n");
#endif
#ifdef DEBUG_CUES
  fprintf(stderr, "Cue Point tree: ");
  fOurFile.printCuePoints(stderr);
  fprintf(stderr, "\n");
#endif
  return True; // we're done parsing Cues
}
+
// The four 'lacing' schemes that a Matroska 'SimpleBlock'/'Block' can use to
// pack multiple frames together: none, Xiph-style, fixed-size, or EBML-style.
enum MatroskaLacingType {
  NoLacing,
  XiphLacing,
  FixedSizeLacing,
  EBMLLacing
};
+
+void MatroskaFileParser::parseBlock() {
+#ifdef DEBUG
+ fprintf(stderr, "parsing SimpleBlock or Block\n");
+#endif
+ do {
+ unsigned blockStartPos = curOffset();
+
+ // The block begins with the track number:
+ EBMLNumber trackNumber;
+ if (!parseEBMLNumber(trackNumber)) break;
+ fBlockTrackNumber = (unsigned)trackNumber.val();
+
+ // If this track is not being read, then skip the rest of this block, and look for another one:
+ if (fOurDemux->lookupDemuxedTrack(fBlockTrackNumber) == NULL) {
+ unsigned headerBytesSeen = curOffset() - blockStartPos;
+ if (headerBytesSeen < fBlockSize) {
+ skipBytes(fBlockSize - headerBytesSeen);
+ }
+#ifdef DEBUG
+ fprintf(stderr, "\tSkipped block for unused track number %d\n", fBlockTrackNumber);
+#endif
+ fCurrentParseState = LOOKING_FOR_BLOCK;
+ setParseState();
+ return;
+ }
+
+ MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber);
+ if (track == NULL) break; // shouldn't happen
+
+ // The next two bytes are the block's timecode (relative to the cluster timecode)
+ fBlockTimecode = (get1Byte()<<8)|get1Byte();
+
+ // The next byte indicates the type of 'lacing' used:
+ u_int8_t c = get1Byte();
+ c &= 0x6; // we're interested in bits 5-6 only
+ MatroskaLacingType lacingType = (c==0x0)?NoLacing : (c==0x02)?XiphLacing : (c==0x04)?FixedSizeLacing : EBMLLacing;
+#ifdef DEBUG
+ fprintf(stderr, "\ttrack number %d, timecode %d (=> %f seconds), %s lacing\n", fBlockTrackNumber, fBlockTimecode, (fClusterTimecode+fBlockTimecode)*(fOurFile.fTimecodeScale/1000000000.0), (lacingType==NoLacing)?"no" : (lacingType==XiphLacing)?"Xiph" : (lacingType==FixedSizeLacing)?"fixed-size" : "EBML");
+#endif
+
+ if (lacingType == NoLacing) {
+ fNumFramesInBlock = 1;
+ } else {
+ // The next byte tells us how many frames are present in this block
+ fNumFramesInBlock = get1Byte() + 1;
+ }
+ delete[] fFrameSizesWithinBlock; fFrameSizesWithinBlock = new unsigned[fNumFramesInBlock];
+ if (fFrameSizesWithinBlock == NULL) break;
+
+ if (lacingType == NoLacing) {
+ unsigned headerBytesSeen = curOffset() - blockStartPos;
+ if (headerBytesSeen > fBlockSize) break;
+
+ fFrameSizesWithinBlock[0] = fBlockSize - headerBytesSeen;
+ } else if (lacingType == FixedSizeLacing) {
+ unsigned headerBytesSeen = curOffset() - blockStartPos;
+ if (headerBytesSeen > fBlockSize) break;
+
+ unsigned frameBytesAvailable = fBlockSize - headerBytesSeen;
+ unsigned constantFrameSize = frameBytesAvailable/fNumFramesInBlock;
+
+ for (unsigned i = 0; i < fNumFramesInBlock; ++i) {
+ fFrameSizesWithinBlock[i] = constantFrameSize;
+ }
+ // If there are any bytes left over, assign them to the last frame:
+ fFrameSizesWithinBlock[fNumFramesInBlock-1] += frameBytesAvailable%fNumFramesInBlock;
+ } else { // EBML or Xiph lacing
+ unsigned curFrameSize = 0;
+ unsigned frameSizesTotal = 0;
+ unsigned i;
+
+ for (i = 0; i < fNumFramesInBlock-1; ++i) {
+ if (lacingType == EBMLLacing) {
+ EBMLNumber frameSize;
+ if (!parseEBMLNumber(frameSize)) break;
+ unsigned fsv = (unsigned)frameSize.val();
+
+ if (i == 0) {
+ curFrameSize = fsv;
+ } else {
+ // The value we read is a signed value, that's added to the previous frame size, to get the current frame size:
+ unsigned toSubtract = (fsv>0xFFFFFF)?0x07FFFFFF : (fsv>0xFFFF)?0x0FFFFF : (fsv>0xFF)?0x1FFF : 0x3F;
+ int fsv_signed = fsv - toSubtract;
+ curFrameSize += fsv_signed;
+ if ((int)curFrameSize < 0) break;
+ }
+ } else { // Xiph lacing
+ curFrameSize = 0;
+ u_int8_t c;
+ do {
+ c = get1Byte();
+ curFrameSize += c;
+ } while (c == 0xFF);
+ }
+ fFrameSizesWithinBlock[i] = curFrameSize;
+ frameSizesTotal += curFrameSize;
+ }
+ if (i != fNumFramesInBlock-1) break; // an error occurred within the "for" loop
+
+ // Compute the size of the final frame within the block (from the block's size, and the frame sizes already computed):)
+ unsigned headerBytesSeen = curOffset() - blockStartPos;
+ if (headerBytesSeen + frameSizesTotal > fBlockSize) break;
+ fFrameSizesWithinBlock[i] = fBlockSize - (headerBytesSeen + frameSizesTotal);
+ }
+
+ // We're done parsing headers within the block, and (as a result) we now know the sizes of all frames within the block.
+ // If we have 'stripped bytes' that are common to (the front of) all frames, then count them now:
+ if (track->headerStrippedBytesSize != 0) {
+ for (unsigned i = 0; i < fNumFramesInBlock; ++i) fFrameSizesWithinBlock[i] += track->headerStrippedBytesSize;
+ }
+#ifdef DEBUG
+ fprintf(stderr, "\tThis block contains %d frame(s); size(s):", fNumFramesInBlock);
+ unsigned frameSizesTotal = 0;
+ for (unsigned i = 0; i < fNumFramesInBlock; ++i) {
+ fprintf(stderr, " %d", fFrameSizesWithinBlock[i]);
+ frameSizesTotal += fFrameSizesWithinBlock[i];
+ }
+ if (fNumFramesInBlock > 1) fprintf(stderr, " (total: %u)", frameSizesTotal);
+ fprintf(stderr, " bytes\n");
+#endif
+ // Next, start delivering these frames:
+ fCurrentParseState = DELIVERING_FRAME_WITHIN_BLOCK;
+ fCurOffsetWithinFrame = fNextFrameNumberToDeliver = 0;
+ setParseState();
+ return;
+ } while (0);
+
+ // An error occurred. Try to recover:
+#ifdef DEBUG
+ fprintf(stderr, "parseBlock(): Error parsing data; trying to recover...\n");
+#endif
+ fCurrentParseState = LOOKING_FOR_BLOCK;
+}
+
// Delivers (or begins delivering) the next frame within the current block to
// the track's reader.  Returns False only when the reader isn't currently
// awaiting data (delivery is deferred, and the parser position is restored so
// we re-read from the frame's start next time); returns True otherwise -
// both after a successful (started) delivery, and after an error that caused
// us to revert to looking for another block.
Boolean MatroskaFileParser::deliverFrameWithinBlock() {
#ifdef DEBUG
  fprintf(stderr, "delivering frame within SimpleBlock or Block\n");
#endif
  do {
    MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber);
    if (track == NULL) break; // shouldn't happen

    MatroskaDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fBlockTrackNumber);
    if (demuxedTrack == NULL) break; // shouldn't happen
    if (!demuxedTrack->isCurrentlyAwaitingData()) {
      // Someone has been reading this stream, but isn't right now.
      // We can't deliver this frame until he asks for it, so punt for now.
      // The next time he asks for a frame, he'll get it.
#ifdef DEBUG
      fprintf(stderr, "\tdeferring delivery of frame #%d (%d bytes)", fNextFrameNumberToDeliver, fFrameSizesWithinBlock[fNextFrameNumberToDeliver]);
      if (track->haveSubframes()) fprintf(stderr, "[offset %d]", fCurOffsetWithinFrame);
      fprintf(stderr, "\n");
#endif
      restoreSavedParserState(); // so we read from the beginning next time
      return False;
    }

    unsigned frameSize;
    u_int8_t const* specialFrameSource = NULL;
    // A synthesized Opus 'comment' header: the "OpusTags" magic, followed by
    // eight zero bytes:
    u_int8_t const opusCommentHeader[16]
      = {'O','p','u','s','T','a','g','s', 0, 0, 0, 0, 0, 0, 0, 0};
    if (track->codecIsOpus && demuxedTrack->fOpusTrackNumber < 2) {
      // Special case for Opus audio. The first frame (the 'configuration' header) comes from
      // the 'private data'. The second frame (the 'comment' header) is synthesized by
      // us here:
      if (demuxedTrack->fOpusTrackNumber == 0) {
        specialFrameSource = track->codecPrivate;
        frameSize = track->codecPrivateSize;
      } else { // demuxedTrack->fOpusTrackNumber == 1
        specialFrameSource = opusCommentHeader;
        frameSize = sizeof opusCommentHeader;
      }
      ++demuxedTrack->fOpusTrackNumber;
    } else {
      frameSize = fFrameSizesWithinBlock[fNextFrameNumberToDeliver];
      if (track->haveSubframes()) {
        // The next "track->subframeSizeSize" bytes contain the length of a 'subframe':
        if (fCurOffsetWithinFrame + track->subframeSizeSize > frameSize) break; // sanity check
        unsigned subframeSize = 0;
        for (unsigned i = 0; i < track->subframeSizeSize; ++i) {
          u_int8_t c;
          // "getCommonFrameBytes()" first consumes any remaining 'header
          // stripped' bytes; if the byte wasn't satisfied from those, read it
          // from the file proper:
          getCommonFrameBytes(track, &c, 1, 0);
          if (fCurFrameNumBytesToGet > 0) { // it'll be 1
            c = get1Byte();
            ++fCurOffsetWithinFrame;
          }
          // Accumulate the big-endian subframe length, one byte at a time:
          subframeSize = subframeSize*256 + c;
        }
        if (subframeSize == 0 || fCurOffsetWithinFrame + subframeSize > frameSize) break; // sanity check
        frameSize = subframeSize;
      }
    }

    // Compute the presentation time of this frame (from the cluster timecode, the block timecode, and the default duration):
    double pt = (fClusterTimecode+fBlockTimecode)*(fOurFile.fTimecodeScale/1000000000.0)
      + fNextFrameNumberToDeliver*(track->defaultDuration/1000000000.0);
    if (fPresentationTimeOffset == 0.0) {
      // This is the first time we've computed a presentation time. Compute an offset to make the presentation times aligned
      // with 'wall clock' time:
      struct timeval timeNow;
      gettimeofday(&timeNow, NULL);
      double ptNow = timeNow.tv_sec + timeNow.tv_usec/1000000.0;
      fPresentationTimeOffset = ptNow - pt;
    }
    pt += fPresentationTimeOffset;
    struct timeval presentationTime;
    presentationTime.tv_sec = (unsigned)pt;
    presentationTime.tv_usec = (unsigned)((pt - presentationTime.tv_sec)*1000000);
    unsigned durationInMicroseconds;
    if (specialFrameSource != NULL) {
      // Synthesized/config frames get a zero duration:
      durationInMicroseconds = 0;
    } else { // normal case
      durationInMicroseconds = track->defaultDuration/1000;
      if (track->haveSubframes()) {
        // If this is a 'subframe', use a duration of 0 instead (unless it's the last 'subframe'):
        if (fCurOffsetWithinFrame + frameSize + track->subframeSizeSize < fFrameSizesWithinBlock[fNextFrameNumberToDeliver]) {
          // There's room for at least one more subframe after this, so give this subframe a duration of 0
          durationInMicroseconds = 0;
        }
      }
    }

    if (track->defaultDuration == 0) {
      // Adjust the frame duration to keep the sum of frame durations aligned with presentation times.
      if (demuxedTrack->prevPresentationTime().tv_sec != 0) { // not the first time for this track
        // Add the elapsed (wall-clock presentation) time since the previous
        // frame to the running imbalance, in microseconds:
        demuxedTrack->durationImbalance()
          += (presentationTime.tv_sec - demuxedTrack->prevPresentationTime().tv_sec)*1000000
          + (presentationTime.tv_usec - demuxedTrack->prevPresentationTime().tv_usec);
      }
      int adjustment = 0;
      if (demuxedTrack->durationImbalance() > 0) {
        // The duration needs to be increased.
        int const adjustmentThreshold = 100000; // don't increase the duration by more than this amount (in case there's a mistake)
        adjustment = demuxedTrack->durationImbalance() > adjustmentThreshold
          ? adjustmentThreshold : demuxedTrack->durationImbalance();
      } else if (demuxedTrack->durationImbalance() < 0) {
        // The duration needs to be decreased.
        // (But never below zero - clamp the adjustment to the duration itself.)
        adjustment = (unsigned)(-demuxedTrack->durationImbalance()) < durationInMicroseconds
          ? demuxedTrack->durationImbalance() : -(int)durationInMicroseconds;
      }
      durationInMicroseconds += adjustment;
      demuxedTrack->durationImbalance() -= durationInMicroseconds; // for next time
      demuxedTrack->prevPresentationTime() = presentationTime; // for next time
    }

    demuxedTrack->presentationTime() = presentationTime;
    demuxedTrack->durationInMicroseconds() = durationInMicroseconds;

    // Deliver the next block now:
    if (frameSize > demuxedTrack->maxSize()) {
      // The frame won't fit in the reader's buffer; deliver what fits, and
      // record how much was truncated:
      demuxedTrack->numTruncatedBytes() = frameSize - demuxedTrack->maxSize();
      demuxedTrack->frameSize() = demuxedTrack->maxSize();
    } else { // normal case
      demuxedTrack->numTruncatedBytes() = 0;
      demuxedTrack->frameSize() = frameSize;
    }
    // Prepend any 'header stripped' bytes, and set up the get/skip counts for
    // the remaining (in-file) frame bytes:
    getCommonFrameBytes(track, demuxedTrack->to(), demuxedTrack->frameSize(), demuxedTrack->numTruncatedBytes());

    // Next, deliver (and/or skip) bytes from the input file:
    if (specialFrameSource != NULL) {
      // The frame data comes from memory (not the file), so deliver it directly:
      memmove(demuxedTrack->to(), specialFrameSource, demuxedTrack->frameSize());
#ifdef DEBUG
      fprintf(stderr, "\tdelivered special frame: %d bytes", demuxedTrack->frameSize());
      if (demuxedTrack->numTruncatedBytes() > 0) fprintf(stderr, " (%d bytes truncated)", demuxedTrack->numTruncatedBytes());
      fprintf(stderr, " @%u.%06u (%.06f from start); duration %u us\n", demuxedTrack->presentationTime().tv_sec, demuxedTrack->presentationTime().tv_usec, demuxedTrack->presentationTime().tv_sec+demuxedTrack->presentationTime().tv_usec/1000000.0-fPresentationTimeOffset, demuxedTrack->durationInMicroseconds());
#endif
      setParseState();
      FramedSource::afterGetting(demuxedTrack); // completes delivery
    } else { // normal case
      // The frame bytes get read from the file in "deliverFrameBytes()":
      fCurrentParseState = DELIVERING_FRAME_BYTES;
      setParseState();
    }
    return True;
  } while (0);

  // An error occurred. Try to recover:
#ifdef DEBUG
  fprintf(stderr, "deliverFrameWithinBlock(): Error parsing data; trying to recover...\n");
#endif
  fCurrentParseState = LOOKING_FOR_BLOCK;
  return True;
}
+
// Copies the current frame's remaining bytes from the input file into the
// reader's buffer ("fCurFrameTo"), and skips any truncated bytes, then
// updates the per-block bookkeeping (next frame number / offset within
// frame) and completes the delivery via "FramedSource::afterGetting()".
// Work is done in chunks of at most "bankSize()" bytes, with a parser-state
// checkpoint after each chunk, so that delivery can resume mid-frame if
// input runs out.  On error, reverts to LOOKING_FOR_BLOCK to try to recover.
void MatroskaFileParser::deliverFrameBytes() {
  do {
    MatroskaTrack* track = fOurFile.lookup(fBlockTrackNumber);
    if (track == NULL) break; // shouldn't happen

    MatroskaDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fBlockTrackNumber);
    if (demuxedTrack == NULL) break; // shouldn't happen

    unsigned const BANK_SIZE = bankSize();
    while (fCurFrameNumBytesToGet > 0) {
      // Hack: We can get no more than BANK_SIZE bytes at a time:
      unsigned numBytesToGet = fCurFrameNumBytesToGet > BANK_SIZE ? BANK_SIZE : fCurFrameNumBytesToGet;
      getBytes(fCurFrameTo, numBytesToGet);
      fCurFrameTo += numBytesToGet;
      fCurFrameNumBytesToGet -= numBytesToGet;
      fCurOffsetWithinFrame += numBytesToGet;
      setParseState(); // checkpoint, so a resume continues from here
    }
    while (fCurFrameNumBytesToSkip > 0) {
      // Hack: We can skip no more than BANK_SIZE bytes at a time:
      unsigned numBytesToSkip = fCurFrameNumBytesToSkip > BANK_SIZE ? BANK_SIZE : fCurFrameNumBytesToSkip;
      skipBytes(numBytesToSkip);
      fCurFrameNumBytesToSkip -= numBytesToSkip;
      fCurOffsetWithinFrame += numBytesToSkip;
      setParseState(); // checkpoint, so a resume continues from here
    }
#ifdef DEBUG
    fprintf(stderr, "\tdelivered frame #%d: %d bytes", fNextFrameNumberToDeliver, demuxedTrack->frameSize());
    if (track->haveSubframes()) fprintf(stderr, "[offset %d]", fCurOffsetWithinFrame - track->subframeSizeSize - demuxedTrack->frameSize() - demuxedTrack->numTruncatedBytes());
    if (demuxedTrack->numTruncatedBytes() > 0) fprintf(stderr, " (%d bytes truncated)", demuxedTrack->numTruncatedBytes());
    fprintf(stderr, " @%u.%06u (%.06f from start); duration %u us\n", demuxedTrack->presentationTime().tv_sec, demuxedTrack->presentationTime().tv_usec, demuxedTrack->presentationTime().tv_sec+demuxedTrack->presentationTime().tv_usec/1000000.0-fPresentationTimeOffset, demuxedTrack->durationInMicroseconds());
#endif

    if (!track->haveSubframes()
        || fCurOffsetWithinFrame + track->subframeSizeSize >= fFrameSizesWithinBlock[fNextFrameNumberToDeliver]) {
      // Either we don't have subframes, or there's no more room for another subframe => We're completely done with this frame now:
      ++fNextFrameNumberToDeliver;
      fCurOffsetWithinFrame = 0;
    }
    if (fNextFrameNumberToDeliver == fNumFramesInBlock) {
      // We've delivered all of the frames from this block. Look for another block next:
      fCurrentParseState = LOOKING_FOR_BLOCK;
    } else {
      fCurrentParseState = DELIVERING_FRAME_WITHIN_BLOCK;
    }

    setParseState();
    FramedSource::afterGetting(demuxedTrack); // completes delivery
    return;
  } while (0);

  // An error occurred. Try to recover:
#ifdef DEBUG
  fprintf(stderr, "deliverFrameBytes(): Error parsing data; trying to recover...\n");
#endif
  fCurrentParseState = LOOKING_FOR_BLOCK;
}
+
+void MatroskaFileParser
+::getCommonFrameBytes(MatroskaTrack* track, u_int8_t* to, unsigned numBytesToGet, unsigned numBytesToSkip) {
+  // Set up delivery of a (sub)frame: first copy out any 'header stripped' bytes that
+  // the track stores separately, then record - in "fCurFrameTo",
+  // "fCurFrameNumBytesToGet", "fCurFrameNumBytesToSkip" - what remains to be read
+  // (and skipped) from the input stream itself.
+  if (track->headerStrippedBytesSize > fCurOffsetWithinFrame) {
+    // We have some common 'header stripped' bytes that remain to be prepended to the frame.  Use these first:
+    unsigned numRemainingHeaderStrippedBytes = track->headerStrippedBytesSize - fCurOffsetWithinFrame;
+    unsigned numHeaderStrippedBytesToGet;
+    if (numBytesToGet <= numRemainingHeaderStrippedBytes) {
+      numHeaderStrippedBytesToGet = numBytesToGet;
+      numBytesToGet = 0;
+      // NOTE(review): "numBytesToGet" was just zeroed, so the test below reduces to
+      // "numBytesToSkip <= numRemainingHeaderStrippedBytes", and the else-branch to
+      // "numBytesToSkip -= numRemainingHeaderStrippedBytes".  If the *original*
+      // "numBytesToGet" was intended here, this is a latent bug - confirm against the
+      // Matroska 'header stripping' semantics before changing.
+      if (numBytesToGet + numBytesToSkip <= numRemainingHeaderStrippedBytes) {
+	numBytesToSkip = 0;
+      } else {
+	numBytesToSkip = numBytesToGet + numBytesToSkip - numRemainingHeaderStrippedBytes;
+      }
+    } else {
+      numHeaderStrippedBytesToGet = numRemainingHeaderStrippedBytes;
+      numBytesToGet = numBytesToGet - numRemainingHeaderStrippedBytes;
+    }
+
+    if (numHeaderStrippedBytesToGet > 0) {
+      memmove(to, &track->headerStrippedBytes[fCurOffsetWithinFrame], numHeaderStrippedBytesToGet);
+      to += numHeaderStrippedBytesToGet;
+      fCurOffsetWithinFrame += numHeaderStrippedBytesToGet;
+    }
+  }
+
+  // Record what's left for "deliverFrameBytes()" to read/skip from the input:
+  fCurFrameTo = to;
+  fCurFrameNumBytesToGet = numBytesToGet;
+  fCurFrameNumBytesToSkip = numBytesToSkip;
+}
+
+Boolean MatroskaFileParser::parseEBMLNumber(EBMLNumber& num) {
+  // Parse an EBML 'variable-size integer' (an element id, or a data size) into "num".
+  // The number's total length is encoded by the position of the first '1' bit in its
+  // first byte (0x80 => 1 byte, 0x40 => 2 bytes, ...); "bitmask" walks that position.
+  unsigned i;
+  u_int8_t bitmask = 0x80;
+  for (i = 0; i < EBML_NUMBER_MAX_LEN; ++i) {
+    while (1) {
+      if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) return False; // We've hit our pre-set limit
+      num.data[i] = get1Byte();
+      ++fCurOffsetInFile;
+
+      // If we're looking for an id, skip any leading bytes that don't contain a '1' in the first 4 bits:
+      if (i == 0/*we're a leading byte*/ && !num.stripLeading1/*we're looking for an id*/ && (num.data[i]&0xF0) == 0) {
+	setParseState(); // ensures that we make forward progress if the parsing gets interrupted
+	continue;
+      }
+      break;
+    }
+    // The length marker lives in the *first* byte, hence the test on "data[0]" (not "data[i]"):
+    if ((num.data[0]&bitmask) != 0) {
+      // num[i] is the last byte of the id
+      if (num.stripLeading1) num.data[0] &=~ bitmask; // data sizes drop the length-marker bit from the value
+      break;
+    }
+    bitmask >>= 1;
+  }
+  if (i == EBML_NUMBER_MAX_LEN) return False; // malformed: no length marker was found
+
+  num.len = i+1;
+  return True;
+}
+
+Boolean MatroskaFileParser::parseEBMLIdAndSize(EBMLId& id, EBMLDataSize& size) {
+  // An EBML element header is an id, followed immediately by a data size.
+  // Fail as soon as either piece can't be parsed:
+  if (!parseEBMLNumber(id)) return False;
+  return parseEBMLNumber(size);
+}
+
+Boolean MatroskaFileParser::parseEBMLVal_unsigned64(EBMLDataSize& size, u_int64_t& result) {
+  // Read a big-endian unsigned integer whose byte count is given by "size".
+  u_int64_t numBytes = size.val();
+  if (numBytes > 8) return False; // too large to fit in a u_int64_t
+
+  result = 0;
+  for (unsigned bytesRemaining = (unsigned)numBytes; bytesRemaining > 0; --bytesRemaining) {
+    // Stop if we've run past any pre-set limit on how far we may read:
+    if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) return False;
+
+    u_int8_t nextByte = get1Byte();
+    ++fCurOffsetInFile;
+
+    result = (result<<8) | nextByte;
+  }
+
+  return True;
+}
+
+Boolean MatroskaFileParser::parseEBMLVal_unsigned(EBMLDataSize& size, unsigned& result) {
+  // Like "parseEBMLVal_unsigned64()", except that the value must fit in 32 bits:
+  if (size.val() > 4) return False; // size too large
+
+  u_int64_t wideResult;
+  Boolean parsedOK = parseEBMLVal_unsigned64(size, wideResult);
+  if (!parsedOK) return False;
+
+  result = (unsigned)wideResult;
+  return True;
+}
+
+Boolean MatroskaFileParser::parseEBMLVal_float(EBMLDataSize& size, float& result) {
+  // EBML floats are either 4 bytes (single precision) or 8 bytes (double precision).
+  if (size.val() == 4) {
+    // Single precision: read the value as a 4-byte integer, then reinterpret its bits as a 'float':
+    unsigned bits32;
+    if (!parseEBMLVal_unsigned(size, bits32)) return False;
+
+    if (sizeof result != sizeof bits32) return False; // sanity check
+    memcpy(&result, &bits32, sizeof result);
+    return True;
+  }
+
+  if (size.val() == 8) {
+    // Double precision: read the value as an 8-byte integer, reinterpret its bits as a
+    // 'double', then narrow that to the 'float' result:
+    u_int64_t bits64;
+    if (!parseEBMLVal_unsigned64(size, bits64)) return False;
+
+    double wideResult;
+    if (sizeof wideResult != sizeof bits64) return False; // sanity check
+    memcpy(&wideResult, &bits64, sizeof wideResult);
+
+    result = (float)wideResult;
+    return True;
+  }
+
+  return False; // any other size is unworkable
+}
+
+Boolean MatroskaFileParser::parseEBMLVal_string(EBMLDataSize& size, char*& result) {
+  // Read a "size"-byte string into a freshly-allocated, '\0'-terminated buffer.
+  // Note: The caller is responsible for delete[]ing "result" later.
+  unsigned const numChars = (unsigned)size.val();
+  result = new char[numChars + 1]; // leave room for the trailing '\0'
+  if (result == NULL) return False;
+
+  char* dst = result;
+  unsigned numCharsRead = 0;
+  while (numCharsRead < numChars) {
+    // Stop if we've run past any pre-set limit on how far we may read:
+    if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) break;
+
+    u_int8_t c = get1Byte();
+    ++fCurOffsetInFile;
+
+    *dst++ = c;
+    ++numCharsRead;
+  }
+  if (numCharsRead < numChars) {
+    // We hit the limit before reading the whole string => error:
+    delete[] result;
+    result = NULL;
+    return False;
+  }
+  *dst = '\0';
+
+  return True;
+}
+
+Boolean MatroskaFileParser::parseEBMLVal_binary(EBMLDataSize& size, u_int8_t*& result) {
+  // Read "size" bytes of opaque binary data into a freshly-allocated buffer.
+  // Note: The caller is responsible for delete[]ing "result" later.
+  unsigned const numBytes = (unsigned)size.val();
+  result = new u_int8_t[numBytes];
+  if (result == NULL) return False;
+
+  u_int8_t* dst = result;
+  unsigned numBytesRead = 0;
+  while (numBytesRead < numBytes) {
+    // Stop if we've run past any pre-set limit on how far we may read:
+    if (fLimitOffsetInFile > 0 && fCurOffsetInFile > fLimitOffsetInFile) break;
+
+    u_int8_t c = get1Byte();
+    ++fCurOffsetInFile;
+
+    *dst++ = c;
+    ++numBytesRead;
+  }
+  if (numBytesRead < numBytes) {
+    // We hit the limit before reading all of the data => error:
+    delete[] result;
+    result = NULL;
+    return False;
+  }
+
+  return True;
+}
+
+void MatroskaFileParser::skipHeader(EBMLDataSize const& size) {
+  // Schedule the skipping of a header whose data is "size" bytes long.
+  // (The actual skipping is done - possibly incrementally - by "skipRemainingHeaderBytes()".)
+  u_int64_t sv = size.val();
+      // Fix: Do NOT truncate this to 32 bits ("unsigned") first - EBML data sizes are
+      // up to 8 bytes, so a header can legitimately be larger than 4GB.
+#ifdef DEBUG
+  fprintf(stderr, "\tskipping %llu bytes\n", sv);
+#endif
+
+  fNumHeaderBytesToSkip = sv;
+  skipRemainingHeaderBytes(False);
+}
+
+void MatroskaFileParser::skipRemainingHeaderBytes(Boolean isContinuation) {
+  // Skip over "fNumHeaderBytesToSkip" bytes of input, keeping "fCurOffsetInFile" in
+  // step.  "isContinuation" should be True iff we're resuming a previously-interrupted
+  // skip (it affects debug output only).
+  if (fNumHeaderBytesToSkip == 0) return; // common case
+
+  // Hack: To avoid tripping into a parser 'internal error' if we try to skip an excessively large
+  // distance, break up the skipping into manageable chunks, to ensure forward progress:
+  unsigned const maxBytesToSkip = bankSize();
+  while (fNumHeaderBytesToSkip > 0) {
+    unsigned numBytesToSkipNow
+      = fNumHeaderBytesToSkip < maxBytesToSkip ? (unsigned)fNumHeaderBytesToSkip : maxBytesToSkip;
+    setParseState(); // checkpoint first, so that an interrupted "skipBytes()" resumes here
+    skipBytes(numBytesToSkipNow);
+#ifdef DEBUG
+    if (isContinuation || numBytesToSkipNow < fNumHeaderBytesToSkip) {
+      fprintf(stderr, "\t\t(skipped %u bytes; %llu bytes remaining)\n",
+	      numBytesToSkipNow, fNumHeaderBytesToSkip - numBytesToSkipNow);
+    }
+#endif
+    fCurOffsetInFile += numBytesToSkipNow;
+    fNumHeaderBytesToSkip -= numBytesToSkipNow;
+  }
+}
+
+void MatroskaFileParser::setParseState() {
+  // Checkpoint the current parse position (both within the file, and within any
+  // frame), so that parsing can later resume from here if it's interrupted:
+  fSavedCurOffsetInFile = fCurOffsetInFile;
+  fSavedCurOffsetWithinFrame = fCurOffsetWithinFrame;
+  saveParserState(); // checkpoint the "StreamParser" base class's state as well
+}
+
+void MatroskaFileParser::restoreSavedParserState() {
+  // Undo any partial parsing since the last "setParseState()" checkpoint:
+  StreamParser::restoreSavedParserState();
+  fCurOffsetInFile = fSavedCurOffsetInFile;
+  fCurOffsetWithinFrame = fSavedCurOffsetWithinFrame;
+}
+
+void MatroskaFileParser::seekToFilePosition(u_int64_t offsetInFile) {
+  // Reposition our input to "offsetInFile", then reset our parser state accordingly:
+  ByteStreamFileSource* source = (ByteStreamFileSource*)fInputSource; // we know it's a "ByteStreamFileSource"
+  if (source == NULL) return;
+
+  source->seekToByteAbsolute(offsetInFile);
+  resetStateAfterSeeking();
+}
+
+void MatroskaFileParser::seekToEndOfFile() {
+  // Reposition our input to the end of the file, then reset our parser state accordingly:
+  ByteStreamFileSource* source = (ByteStreamFileSource*)fInputSource; // we know it's a "ByteStreamFileSource"
+  if (source == NULL) return;
+
+  source->seekToEnd();
+  resetStateAfterSeeking();
+}
+
+void MatroskaFileParser::resetStateAfterSeeking() {
+  // We've just moved to a new position in the file, so zero out our notion of
+  // where we are - both the checkpointed and the live offsets - and discard any
+  // input that was buffered for the old position:
+  fCurOffsetWithinFrame = 0;
+  fSavedCurOffsetWithinFrame = 0;
+  fCurOffsetInFile = 0;
+  fSavedCurOffsetInFile = 0;
+  flushInput();
+}
diff --git a/liveMedia/MatroskaFileParser.hh b/liveMedia/MatroskaFileParser.hh
new file mode 100644
index 0000000..48fbf00
--- /dev/null
+++ b/liveMedia/MatroskaFileParser.hh
@@ -0,0 +1,134 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for a Matroska file.
+// C++ header
+
+#ifndef _MATROSKA_FILE_PARSER_HH
+#define _MATROSKA_FILE_PARSER_HH
+
+#ifndef _STREAM_PARSER_HH
+#include "StreamParser.hh"
+#endif
+#ifndef _MATROSKA_FILE_HH
+#include "MatroskaFile.hh"
+#endif
+#ifndef _EBML_NUMBER_HH
+#include "EBMLNumber.hh"
+#endif
+
+// An enum representing the current state of the parser
+// (i.e., which part of the file we expect to encounter next):
+enum MatroskaParseState {
+  PARSING_START_OF_FILE,
+  LOOKING_FOR_TRACKS,
+  PARSING_TRACK,
+  PARSING_CUES,
+  LOOKING_FOR_CLUSTER,
+  LOOKING_FOR_BLOCK,
+  PARSING_BLOCK,
+  DELIVERING_FRAME_WITHIN_BLOCK,
+  DELIVERING_FRAME_BYTES
+};
+
+// A "StreamParser" subclass that incrementally parses a Matroska file:
+// first its 'Track' headers (on initialization), then - if a "MatroskaDemux"
+// was supplied - its 'Cluster'/'Block' structure, delivering demuxed frames.
+class MatroskaFileParser: public StreamParser {
+public:
+  MatroskaFileParser(MatroskaFile& ourFile, FramedSource* inputSource,
+		     FramedSource::onCloseFunc* onEndFunc, void* onEndClientData,
+		     MatroskaDemux* ourDemux = NULL);
+  virtual ~MatroskaFileParser();
+
+  void seekToTime(double& seekNPT);
+
+  // StreamParser 'client continue' function:
+  static void continueParsing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime);
+  void continueParsing();
+
+private:
+  // Parsing functions:
+  Boolean parse();
+    // returns True iff we have finished parsing to the end of all 'Track' headers (on initialization)
+
+  Boolean parseStartOfFile();
+  void lookForNextTrack();
+  Boolean parseTrack();
+  Boolean parseCues();
+
+  void lookForNextBlock();
+  void parseBlock();
+  Boolean deliverFrameWithinBlock();
+  void deliverFrameBytes();
+
+  void getCommonFrameBytes(MatroskaTrack* track, u_int8_t* to, unsigned numBytesToGet, unsigned numBytesToSkip);
+
+  Boolean parseEBMLNumber(EBMLNumber& num);
+  Boolean parseEBMLIdAndSize(EBMLId& id, EBMLDataSize& size);
+  Boolean parseEBMLVal_unsigned64(EBMLDataSize& size, u_int64_t& result);
+  Boolean parseEBMLVal_unsigned(EBMLDataSize& size, unsigned& result);
+  Boolean parseEBMLVal_float(EBMLDataSize& size, float& result);
+  Boolean parseEBMLVal_string(EBMLDataSize& size, char*& result);
+    // Note: "result" is dynamically allocated; the caller must delete[] it later
+  Boolean parseEBMLVal_binary(EBMLDataSize& size, u_int8_t*& result);
+    // Note: "result" is dynamically allocated; the caller must delete[] it later
+  void skipHeader(EBMLDataSize const& size);
+  void skipRemainingHeaderBytes(Boolean isContinuation);
+
+  void setParseState();
+    // checkpoints the current parse position, so that parsing can resume after an interruption
+
+  void seekToFilePosition(u_int64_t offsetInFile);
+  void seekToEndOfFile();
+  void resetStateAfterSeeking(); // common code, called by both of the above
+
+private: // redefined virtual functions
+  virtual void restoreSavedParserState();
+
+private:
+  // General state for parsing:
+  MatroskaFile& fOurFile;
+  FramedSource* fInputSource;
+  FramedSource::onCloseFunc* fOnEndFunc;
+  void* fOnEndClientData;
+  MatroskaDemux* fOurDemux; // NULL iff we're parsing track headers only
+  MatroskaParseState fCurrentParseState;
+  u_int64_t fCurOffsetInFile, fSavedCurOffsetInFile, fLimitOffsetInFile;
+    // "fLimitOffsetInFile" (when > 0) bounds how far parsing may read
+
+  // For skipping over (possibly large) headers:
+  u_int64_t fNumHeaderBytesToSkip;
+
+  // For parsing 'Seek ID's:
+  EBMLId fLastSeekId;
+
+  // Parameters of the most recently-parsed 'Cluster':
+  unsigned fClusterTimecode;
+
+  // Parameters of the most recently-parsed 'Block':
+  unsigned fBlockSize;
+  unsigned fBlockTrackNumber;
+  short fBlockTimecode;
+  unsigned fNumFramesInBlock;
+  unsigned* fFrameSizesWithinBlock;
+
+  // Parameters of the most recently-parsed frame within a 'Block':
+  double fPresentationTimeOffset;
+  unsigned fNextFrameNumberToDeliver;
+  unsigned fCurOffsetWithinFrame, fSavedCurOffsetWithinFrame; // used if track->haveSubframes()
+
+  // Parameters of the (sub)frame that's currently being delivered:
+  u_int8_t* fCurFrameTo;
+  unsigned fCurFrameNumBytesToGet;
+  unsigned fCurFrameNumBytesToSkip;
+};
+
+#endif
diff --git a/liveMedia/MatroskaFileServerDemux.cpp b/liveMedia/MatroskaFileServerDemux.cpp
new file mode 100644
index 0000000..4153569
--- /dev/null
+++ b/liveMedia/MatroskaFileServerDemux.cpp
@@ -0,0 +1,121 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server demultiplexor for a Matroska file
+// Implementation
+
+#include "MatroskaFileServerDemux.hh"
+#include "MP3AudioMatroskaFileServerMediaSubsession.hh"
+#include "MatroskaFileServerMediaSubsession.hh"
+
+void MatroskaFileServerDemux
+::createNew(UsageEnvironment& env, char const* fileName,
+	    onCreationFunc* onCreation, void* onCreationClientData,
+	    char const* preferredLanguage) {
+  // The new object announces itself to the caller - asynchronously - via
+  // "onCreation", so we don't need to hold on to it here:
+  new MatroskaFileServerDemux(env, fileName,
+			      onCreation, onCreationClientData,
+			      preferredLanguage);
+}
+
+ServerMediaSubsession* MatroskaFileServerDemux::newServerMediaSubsession() {
+  // Delegate to the other variant, discarding the returned track number:
+  unsigned trackNumberUnused;
+  return newServerMediaSubsession(trackNumberUnused);
+}
+
+ServerMediaSubsession* MatroskaFileServerDemux
+::newServerMediaSubsession(unsigned& resultTrackNumber) {
+  // Create a "ServerMediaSubsession" for the next of the file's chosen tracks
+  // (video, audio, subtitle - in the order given by the MATROSKA_TRACK_TYPE_ bit
+  // values).  "fNextTrackTypeToCheck" is a bitmask that's advanced ("<<= 1")
+  // across successive calls, so each call returns a subsession for a different
+  // track type.  Returns NULL (with "resultTrackNumber" 0) once no more remain.
+  ServerMediaSubsession* result;
+  resultTrackNumber = 0;
+
+  for (result = NULL; result == NULL && fNextTrackTypeToCheck != MATROSKA_TRACK_TYPE_OTHER; fNextTrackTypeToCheck <<= 1) {
+    if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_VIDEO) resultTrackNumber = fOurMatroskaFile->chosenVideoTrackNumber();
+    else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_AUDIO) resultTrackNumber = fOurMatroskaFile->chosenAudioTrackNumber();
+    else if (fNextTrackTypeToCheck == MATROSKA_TRACK_TYPE_SUBTITLE) resultTrackNumber = fOurMatroskaFile->chosenSubtitleTrackNumber();
+
+    // (If "resultTrackNumber" is 0 - no such track - this returns NULL, and we keep looking:)
+    result = newServerMediaSubsessionByTrackNumber(resultTrackNumber);
+  }
+
+  return result;
+}
+
+ServerMediaSubsession* MatroskaFileServerDemux
+::newServerMediaSubsessionByTrackNumber(unsigned trackNumber) {
+  // Create a "ServerMediaSubsession" for the given track, or return NULL if the
+  // track doesn't exist:
+  MatroskaTrack* track = fOurMatroskaFile->lookup(trackNumber);
+  if (track == NULL) return NULL;
+
+  // Use the track's MIME type to figure out which "ServerMediaSubsession" subclass to use:
+  ServerMediaSubsession* result = NULL;
+  if (strcmp(track->mimeType, "audio/MPEG") == 0) {
+    result = MP3AudioMatroskaFileServerMediaSubsession::createNew(*this, track);
+  } else {
+    result = MatroskaFileServerMediaSubsession::createNew(*this, track);
+  }
+
+  if (result != NULL) {
+#ifdef DEBUG
+    fprintf(stderr, "Created 'ServerMediaSubsession' object for track #%d: %s (%s)\n", track->trackNumber, track->codecID, track->mimeType);
+#endif
+  }
+
+  return result;
+}
+
+FramedSource* MatroskaFileServerDemux::newDemuxedTrack(unsigned clientSessionId, unsigned trackNumber) {
+  // Create (or reuse) a "MatroskaDemux", then hand back a demuxed track for "trackNumber".
+  // We reuse the previous demultiplexor iff this request comes from the same (nonzero)
+  // client session as the last one.  (This relies on stream setup for different client
+  // sessions never overlapping, so all of a session's tracks come from one "MatroskaDemux".)
+  // 'Session 0' is special: its audio and video streams are created and destroyed
+  // one-at-a-time rather than together, so each of its tracks gets its own demultiplexor.
+  MatroskaDemux* demux;
+  if (clientSessionId != 0 && clientSessionId == fLastClientSessionId) {
+    demux = fLastCreatedDemux;
+  } else {
+    demux = fOurMatroskaFile->newDemux();
+  }
+
+  fLastClientSessionId = clientSessionId;
+  fLastCreatedDemux = demux;
+
+  return demux->newDemuxedTrackByTrackNumber(trackNumber);
+}
+
+MatroskaFileServerDemux
+::MatroskaFileServerDemux(UsageEnvironment& env, char const* fileName,
+			  onCreationFunc* onCreation, void* onCreationClientData,
+			  char const* preferredLanguage)
+  : Medium(env),
+    fFileName(fileName), fOnCreation(onCreation), fOnCreationClientData(onCreationClientData),
+    fNextTrackTypeToCheck(0x1), fLastClientSessionId(0), fLastCreatedDemux(NULL) {
+  // Fix: Give "fOurMatroskaFile" a well-defined value *before* starting the
+  // (asynchronous) file creation below.  Otherwise, if this object were destroyed
+  // before "onMatroskaFileCreation()" ever ran, our destructor would pass an
+  // uninitialized pointer to "Medium::close()":
+  fOurMatroskaFile = NULL;
+
+  // Begin reading the Matroska file; "onMatroskaFileCreation()" is called when this completes:
+  MatroskaFile::createNew(env, fileName, onMatroskaFileCreation, this, preferredLanguage);
+}
+
+MatroskaFileServerDemux::~MatroskaFileServerDemux() {
+  // Close (and thereby reclaim) our "MatroskaFile" object:
+  Medium::close(fOurMatroskaFile);
+}
+
+void MatroskaFileServerDemux::onMatroskaFileCreation(MatroskaFile* newFile, void* clientData) {
+  // "clientData" is really the "MatroskaFileServerDemux" that requested the file's creation;
+  // forward the notification to it:
+  MatroskaFileServerDemux* demux = (MatroskaFileServerDemux*)clientData;
+  demux->onMatroskaFileCreation(newFile);
+}
+
+void MatroskaFileServerDemux::onMatroskaFileCreation(MatroskaFile* newFile) {
+  // Record the newly-created "MatroskaFile", then pass the notification on to our own client:
+  fOurMatroskaFile = newFile;
+
+  if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData);
+}
diff --git a/liveMedia/MatroskaFileServerMediaSubsession.cpp b/liveMedia/MatroskaFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..b070336
--- /dev/null
+++ b/liveMedia/MatroskaFileServerMediaSubsession.cpp
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a track within a Matroska file.
+// Implementation
+
+#include "MatroskaFileServerMediaSubsession.hh"
+#include "MatroskaDemuxedTrack.hh"
+#include "FramedFilter.hh"
+
+MatroskaFileServerMediaSubsession* MatroskaFileServerMediaSubsession
+::createNew(MatroskaFileServerDemux& demux, MatroskaTrack* track) {
+  // Simple factory function:
+  MatroskaFileServerMediaSubsession* subsession
+    = new MatroskaFileServerMediaSubsession(demux, track);
+  return subsession;
+}
+
+MatroskaFileServerMediaSubsession
+::MatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, MatroskaTrack* track)
+  : FileServerMediaSubsession(demux.envir(), demux.fileName(), False),
+    fOurDemux(demux), fTrack(track), fNumFiltersInFrontOfTrack(0) {
+  // Note: "fNumFiltersInFrontOfTrack" gets its real value in "createNewStreamSource()".
+}
+
+MatroskaFileServerMediaSubsession::~MatroskaFileServerMediaSubsession() {
+  // Nothing to reclaim here: we hold only references to objects owned elsewhere.
+}
+
+float MatroskaFileServerMediaSubsession::duration() const {
+  // Our duration is that of the whole Matroska file:
+  return fOurDemux.fileDuration();
+}
+
+void MatroskaFileServerMediaSubsession
+::seekStreamSource(FramedSource* inputSource, double& seekNPT, double /*streamDuration*/, u_int64_t& /*numBytes*/) {
+  // "inputSource" may be wrapped by one or more filters (added in
+  // "createNewStreamSource()"); unwrap these to recover the underlying
+  // "MatroskaDemuxedTrack", then seek it:
+  unsigned numFiltersRemaining = fNumFiltersInFrontOfTrack;
+  while (numFiltersRemaining-- > 0) {
+    inputSource = ((FramedFilter*)inputSource)->inputSource();
+  }
+
+  ((MatroskaDemuxedTrack*)inputSource)->seekToTime(seekNPT);
+}
+
+FramedSource* MatroskaFileServerMediaSubsession
+::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) {
+  // First, get a demuxed source for our track:
+  FramedSource* demuxedTrack = fOurDemux.newDemuxedTrack(clientSessionId, fTrack->trackNumber);
+  if (demuxedTrack == NULL) return NULL;
+
+  // Then wrap it in whatever filters this track needs for streaming
+  // (also filling in "estBitrate", and counting the filters added):
+  MatroskaFile* matroskaFile = fOurDemux.ourMatroskaFile();
+  return matroskaFile->createSourceForStreaming(demuxedTrack, fTrack->trackNumber,
+						estBitrate, fNumFiltersInFrontOfTrack);
+}
+
+RTPSink* MatroskaFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {
+  // Our "MatroskaFile" knows which kind of "RTPSink" suits this track:
+  MatroskaFile* matroskaFile = fOurDemux.ourMatroskaFile();
+  return matroskaFile->createRTPSinkForTrackNumber(fTrack->trackNumber, rtpGroupsock,
+						   rtpPayloadTypeIfDynamic);
+}
diff --git a/liveMedia/MatroskaFileServerMediaSubsession.hh b/liveMedia/MatroskaFileServerMediaSubsession.hh
new file mode 100644
index 0000000..7cbebae
--- /dev/null
+++ b/liveMedia/MatroskaFileServerMediaSubsession.hh
@@ -0,0 +1,55 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a track within a Matroska file.
+// C++ header
+
+#ifndef _MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _MATROSKA_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+#ifndef _MATROSKA_FILE_SERVER_DEMUX_HH
+#include "MatroskaFileServerDemux.hh"
+#endif
+
+// A 'ServerMediaSubsession' for one track of a Matroska file; the demultiplexing
+// itself is handled by the shared "MatroskaFileServerDemux":
+class MatroskaFileServerMediaSubsession: public FileServerMediaSubsession {
+public:
+  static MatroskaFileServerMediaSubsession*
+  createNew(MatroskaFileServerDemux& demux, MatroskaTrack* track);
+
+protected:
+  MatroskaFileServerMediaSubsession(MatroskaFileServerDemux& demux, MatroskaTrack* track);
+      // called only by createNew(), or by subclass constructors
+  virtual ~MatroskaFileServerMediaSubsession();
+
+protected: // redefined virtual functions
+  virtual float duration() const;
+  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+					      unsigned& estBitrate);
+  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
+
+protected:
+  MatroskaFileServerDemux& fOurDemux;
+  MatroskaTrack* fTrack; // not owned by us
+  unsigned fNumFiltersInFrontOfTrack; // set by "createNewStreamSource()"
+};
+
+#endif
diff --git a/liveMedia/Media.cpp b/liveMedia/Media.cpp
new file mode 100644
index 0000000..7905bc1
--- /dev/null
+++ b/liveMedia/Media.cpp
@@ -0,0 +1,163 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Media
+// Implementation
+
+#include "Media.hh"
+#include "HashTable.hh"
+
+////////// Medium //////////
+
+Medium::Medium(UsageEnvironment& env)
+	: fEnviron(env), fNextTask(NULL) {
+  // First generate a name for the new medium:
+  MediaLookupTable::ourMedia(env)->generateNewName(fMediumName, mediumNameMaxLen);
+  env.setResultMsg(fMediumName); // so that the caller can retrieve the new name
+
+  // Then add it to our table:
+  MediaLookupTable::ourMedia(env)->addNew(this, fMediumName);
+}
+
+Medium::~Medium() {
+  // Remove any tasks that might be pending for us:
+  fEnviron.taskScheduler().unscheduleDelayedTask(fNextTask);
+  // Note: We were already removed from the lookup table by "MediaLookupTable::remove()".
+}
+
+Boolean Medium::lookupByName(UsageEnvironment& env, char const* mediumName,
+			     Medium*& resultMedium) {
+  // Look up "mediumName" in this environment's table of media:
+  resultMedium = MediaLookupTable::ourMedia(env)->lookup(mediumName);
+  if (resultMedium != NULL) return True;
+
+  env.setResultMsg("Medium ", mediumName, " does not exist");
+  return False;
+}
+
+void Medium::close(UsageEnvironment& env, char const* name) {
+  // Remove the named medium from the environment's table (this also deletes it):
+  MediaLookupTable::ourMedia(env)->remove(name);
+}
+
+void Medium::close(Medium* medium) {
+  // NULL-tolerant convenience wrapper around the name-based "close()":
+  if (medium == NULL) return;
+  close(medium->envir(), medium->name());
+}
+
+// Default implementations of the type-identification predicates.
+// Subclasses that represent the corresponding kind of medium override
+// the relevant one to return True:
+
+Boolean Medium::isSource() const {
+  return False; // default implementation
+}
+
+Boolean Medium::isSink() const {
+  return False; // default implementation
+}
+
+Boolean Medium::isRTCPInstance() const {
+  return False; // default implementation
+}
+
+Boolean Medium::isRTSPClient() const {
+  return False; // default implementation
+}
+
+Boolean Medium::isRTSPServer() const {
+  return False; // default implementation
+}
+
+Boolean Medium::isMediaSession() const {
+  return False; // default implementation
+}
+
+Boolean Medium::isServerMediaSession() const {
+  return False; // default implementation
+}
+
+
+////////// _Tables implementation //////////
+
+_Tables* _Tables::getOurTables(UsageEnvironment& env, Boolean createIfNotPresent) {
+  // Return the "_Tables" object that hangs off "env", lazily creating it if allowed:
+  if (env.liveMediaPriv == NULL) {
+    if (!createIfNotPresent) return NULL;
+    env.liveMediaPriv = new _Tables(env);
+  }
+  return (_Tables*)(env.liveMediaPriv);
+}
+
+void _Tables::reclaimIfPossible() {
+  // If both of our tables are gone, then nothing references us any longer, so we
+  // can delete ourselves.  We first detach ourselves from the environment, so that
+  // no-one can look us up while (or after) we're being destroyed:
+  if (mediaTable == NULL && socketTable == NULL) {
+    fEnv.liveMediaPriv = NULL;
+    delete this;
+  }
+}
+
+_Tables::_Tables(UsageEnvironment& env)
+  : mediaTable(NULL), socketTable(NULL), fEnv(env) {
+  // Note: The tables themselves are created lazily, on first use.
+}
+
+_Tables::~_Tables() {
+  // Note: We're deleted only via "reclaimIfPossible()", after both tables are gone.
+}
+
+
+////////// MediaLookupTable implementation //////////
+
+MediaLookupTable* MediaLookupTable::ourMedia(UsageEnvironment& env) {
+  // Return this environment's table of media, creating it on first use:
+  _Tables* tables = _Tables::getOurTables(env);
+  if (tables->mediaTable == NULL) {
+    tables->mediaTable = new MediaLookupTable(env);
+  }
+  return tables->mediaTable;
+}
+
+Medium* MediaLookupTable::lookup(char const* name) const {
+  // Return the medium registered under "name", or NULL if there's none:
+  void* entry = fTable->Lookup(name);
+  return (Medium*)entry;
+}
+
+void MediaLookupTable::addNew(Medium* medium, char* mediumName) {
+  // Register "medium" under "mediumName":
+  fTable->Add(mediumName, medium);
+}
+
+void MediaLookupTable::remove(char const* name) {
+  // Remove - and delete - the named medium, if present.  If that empties the
+  // table, reclaim the table (and possibly the whole "_Tables") as well.
+  Medium* medium = lookup(name);
+  if (medium != NULL) {
+    fTable->Remove(name);
+    if (fTable->IsEmpty()) {
+      // We can also delete ourselves (to reclaim space):
+      _Tables* ourTables = _Tables::getOurTables(fEnv);
+      delete this;
+      // Careful: after "delete this" we touch only locals ("ourTables", "medium"),
+      // never our own members:
+      ourTables->mediaTable = NULL;
+      ourTables->reclaimIfPossible();
+    }
+
+    delete medium;
+  }
+}
+
+void MediaLookupTable::generateNewName(char* mediumName,
+				       unsigned maxLen) {
+  // Generate a name of the form "liveMedia<N>", with <N> unique within this table.
+  // Use snprintf() - standard since C99/C++11 - so that we can never overflow the
+  // caller's "mediumName" buffer:
+  snprintf(mediumName, maxLen, "liveMedia%d", fNameGenerator++);
+}
+
+MediaLookupTable::MediaLookupTable(UsageEnvironment& env)
+  : fEnv(env), fTable(HashTable::create(STRING_HASH_KEYS)), fNameGenerator(0) {
+  // We hash the media by their (string) names.
+}
+
+MediaLookupTable::~MediaLookupTable() {
+  // Note: The media themselves are deleted in "remove()", not here.
+  delete fTable;
+}
diff --git a/liveMedia/MediaSession.cpp b/liveMedia/MediaSession.cpp
new file mode 100644
index 0000000..5525c44
--- /dev/null
+++ b/liveMedia/MediaSession.cpp
@@ -0,0 +1,1495 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A data structure that represents a session that consists of
+// potentially multiple (audio and/or video) sub-sessions
+// Implementation
+
+#include "liveMedia.hh"
+#include "Locale.hh"
+#include "Base64.hh"
+#include "GroupsockHelper.hh"
+#include <ctype.h>
+
+////////// MediaSession //////////
+
+// Create a "MediaSession" from a SDP description; returns NULL (after
+// cleaning up) if the description cannot be parsed.
+MediaSession* MediaSession::createNew(UsageEnvironment& env,
+				      char const* sdpDescription) {
+  MediaSession* newSession = new MediaSession(env);
+  if (newSession == NULL) return NULL;
+
+  if (newSession->initializeWithSDP(sdpDescription)) return newSession;
+
+  // The SDP description was bad; undo the construction:
+  delete newSession;
+  return NULL;
+}
+
+// Look up a previously-created "MediaSession" by its instance name.
+// Returns True with "resultSession" set on success; on failure returns
+// False (with an explanatory result message set in "env").
+Boolean MediaSession::lookupByName(UsageEnvironment& env,
+				   char const* instanceName,
+				   MediaSession*& resultSession) {
+  resultSession = NULL; // unless we succeed
+
+  Medium* medium;
+  if (!Medium::lookupByName(env, instanceName, medium)) return False;
+
+  if (!medium->isMediaSession()) {
+    env.setResultMsg(instanceName, " is not a 'MediaSession' object");
+    return False;
+  }
+
+  resultSession = (MediaSession*)medium;
+  return True;
+}
+
+// Construct an (empty) session; the subsession list and the various
+// SDP-derived fields are filled in later by "initializeWithSDP()".
+MediaSession::MediaSession(UsageEnvironment& env)
+  : Medium(env),
+    fSubsessionsHead(NULL), fSubsessionsTail(NULL),
+    fConnectionEndpointName(NULL),
+    fMaxPlayStartTime(0.0f), fMaxPlayEndTime(0.0f), fAbsStartTime(NULL), fAbsEndTime(NULL),
+    fScale(1.0f), fSpeed(1.0f),
+    fMediaSessionType(NULL), fSessionName(NULL), fSessionDescription(NULL), fControlPath(NULL),
+    fMIKEYState(NULL), fCrypto(NULL) {
+  fSourceFilterAddr.s_addr = 0;
+
+  // Get our host name, and use this for the RTCP CNAME:
+  const unsigned maxCNAMElen = 100;
+  char CNAME[maxCNAMElen+1];
+#ifndef CRIS
+  gethostname((char*)CNAME, maxCNAMElen);
+#else
+  // "gethostname()" isn't defined for this platform
+  sprintf(CNAME, "unknown host %d", (unsigned)(our_random()*0x7FFFFFFF));
+#endif
+  CNAME[maxCNAMElen] = '\0'; // just in case "gethostname()" didn't NUL-terminate
+  fCNAME = strDup(CNAME);
+}
+
+MediaSession::~MediaSession() {
+  // Deleting the head subsession deletes the whole list, because each
+  // "MediaSubsession" destructor deletes its "fNext":
+  delete fSubsessionsHead;
+  delete[] fCNAME;
+  delete[] fConnectionEndpointName;
+  delete[] fAbsStartTime; delete[] fAbsEndTime;
+  delete[] fMediaSessionType;
+  delete[] fSessionName;
+  delete[] fSessionDescription;
+  delete[] fControlPath;
+  delete fCrypto; delete fMIKEYState;
+}
+
+// RTTI-style test used by "lookupByName()"; overrides the Medium default.
+Boolean MediaSession::isMediaSession() const {
+  return True;
+}
+
+// Factory hook for subsession objects; subclasses may override this to
+// create a "MediaSubsession" subclass instead.
+MediaSubsession* MediaSession::createNewMediaSubsession() {
+  // default implementation:
+  return new MediaSubsession(*this);
+}
+
+// Populate this session (and its list of subsessions) by parsing the given
+// SDP description.  Session-level lines are handled until the first "m="
+// line; each "m=" section then becomes one "MediaSubsession".
+// Returns False (with a result message set) on a parse error.
+Boolean MediaSession::initializeWithSDP(char const* sdpDescription) {
+  if (sdpDescription == NULL) return False;
+
+  // Begin by processing all SDP lines until we see the first "m="
+  char const* sdpLine = sdpDescription;
+  char const* nextSDPLine;
+  while (1) {
+    if (!parseSDPLine(sdpLine, nextSDPLine)) return False;
+    //##### We should really check for the correct SDP version (v=0)
+    if (sdpLine[0] == 'm') break;
+    sdpLine = nextSDPLine;
+    if (sdpLine == NULL) break; // there are no m= lines at all
+
+    // Check for various special SDP lines that we understand:
+    if (parseSDPLine_s(sdpLine)) continue;
+    if (parseSDPLine_i(sdpLine)) continue;
+    if (parseSDPLine_c(sdpLine)) continue;
+    if (parseSDPAttribute_control(sdpLine)) continue;
+    if (parseSDPAttribute_range(sdpLine)) continue;
+    if (parseSDPAttribute_type(sdpLine)) continue;
+    if (parseSDPAttribute_source_filter(sdpLine)) continue;
+    if (parseSDPAttribute_key_mgmt(sdpLine)) continue;
+  }
+
+  // Now handle each "m=" section in turn:
+  while (sdpLine != NULL) {
+    // We have a "m=" line, representing a new sub-session:
+    MediaSubsession* subsession = createNewMediaSubsession();
+    if (subsession == NULL) {
+      envir().setResultMsg("Unable to create new MediaSubsession");
+      return False;
+    }
+
+    // Parse the line as "m=<medium_name> <client_portNum> <proto> <fmt>"
+    // or "m=<medium_name> <client_portNum>/<num_ports> <proto> <fmt>"
+    // (Should we be checking for >1 payload format number here?)#####
+    char* mediumName = strDupSize(sdpLine); // ensures we have enough space
+    char const* protocolName = NULL;
+    unsigned payloadFormat;
+    if ((sscanf(sdpLine, "m=%s %hu RTP/AVP %u",
+		mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
+	 sscanf(sdpLine, "m=%s %hu/%*u RTP/AVP %u",
+		mediumName, &subsession->fClientPortNum, &payloadFormat) == 3)
+	&& payloadFormat <= 127) {
+      protocolName = "RTP";
+    } else if ((sscanf(sdpLine, "m=%s %hu RTP/SAVP %u",
+		       mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
+		sscanf(sdpLine, "m=%s %hu/%*u RTP/SAVP %u",
+		       mediumName, &subsession->fClientPortNum, &payloadFormat) == 3)
+	       && payloadFormat <= 127) {
+      protocolName = "SRTP";
+    } else if ((sscanf(sdpLine, "m=%s %hu UDP %u",
+		       mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
+		sscanf(sdpLine, "m=%s %hu udp %u",
+		       mediumName, &subsession->fClientPortNum, &payloadFormat) == 3 ||
+		sscanf(sdpLine, "m=%s %hu RAW/RAW/UDP %u",
+		       mediumName, &subsession->fClientPortNum, &payloadFormat) == 3)
+	       && payloadFormat <= 127) {
+      // This is a RAW UDP source
+      protocolName = "UDP";
+    } else {
+      // This "m=" line is bad; output an error message saying so:
+      char* sdpLineStr;
+      if (nextSDPLine == NULL) {
+	sdpLineStr = (char*)sdpLine;
+      } else {
+	sdpLineStr = strDup(sdpLine);
+	sdpLineStr[nextSDPLine-sdpLine] = '\0';
+      }
+      envir() << "Bad SDP \"m=\" line: " << sdpLineStr << "\n";
+      if (sdpLineStr != (char*)sdpLine) delete[] sdpLineStr;
+
+      delete[] mediumName;
+      delete subsession;
+
+      // Skip the following SDP lines, up until the next "m=":
+      while (1) {
+	sdpLine = nextSDPLine;
+	if (sdpLine == NULL) break; // we've reached the end
+	if (!parseSDPLine(sdpLine, nextSDPLine)) return False;
+
+	if (sdpLine[0] == 'm') break; // we've reached the next subsession
+      }
+      continue;
+    }
+
+    // Insert this subsession at the end of the list:
+    if (fSubsessionsTail == NULL) {
+      fSubsessionsHead = fSubsessionsTail = subsession;
+    } else {
+      fSubsessionsTail->setNext(subsession);
+      fSubsessionsTail = subsession;
+    }
+
+    subsession->serverPortNum = subsession->fClientPortNum; // by default
+
+    // Remember the raw SDP text of this "m=" section (truncated later,
+    // once we know where the next section starts):
+    char const* mStart = sdpLine;
+    subsession->fSavedSDPLines = strDup(mStart);
+
+    subsession->fMediumName = strDup(mediumName);
+    delete[] mediumName;
+    subsession->fProtocolName = strDup(protocolName);
+    subsession->fRTPPayloadFormat = payloadFormat;
+
+    // Process the following SDP lines, up until the next "m=":
+    while (1) {
+      sdpLine = nextSDPLine;
+      if (sdpLine == NULL) break; // we've reached the end
+      if (!parseSDPLine(sdpLine, nextSDPLine)) return False;
+
+      if (sdpLine[0] == 'm') break; // we've reached the next subsession
+
+      // Check for various special SDP lines that we understand:
+      if (subsession->parseSDPLine_c(sdpLine)) continue;
+      if (subsession->parseSDPLine_b(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_rtpmap(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_rtcpmux(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_control(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_range(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_fmtp(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_source_filter(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_x_dimensions(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_framerate(sdpLine)) continue;
+      if (subsession->parseSDPAttribute_key_mgmt(sdpLine)) continue;
+
+      // (Later, check for malformed lines, and other valid SDP lines#####)
+    }
+    // Truncate the saved SDP text at the start of the next section:
+    if (sdpLine != NULL) subsession->fSavedSDPLines[sdpLine-mStart] = '\0';
+
+    // If we don't yet know the codec name, try looking it up from the
+    // list of static payload types:
+    if (subsession->fCodecName == NULL) {
+      subsession->fCodecName
+	= lookupPayloadFormat(subsession->fRTPPayloadFormat,
+			      subsession->fRTPTimestampFrequency,
+			      subsession->fNumChannels);
+      if (subsession->fCodecName == NULL) {
+	char typeStr[20];
+	sprintf(typeStr, "%d", subsession->fRTPPayloadFormat);
+	envir().setResultMsg("Unknown codec name for RTP payload type ",
+			     typeStr);
+	return False;
+      }
+    }
+
+    // If we don't yet know this subsession's RTP timestamp frequency
+    // (because it uses a dynamic payload type and the corresponding
+    // SDP "rtpmap" attribute erroneously didn't specify it),
+    // then guess it now:
+    if (subsession->fRTPTimestampFrequency == 0) {
+      subsession->fRTPTimestampFrequency
+	= guessRTPTimestampFrequency(subsession->fMediumName,
+				     subsession->fCodecName);
+    }
+  }
+
+  return True;
+}
+
+// Delimit one SDP line: set "nextLine" to the start of the following line
+// (NULL if there is none), and check that "inputLine" has the basic SDP
+// form "<lower-case-char>=<etc>".  Blank lines are also accepted.
+Boolean MediaSession::parseSDPLine(char const* inputLine,
+				   char const*& nextLine){
+  // Begin by finding the start of the next line (if any):
+  nextLine = NULL;
+  for (char const* ptr = inputLine; *ptr != '\0'; ++ptr) {
+    if (*ptr == '\r' || *ptr == '\n') {
+      // We found the end of the line; skip over the (possibly multiple)
+      // end-of-line characters:
+      ++ptr;
+      while (*ptr == '\r' || *ptr == '\n') ++ptr;
+      nextLine = ptr;
+      if (nextLine[0] == '\0') nextLine = NULL; // special case for end
+      break;
+    }
+  }
+
+  // Then, check that this line is a SDP line of the form <char>=<etc>
+  // (However, we also accept blank lines in the input.)
+  if (inputLine[0] == '\r' || inputLine[0] == '\n') return True;
+  if (strlen(inputLine) < 2 || inputLine[1] != '='
+      || inputLine[0] < 'a' || inputLine[0] > 'z') {
+    envir().setResultMsg("Invalid SDP line: ", inputLine);
+    return False;
+  }
+
+  return True;
+}
+
+// Common code used to parse many string values within SDP lines:
+// Common code used to parse many string values within SDP lines:
+// If "sdpLine" matches "searchFormat" (a sscanf format containing exactly
+// one %-string conversion), replace "result" with a freshly heap-allocated
+// copy of the parsed value and return True; otherwise leave "result"
+// unchanged and return False.
+static Boolean parseStringValue(char const* sdpLine, char const* searchFormat, char*& result) {
+  char* const scratch = strDupSize(sdpLine); // big enough for any substring
+  Boolean const success = sscanf(sdpLine, searchFormat, scratch) == 1;
+
+  if (success) {
+    delete[] result;
+    result = strDup(scratch);
+  }
+  delete[] scratch;
+
+  return success;
+}
+
+// Like "parseStringValue()", except that "searchFormat" contains exactly
+// two %-string conversions; "result1" and "result2" are replaced (with
+// heap-allocated copies) only if both values parse.
+static Boolean parseTwoStringValues(char const* sdpLine, char const* searchFormat,
+				    char*& result1, char*& result2) {
+  size_t const bufSize = strlen(sdpLine) + 1; // big enough for any substring
+  char* const scratch1 = new char[bufSize];
+  char* const scratch2 = new char[bufSize];
+
+  Boolean const success = sscanf(sdpLine, searchFormat, scratch1, scratch2) == 2;
+  if (success) {
+    delete[] result1; result1 = strDup(scratch1);
+    delete[] result2; result2 = strDup(scratch2);
+  }
+
+  delete[] scratch1;
+  delete[] scratch2;
+  return success;
+}
+
+// If "sdpLine" is a "a=key-mgmt:<prtcl-id> <keymgmt-data>" attribute whose
+// protocol id is "mikey", Base64-decode its data and parse it into a new
+// "MIKEYState"; otherwise (or on any failure) return NULL.
+static MIKEYState* parseSDPAttribute_key_mgmtToMIKEY(char const* sdpLine) {
+  char* keyMgmtPrtclId = NULL;
+  char* keyMgmtData = NULL;
+  MIKEYState* resultMIKEYState = NULL;
+
+  do {
+    // Check for a "a=key-mgmt:<prtcl-id> <keymgmt-data>" line:
+    if (!parseTwoStringValues(sdpLine, "a=key-mgmt:%s %s", keyMgmtPrtclId, keyMgmtData)) break;
+
+    // We understand only the 'protocol id' "mikey":
+    if (strcmp(keyMgmtPrtclId, "mikey") != 0) break;
+
+    // Base64-decode the "keyMgmtData" string:
+    unsigned keyMgmtData_decodedSize;
+    u_int8_t* keyMgmtData_decoded = base64Decode(keyMgmtData, keyMgmtData_decodedSize);
+    if (keyMgmtData_decoded == NULL) break;
+
+    // NOTE(review): "keyMgmtData_decoded" is never delete[]d in this
+    // function; unless "MIKEYState::createNew()" takes ownership of the
+    // buffer, it is leaked here - confirm against the MIKEY implementation.
+    resultMIKEYState = MIKEYState::createNew(keyMgmtData_decoded, keyMgmtData_decodedSize);
+  } while (0);
+
+  delete[] keyMgmtPrtclId;
+  delete[] keyMgmtData;
+  return resultMIKEYState;
+}
+
+// Check for a "s=<session name>" line; on a match, (re)sets "fSessionName".
+Boolean MediaSession::parseSDPLine_s(char const* sdpLine) {
+  // Check for "s=<session name>" line
+  return parseStringValue(sdpLine, "s=%[^\r\n]", fSessionName);
+}
+
+// Check for a "i=<session description>" line; on a match, (re)sets
+// "fSessionDescription".
+Boolean MediaSession::parseSDPLine_i(char const* sdpLine) {
+  // Check for "i=<session description>" line
+  return parseStringValue(sdpLine, "i=%[^\r\n]", fSessionDescription);
+}
+
+// Check for a session-level connection ("c=") line; on a match, (re)sets
+// "fConnectionEndpointName".
+Boolean MediaSession::parseSDPLine_c(char const* sdpLine) {
+  // Check for "c=IN IP4 <connection-endpoint>"
+  // or "c=IN IP4 <connection-endpoint>/<ttl+numAddresses>"
+  // (Later, do something with <ttl+numAddresses> also #####)
+  return parseStringValue(sdpLine, "c=IN IP4 %[^/\r\n]", fConnectionEndpointName);
+}
+
+// Check for a session "type" attribute; on a match, (re)sets
+// "fMediaSessionType".
+Boolean MediaSession::parseSDPAttribute_type(char const* sdpLine) {
+  // Check for a "a=type:broadcast|meeting|moderated|test|H.332|recvonly" line:
+  return parseStringValue(sdpLine, "a=type: %[^ ]", fMediaSessionType);
+}
+
+// Check for a session-level "control" attribute; on a match, (re)sets
+// "fControlPath" (used later as the aggregate RTSP control URL suffix).
+Boolean MediaSession::parseSDPAttribute_control(char const* sdpLine) {
+  // Check for a "a=control:<control-path>" line:
+  return parseStringValue(sdpLine, "a=control: %s", fControlPath);
+}
+
+// Try to parse "sdpLine" as "a=range:npt=<startTime>-<endTime>"; returns
+// True - with both times filled in - only if both numbers are present.
+static Boolean parseRangeAttribute(char const* sdpLine, double& startTime, double& endTime) {
+  return sscanf(sdpLine, "a=range: npt = %lg - %lg", &startTime, &endTime) == 2;
+}
+
+// Try to parse "sdpLine" as "a=range:clock=<absStartTime>-<absEndTime>".
+// On success, "absStartTime" (and "absEndTime", if an end time is present)
+// are set to newly heap-allocated strings that the caller then owns.
+static Boolean parseRangeAttribute(char const* sdpLine, char*& absStartTime, char*& absEndTime) {
+  size_t len = strlen(sdpLine) + 1;
+  char* as = new char[len];
+  char* ae = new char[len];
+  int sscanfResult = sscanf(sdpLine, "a=range: clock = %[^-\r\n]-%[^\r\n]", as, ae);
+  if (sscanfResult == 2) {
+    absStartTime = as;
+    absEndTime = ae;
+  } else if (sscanfResult == 1) {
+    // Only a start time was present:
+    absStartTime = as;
+    delete[] ae;
+  } else {
+    // Not a "clock" range at all:
+    delete[] as; delete[] ae;
+    return False;
+  }
+
+  return True;
+}
+
+// Check for a session-level "a=range:" attribute, in either "npt=" or
+// "clock=" form, and record the play range accordingly.
+Boolean MediaSession::parseSDPAttribute_range(char const* sdpLine) {
+  // Check for a "a=range:npt=<startTime>-<endTime>" line:
+  // (Later handle other kinds of "a=range" attributes also???#####)
+  Boolean parseSuccess = False;
+
+  double playStartTime;
+  double playEndTime;
+  if (parseRangeAttribute(sdpLine, playStartTime, playEndTime)) {
+    parseSuccess = True;
+    // Keep the widest range seen so far across all "range" attributes:
+    if (playStartTime > fMaxPlayStartTime) {
+      fMaxPlayStartTime = playStartTime;
+    }
+    if (playEndTime > fMaxPlayEndTime) {
+      fMaxPlayEndTime = playEndTime;
+    }
+  } else if (parseRangeAttribute(sdpLine, _absStartTime(), _absEndTime())) {
+    parseSuccess = True;
+  }
+
+  return parseSuccess;
+}
+
+// Try to parse a SSM source-filter attribute; on success, "sourceAddr" is
+// set to the (binary, network-order) address of the named source.
+static Boolean parseSourceFilterAttribute(char const* sdpLine,
+					  struct in_addr& sourceAddr) {
+  // Check for a "a=source-filter:incl IN IP4 <something> <source>" line.
+  // Note: At present, we don't check that <something> really matches
+  // one of our multicast addresses.  We also don't support more than
+  // one <source> #####
+  Boolean result = False; // until we succeed
+  char* sourceName = NULL;
+  do {
+    if (!parseStringValue(sdpLine, "a=source-filter: incl IN IP4 %*s %s", sourceName)) break;
+
+    // Now, convert this name to an address, if we can:
+    NetAddressList addresses(sourceName);
+    if (addresses.numAddresses() == 0) break;
+
+    netAddressBits sourceAddrBits
+      = *(netAddressBits*)(addresses.firstAddress()->data());
+    if (sourceAddrBits == 0) break;
+
+    sourceAddr.s_addr = sourceAddrBits;
+    result = True;
+  } while (0);
+
+  delete[] sourceName;
+  return result;
+}
+
+// Check for a session-level source-filter attribute; on a match, records
+// the source address in "fSourceFilterAddr".
+Boolean MediaSession
+::parseSDPAttribute_source_filter(char const* sdpLine) {
+  return parseSourceFilterAttribute(sdpLine, fSourceFilterAddr);
+}
+
+// Check for a session-level "key-mgmt" (MIKEY) attribute; on a match,
+// replaces any previously-recorded key-management/crypto state.
+Boolean MediaSession::parseSDPAttribute_key_mgmt(char const* sdpLine) {
+  MIKEYState* newMIKEYState = parseSDPAttribute_key_mgmtToMIKEY(sdpLine);
+  if (newMIKEYState == NULL) return False;
+
+  // Delete the old state before installing the new (order matters, because
+  // "fCrypto" references "fMIKEYState"):
+  delete fCrypto; delete fMIKEYState;
+  fMIKEYState = newMIKEYState;
+  fCrypto = new SRTPCryptographicContext(*fMIKEYState);
+
+  return True;
+}
+
+char* MediaSession::lookupPayloadFormat(unsigned char rtpPayloadType,
+					unsigned& freq, unsigned& nCh) {
+  // Look up the codec name, timestamp frequency, and number of channels
+  // for the known (static) RTP payload formats (see RFC 3551).
+  // For an unknown payload type, "freq" and "nCh" are left untouched,
+  // and NULL is returned.
+  struct StaticPayloadFormat {
+    unsigned char payloadType;
+    char const* codecName;
+    unsigned frequency;
+    unsigned numChannels;
+  };
+  static StaticPayloadFormat const formats[] = {
+    { 0, "PCMU", 8000, 1}, { 2, "G726-32", 8000, 1}, { 3, "GSM", 8000, 1},
+    { 4, "G723", 8000, 1}, { 5, "DVI4", 8000, 1}, { 6, "DVI4", 16000, 1},
+    { 7, "LPC", 8000, 1}, { 8, "PCMA", 8000, 1}, { 9, "G722", 8000, 1},
+    {10, "L16", 44100, 2}, {11, "L16", 44100, 1}, {12, "QCELP", 8000, 1},
+    {14, "MPA", 90000, 1}, // 'number of channels' is actually encoded in the media stream
+    {15, "G728", 8000, 1}, {16, "DVI4", 11025, 1}, {17, "DVI4", 22050, 1},
+    {18, "G729", 8000, 1}, {25, "CELB", 90000, 1}, {26, "JPEG", 90000, 1},
+    {28, "NV", 90000, 1}, {31, "H261", 90000, 1}, {32, "MPV", 90000, 1},
+    {33, "MP2T", 90000, 1}, {34, "H263", 90000, 1}
+  };
+
+  for (unsigned i = 0; i < sizeof formats/sizeof formats[0]; ++i) {
+    if (formats[i].payloadType == rtpPayloadType) {
+      freq = formats[i].frequency;
+      nCh = formats[i].numChannels;
+      return strDup(formats[i].codecName);
+    }
+  }
+
+  return NULL; // unknown (static) payload type
+}
+
+unsigned MediaSession::guessRTPTimestampFrequency(char const* mediumName,
+						  char const* codecName) {
+  // A few codecs have a frequency that's known unambiguously (i.e., not
+  // like "DVI4", which occurs at several different frequencies):
+  if (strcmp(codecName, "L16") == 0) return 44100;
+  Boolean const isMPEGAudio
+    = strcmp(codecName, "MPA") == 0
+    || strcmp(codecName, "MPA-ROBUST") == 0
+    || strcmp(codecName, "X-MP3-DRAFT-00") == 0;
+  if (isMPEGAudio) return 90000;
+
+  // Otherwise, guess a default from the medium name: 90000 for video,
+  // 1000 for text, and 8000 for audio (or any other medium):
+  if (strcmp(mediumName, "video") == 0) return 90000;
+  if (strcmp(mediumName, "text") == 0) return 1000;
+  return 8000;
+}
+
+// Return the session's 'absolute' start time, falling back to the first
+// subsession that specified one; NULL if none did.
+char* MediaSession::absStartTime() const {
+  if (fAbsStartTime != NULL) return fAbsStartTime;
+
+  // If a subsession has an 'absolute' start time, then use that:
+  MediaSubsessionIterator iter(*this);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    if (subsession->_absStartTime() != NULL) return subsession->_absStartTime();
+  }
+  return NULL;
+}
+
+// Return the session's 'absolute' end time, falling back to the first
+// subsession that specified one; NULL if none did.
+char* MediaSession::absEndTime() const {
+  if (fAbsEndTime != NULL) return fAbsEndTime;
+
+  // If a subsession has an 'absolute' end time, then use that:
+  MediaSubsessionIterator iter(*this);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    if (subsession->_absEndTime() != NULL) return subsession->_absEndTime();
+  }
+  return NULL;
+}
+
+// Find - initiating if necessary - the first subsession whose read source
+// has the given MIME type.  Subsessions that we initiated but that turn
+// out to have the wrong type are de-initiated again.  Returns False (with
+// a result message set) if no usable subsession is found.
+Boolean MediaSession
+::initiateByMediaType(char const* mimeType,
+		      MediaSubsession*& resultSubsession,
+		      int useSpecialRTPoffset) {
+  // Look through this session's subsessions for media that match "mimeType"
+  resultSubsession = NULL;
+  MediaSubsessionIterator iter(*this);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    Boolean wasAlreadyInitiated = subsession->readSource() != NULL;
+    if (!wasAlreadyInitiated) {
+      // Try to create a source for this subsession:
+      if (!subsession->initiate(useSpecialRTPoffset)) return False;
+    }
+
+    // Make sure the source's MIME type is one that we handle:
+    if (strcmp(subsession->readSource()->MIMEtype(), mimeType) != 0) {
+      // Undo the initiation only if we did it ourselves just now:
+      if (!wasAlreadyInitiated) subsession->deInitiate();
+      continue;
+    }
+
+    resultSubsession = subsession;
+    break; // use this
+  }
+
+  if (resultSubsession == NULL) {
+    envir().setResultMsg("Session has no usable media subsession");
+    return False;
+  }
+
+  return True;
+}
+
+
+////////// MediaSubsessionIterator //////////
+
+// Create an iterator over "session"s subsessions, positioned at the head.
+MediaSubsessionIterator::MediaSubsessionIterator(MediaSession const& session)
+  : fOurSession(session) {
+  reset();
+}
+
+MediaSubsessionIterator::~MediaSubsessionIterator() {
+  // Nothing to do: the iterator owns no state beyond a cursor pointer.
+}
+
+// Return the current subsession (NULL at the end of the list), and advance
+// the cursor for the following call:
+MediaSubsession* MediaSubsessionIterator::next() {
+  MediaSubsession* const current = fNextPtr;
+  if (current != NULL) fNextPtr = current->fNext;
+  return current;
+}
+
+// Rewind the iterator to the first subsession.
+void MediaSubsessionIterator::reset() {
+  fNextPtr = fOurSession.fSubsessionsHead;
+}
+
+
+////////// SDPAttribute definition //////////
+
+// The value of one SDP attribute, stored in several pre-computed forms
+// (string, lower-cased string, integer) so that client code can ask for
+// whichever form it needs.  Instances live in a "MediaSubsession"s
+// attribute hash table.
+class SDPAttribute {
+public:
+  SDPAttribute(char const* strValue, Boolean valueIsHexadecimal);
+  virtual ~SDPAttribute();
+
+  char const* strValue() const { return fStrValue; }
+  char const* strValueToLower() const { return fStrValueToLower; }
+  int intValue() const { return fIntValue; }
+  Boolean valueIsHexadecimal() const { return fValueIsHexadecimal; }
+
+private:
+  char* fStrValue;         // the value, as it appeared in the SDP
+  char* fStrValueToLower;  // the same, lower-cased
+  int fIntValue;           // the value interpreted as an integer
+  Boolean fValueIsHexadecimal; // whether "fIntValue" was parsed as hex
+};
+
+
+////////// MediaSubsession //////////
+
+// Construct an (uninitialized) subsession of "parent"; most fields are
+// filled in later, by SDP parsing and/or "initiate()".
+MediaSubsession::MediaSubsession(MediaSession& parent)
+  : serverPortNum(0), sink(NULL), miscPtr(NULL),
+    fParent(parent), fNext(NULL),
+    fConnectionEndpointName(NULL),
+    fClientPortNum(0), fRTPPayloadFormat(0xFF),
+    fSavedSDPLines(NULL), fMediumName(NULL), fCodecName(NULL), fProtocolName(NULL),
+    fRTPTimestampFrequency(0), fMultiplexRTCPWithRTP(False), fControlPath(NULL),
+    fMIKEYState(NULL), fCrypto(NULL),
+    fSourceFilterAddr(parent.sourceFilterAddr()), fBandwidth(0),
+    fPlayStartTime(0.0), fPlayEndTime(0.0), fAbsStartTime(NULL), fAbsEndTime(NULL),
+    fVideoWidth(0), fVideoHeight(0), fVideoFPS(0), fNumChannels(1), fScale(1.0f), fNPT_PTS_Offset(0.0f),
+    fAttributeTable(HashTable::create(STRING_HASH_KEYS)),
+    fRTPSocket(NULL), fRTCPSocket(NULL),
+    fRTPSource(NULL), fRTCPInstance(NULL), fReadSource(NULL),
+    fReceiveRawMP3ADUs(False), fReceiveRawJPEGFrames(False),
+    fSessionId(NULL) {
+  rtpInfo.seqNum = 0; rtpInfo.timestamp = 0; rtpInfo.infoIsNew = False;
+
+  // A few attributes have unusual default values.  Set these now:
+  setAttribute("profile-level-id", "0", True/*value is hexadecimal*/); // used with "video/H264"
+  // This won't work for MPEG-4 (unless the value is <10), because for MPEG-4, the value
+  // is assumed to be a decimal string, not a hexadecimal string.  NEED TO FIX #####
+  setAttribute("profile-id", "1"); // used with "video/H265"
+  setAttribute("level-id", "93"); // used with "video/H265"
+  setAttribute("interop-constraints", "B00000000000"); // used with "video/H265"
+  setAttribute("sampling", "RGB"); // used with "video/JPEG2000"
+}
+
+MediaSubsession::~MediaSubsession() {
+  // First tear down any live sources/sockets/RTCP state:
+  deInitiate();
+
+  delete[] fConnectionEndpointName; delete[] fSavedSDPLines;
+  delete[] fMediumName; delete[] fCodecName; delete[] fProtocolName;
+  delete[] fControlPath;
+  delete fCrypto; delete fMIKEYState;
+  delete[] fAbsStartTime; delete[] fAbsEndTime;
+  delete[] fSessionId;
+
+  // Empty and delete our 'attributes table':
+  SDPAttribute* attr;
+  while ((attr = (SDPAttribute*)fAttributeTable->RemoveNext()) != NULL) {
+    delete attr;
+  }
+  delete fAttributeTable;
+
+  // Deleting "fNext" recursively deletes the rest of the subsession list:
+  delete fNext;
+}
+
+// Install "filter" as our read source.  (Presumably the filter was
+// constructed on top of the previous read source, so that data still
+// flows from the original source - TODO confirm with callers.)
+void MediaSubsession::addFilter(FramedFilter* filter){
+  fReadSource = filter;
+}
+
+// Our own SDP-specified start time takes precedence; otherwise defer to
+// the parent session's value:
+double MediaSubsession::playStartTime() const {
+  return fPlayStartTime > 0 ? fPlayStartTime : fParent.playStartTime();
+}
+
+// Our own SDP-specified end time takes precedence; otherwise defer to
+// the parent session's value:
+double MediaSubsession::playEndTime() const {
+  return fPlayEndTime > 0 ? fPlayEndTime : fParent.playEndTime();
+}
+
+// Our own 'absolute' start time takes precedence; otherwise defer to the
+// parent session's value:
+char* MediaSubsession::absStartTime() const {
+  return fAbsStartTime != NULL ? fAbsStartTime : fParent.absStartTime();
+}
+
+// Our own 'absolute' end time takes precedence; otherwise defer to the
+// parent session's value:
+char* MediaSubsession::absEndTime() const {
+  return fAbsEndTime != NULL ? fAbsEndTime : fParent.absEndTime();
+}
+
+// Unless "IGNORE_UNICAST_SDP_PORTS" was defined at compile time, we honor
+// the (unicast) client port numbers that appear in the SDP description:
+static Boolean const honorSDPPortChoice
+#ifdef IGNORE_UNICAST_SDP_PORTS
+= False;
+#else
+= True;
+#endif
+
+// Set up this subsession for receiving: create the RTP (and, usually,
+// RTCP) sockets, the RTP source / read source, SRTP crypto state if
+// needed, and the RTCP instance.  Idempotent: returns True immediately if
+// already initiated.  On any failure, all partial state is torn down
+// (via "deInitiate()") and False is returned.
+Boolean MediaSubsession::initiate(int useSpecialRTPoffset) {
+  if (fReadSource != NULL) return True; // has already been initiated
+
+  do {
+    if (fCodecName == NULL) {
+      env().setResultMsg("Codec is unspecified");
+      break;
+    }
+
+    // Create RTP and RTCP 'Groupsocks' on which to receive incoming data.
+    // (Groupsocks will work even for unicast addresses)
+    struct in_addr tempAddr;
+    tempAddr.s_addr = connectionEndpointAddress();
+    // This could get changed later, as a result of a RTSP "SETUP"
+
+    Boolean const useSRTP = strcmp(fProtocolName, "SRTP") == 0;
+    Boolean const protocolIsRTP = useSRTP || strcmp(fProtocolName, "RTP") == 0;
+
+    if (fClientPortNum != 0 && (honorSDPPortChoice || IsMulticastAddress(tempAddr.s_addr))) {
+      // The sockets' port numbers were specified for us.  Use these:
+      if (protocolIsRTP && !fMultiplexRTCPWithRTP) {
+	fClientPortNum = fClientPortNum&~1;
+	// use an even-numbered port for RTP, and the next (odd-numbered) port for RTCP
+      }
+      if (isSSM()) {
+	fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, fClientPortNum);
+      } else {
+	fRTPSocket = new Groupsock(env(), tempAddr, fClientPortNum, 255);
+      }
+      if (fRTPSocket == NULL) {
+	env().setResultMsg("Failed to create RTP socket");
+	break;
+      }
+
+      if (protocolIsRTP) {
+	if (fMultiplexRTCPWithRTP) {
+	  // Use the RTP 'groupsock' object for RTCP as well:
+	  fRTCPSocket = fRTPSocket;
+	} else {
+	  // Set our RTCP port to be the RTP port + 1:
+	  portNumBits const rtcpPortNum = fClientPortNum|1;
+	  if (isSSM()) {
+	    fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, rtcpPortNum);
+	  } else {
+	    fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255);
+	  }
+	}
+      }
+    } else {
+      // Port numbers were not specified in advance, so we use ephemeral port numbers.
+      // Create sockets until we get a port-number pair (even: RTP; even+1: RTCP).
+      // (However, if we're multiplexing RTCP with RTP, then we create only one socket,
+      // and the port number can be even or odd.)
+      // We need to make sure that we don't keep trying to use the same bad port numbers over
+      // and over again, so we store bad sockets in a table, and delete them all when we're done.
+      HashTable* socketHashTable = HashTable::create(ONE_WORD_HASH_KEYS);
+      if (socketHashTable == NULL) break;
+      Boolean success = False;
+      NoReuse dummy(env());
+      // ensures that our new ephemeral port number won't be one that's already in use
+
+      while (1) {
+	// Create a new socket:
+	if (isSSM()) {
+	  fRTPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, 0);
+	} else {
+	  fRTPSocket = new Groupsock(env(), tempAddr, 0, 255);
+	}
+	if (fRTPSocket == NULL) {
+	  env().setResultMsg("MediaSession::initiate(): unable to create RTP and RTCP sockets");
+	  break;
+	}
+
+	// Get the client port number (assigned by the OS):
+	Port clientPort(0);
+	if (!getSourcePort(env(), fRTPSocket->socketNum(), clientPort)) {
+	  break;
+	}
+	fClientPortNum = ntohs(clientPort.num());
+
+	if (fMultiplexRTCPWithRTP) {
+	  // Use this RTP 'groupsock' object for RTCP as well:
+	  fRTCPSocket = fRTPSocket;
+	  success = True;
+	  break;
+	}
+
+	// To be usable for RTP, the client port number must be even:
+	if ((fClientPortNum&1) != 0) { // it's odd
+	  // Record this socket in our table, and keep trying:
+	  unsigned key = (unsigned)fClientPortNum;
+	  Groupsock* existing = (Groupsock*)socketHashTable->Add((char const*)key, fRTPSocket);
+	  delete existing; // in case it wasn't NULL
+	  continue;
+	}
+
+	// Make sure we can use the next (i.e., odd) port number, for RTCP:
+	portNumBits rtcpPortNum = fClientPortNum|1;
+	if (isSSM()) {
+	  fRTCPSocket = new Groupsock(env(), tempAddr, fSourceFilterAddr, rtcpPortNum);
+	} else {
+	  fRTCPSocket = new Groupsock(env(), tempAddr, rtcpPortNum, 255);
+	}
+	if (fRTCPSocket != NULL && fRTCPSocket->socketNum() >= 0) {
+	  // Success! Use these two sockets.
+	  success = True;
+	  break;
+	} else {
+	  // We couldn't create the RTCP socket (perhaps that port number's already in use elsewhere?).
+	  delete fRTCPSocket; fRTCPSocket = NULL;
+
+	  // Record the first socket in our table, and keep trying:
+	  unsigned key = (unsigned)fClientPortNum;
+	  Groupsock* existing = (Groupsock*)socketHashTable->Add((char const*)key, fRTPSocket);
+	  delete existing; // in case it wasn't NULL
+	  continue;
+	}
+      }
+
+      // Clean up the socket hash table (and contents):
+      Groupsock* oldGS;
+      while ((oldGS = (Groupsock*)socketHashTable->RemoveNext()) != NULL) {
+	delete oldGS;
+      }
+      delete socketHashTable;
+
+      if (!success) break; // a fatal error occurred trying to create the RTP and RTCP sockets; we can't continue
+    }
+
+    // Try to use a big receive buffer for RTP - at least 0.1 second of
+    // specified bandwidth and at least 50 KB
+    unsigned rtpBufSize = fBandwidth * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
+    if (rtpBufSize < 50 * 1024)
+      rtpBufSize = 50 * 1024;
+    increaseReceiveBufferTo(env(), fRTPSocket->socketNum(), rtpBufSize);
+
+    if (isSSM() && fRTCPSocket != NULL) {
+      // Special case for RTCP SSM: Send RTCP packets back to the source via unicast:
+      fRTCPSocket->changeDestinationParameters(fSourceFilterAddr,0,~0);
+    }
+
+    // Create "fRTPSource" and "fReadSource":
+    if (!createSourceObjects(useSpecialRTPoffset)) break;
+
+    if (fReadSource == NULL) {
+      env().setResultMsg("Failed to create read source");
+      break;
+    }
+
+    SRTPCryptographicContext* ourCrypto = NULL;
+    if (useSRTP) {
+      // For SRTP, we need key management.  If MIKEY (key management) state wasn't given
+      // to us in the SDP description, then create it now:
+      ourCrypto = getCrypto();
+      if (ourCrypto == NULL) { // then fMIKEYState is also NULL; create both
+	fMIKEYState = new MIKEYState();
+	ourCrypto = fCrypto = new SRTPCryptographicContext(*fMIKEYState);
+      }
+
+      if (fRTPSource != NULL) fRTPSource->setCrypto(ourCrypto);
+    }
+
+    // Finally, create our RTCP instance. (It starts running automatically)
+    if (fRTPSource != NULL && fRTCPSocket != NULL) {
+      // If bandwidth is specified, use it and add 5% for RTCP overhead.
+      // Otherwise make a guess at 500 kbps.
+      unsigned totSessionBandwidth
+	= fBandwidth ? fBandwidth + fBandwidth / 20 : 500;
+      fRTCPInstance = RTCPInstance::createNew(env(), fRTCPSocket,
+					      totSessionBandwidth,
+					      (unsigned char const*)
+					      fParent.CNAME(),
+					      NULL /* we're a client */,
+					      fRTPSource,
+					      False /* we're not a data transmitter */,
+					      ourCrypto);
+      if (fRTCPInstance == NULL) {
+	env().setResultMsg("Failed to create RTCP instance");
+	break;
+      }
+    }
+
+    return True;
+  } while (0);
+
+  // An error occurred in the "do" loop above; undo any partial setup:
+  deInitiate();
+  fClientPortNum = 0;
+  return False;
+}
+
+// Undo "initiate()": close the RTCP instance and sources, and delete the
+// sockets.  Safe to call even if initiation never happened (or failed).
+void MediaSubsession::deInitiate() {
+  Medium::close(fRTCPInstance); fRTCPInstance = NULL;
+
+  Medium::close(fReadSource); // this is assumed to also close fRTPSource
+  fReadSource = NULL; fRTPSource = NULL;
+
+  // Guard against double-deleting when RTCP is multiplexed on the RTP socket:
+  delete fRTPSocket;
+  if (fRTCPSocket != fRTPSocket) delete fRTCPSocket;
+  fRTPSocket = NULL; fRTCPSocket = NULL;
+}
+
+// Override the client port number to use.  Allowed only before
+// "initiate()" has created our read source; otherwise returns False (with
+// a result message set).
+Boolean MediaSubsession::setClientPortNum(unsigned short portNum) {
+  if (fReadSource != NULL) {
+    env().setResultMsg("A read source has already been created");
+    return False;
+  }
+
+  fClientPortNum = portNum;
+  return True;
+}
+
+// Return the string value of the named SDP attribute ("" if absent).
+char const* MediaSubsession::attrVal_str(char const* attrName) const {
+  SDPAttribute* const attr = (SDPAttribute*)(fAttributeTable->Lookup(attrName));
+  return attr == NULL ? "" : attr->strValue();
+}
+
+// Return the lower-cased string value of the named SDP attribute
+// ("" if absent).
+char const* MediaSubsession::attrVal_strToLower(char const* attrName) const {
+  SDPAttribute* const attr = (SDPAttribute*)(fAttributeTable->Lookup(attrName));
+  return attr == NULL ? "" : attr->strValueToLower();
+}
+
+// Return the integer value of the named SDP attribute (0 if absent).
+unsigned MediaSubsession::attrVal_int(char const* attrName) const {
+  SDPAttribute* const attr = (SDPAttribute*)(fAttributeTable->Lookup(attrName));
+  return attr == NULL ? 0 : attr->intValue();
+}
+
+// Return the "config" fmtp parameter, accepting "configuration" as an
+// alternative spelling ("" if neither is present).
+char const* MediaSubsession::fmtp_config() const {
+  char const* const config = attrVal_str("config");
+  return config[0] != '\0' ? config : attrVal_str("configuration");
+}
+
+// Resolve our connection endpoint name (or, failing that, our parent
+// session's) to a (network-order) address; returns 0 if unknown.
+netAddressBits MediaSubsession::connectionEndpointAddress() const {
+  do {
+    // Get the endpoint name from ourselves, or from our parent session:
+    char const* endpointString = connectionEndpointName();
+    if (endpointString == NULL) {
+      endpointString = parentSession().connectionEndpointName();
+    }
+    if (endpointString == NULL) break;
+
+    // Now, convert this name to an address, if we can:
+    NetAddressList addresses(endpointString);
+    if (addresses.numAddresses() == 0) break;
+
+    return *(netAddressBits*)(addresses.firstAddress()->data());
+  } while (0);
+
+  // No address known:
+  return 0;
+}
+
+// Points our RTP socket (and, unless this is a SSM session or RTCP is
+// multiplexed with RTP, our RTCP socket) at the server's address and ports,
+// leaving each socket's TTL unchanged.
+void MediaSubsession::setDestinations(netAddressBits defaultDestAddress) {
+  // Get the destination address from the connection endpoint name
+  // (This will be 0 if it's not known, in which case we use the default)
+  netAddressBits destAddress = connectionEndpointAddress();
+  if (destAddress == 0) destAddress = defaultDestAddress;
+  struct in_addr destAddr; destAddr.s_addr = destAddress;
+
+  // The destination TTL remains unchanged:
+  int destTTL = ~0; // means: don't change
+
+  if (fRTPSocket != NULL) {
+    Port destPort(serverPortNum);
+    fRTPSocket->changeDestinationParameters(destAddr, destPort, destTTL);
+  }
+  if (fRTCPSocket != NULL && !isSSM() && !fMultiplexRTCPWithRTP) {
+    // Note: For SSM sessions, the dest address for RTCP was already set.
+    // RTCP uses the port one above the RTP server port:
+    Port destPort(serverPortNum+1);
+    fRTCPSocket->changeDestinationParameters(destAddr, destPort, destTTL);
+  }
+}
+
+// Records the RTSP session id for this subsession, replacing (and freeing)
+// any previously stored copy.  The string is duplicated, so the caller
+// retains ownership of "sessionId".
+void MediaSubsession::setSessionId(char const* sessionId) {
+  delete[] fSessionId;
+  fSessionId = strDup(sessionId);
+}
+
+// Maps a packet's presentation time to "normal play time" (NPT, in seconds).
+// Before RTCP synchronization has occurred, the NPT is derived from the RTP
+// timestamp relative to the "rtpInfo" (RTP-Info header) baseline; afterwards
+// it is derived from the (now accurate) presentation time, via a cached
+// offset "fNPT_PTS_Offset".  Returns 0.0 when the NPT cannot be computed,
+// and -0.1 as a sentinel for stale (pre-RTP-Info) packets.
+double MediaSubsession::getNormalPlayTime(struct timeval const& presentationTime) {
+  if (rtpSource() == NULL || rtpSource()->timestampFrequency() == 0) return 0.0; // no RTP source, or bad freq!
+
+  // First, check whether our "RTPSource" object has already been synchronized using RTCP.
+  // If it hasn't, then - as a special case - we need to use the RTP timestamp to compute the NPT.
+  if (!rtpSource()->hasBeenSynchronizedUsingRTCP()) {
+    if (!rtpInfo.infoIsNew) return 0.0; // the "rtpInfo" structure has not been filled in
+    // Unsigned subtraction: correct even if the 32-bit RTP timestamp wrapped.
+    u_int32_t timestampOffset = rtpSource()->curPacketRTPTimestamp() - rtpInfo.timestamp;
+    double nptOffset = (timestampOffset/(double)(rtpSource()->timestampFrequency()))*scale();
+    double npt = playStartTime() + nptOffset;
+
+    return npt;
+  } else {
+    // Common case: We have been synchronized using RTCP. This means that the "presentationTime" parameter
+    // will be accurate, and so we should use this to compute the NPT.
+    double ptsDouble = (double)(presentationTime.tv_sec + presentationTime.tv_usec/1000000.0);
+
+    if (rtpInfo.infoIsNew) {
+      // This is the first time we've been called with a synchronized presentation time since the "rtpInfo"
+      // structure was last filled in. Use this "presentationTime" to compute "fNPT_PTS_Offset":
+      if (seqNumLT(rtpSource()->curPacketRTPSeqNum(), rtpInfo.seqNum)) return -0.1; // sanity check; ignore old packets
+      u_int32_t timestampOffset = rtpSource()->curPacketRTPTimestamp() - rtpInfo.timestamp;
+      double nptOffset = (timestampOffset/(double)(rtpSource()->timestampFrequency()))*scale();
+      double npt = playStartTime() + nptOffset;
+      fNPT_PTS_Offset = npt - ptsDouble*scale();
+      rtpInfo.infoIsNew = False; // for next time
+
+      return npt;
+    } else {
+      // Use the precomputed "fNPT_PTS_Offset" to compute the NPT from the PTS:
+      // (an offset of exactly 0.0 is treated as 'never initialized')
+      if (fNPT_PTS_Offset == 0.0) return 0.0; // error: The "rtpInfo" structure was apparently never filled in
+      return (double)(ptsDouble*scale() + fNPT_PTS_Offset);
+    }
+  }
+}
+
+// Stores (name -> value) in our SDP attribute table, replacing any existing
+// attribute with the same name.  Note: if an old attribute exists, its
+// 'valueIsHexadecimal' property overrides the caller's parameter, so a
+// previously-declared hex attribute stays hex when re-set.
+void MediaSubsession
+::setAttribute(char const* name, char const* value, Boolean valueIsHexadecimal) {
+  // Replace any existing attribute record with this name (except that the 'valueIsHexadecimal'
+  // property will be inherited from it, if it exists).
+  SDPAttribute* oldAttr = (SDPAttribute*)fAttributeTable->Lookup(name);
+  if (oldAttr != NULL) {
+    valueIsHexadecimal = oldAttr->valueIsHexadecimal();
+    fAttributeTable->Remove(name);
+    delete oldAttr;
+  }
+
+  SDPAttribute* newAttr = new SDPAttribute(value, valueIsHexadecimal);
+  (void)fAttributeTable->Add(name, newAttr);
+}
+
+// Parses a SDP "c=" (connection) line, storing the endpoint name in
+// "fConnectionEndpointName".  Returns True iff the line matched.
+Boolean MediaSubsession::parseSDPLine_c(char const* sdpLine) {
+  // Check for "c=IN IP4 <connection-endpoint>"
+  // or "c=IN IP4 <connection-endpoint>/<ttl+numAddresses>"
+  // (Later, do something with <ttl+numAddresses> also #####)
+  return parseStringValue(sdpLine, "c=IN IP4 %[^/\r\n]", fConnectionEndpointName);
+}
+
+// Parses a SDP "b=" (bandwidth) line into "fBandwidth".  Only the "AS"
+// bandwidth type is recognized.  Returns True iff the line matched.
+Boolean MediaSubsession::parseSDPLine_b(char const* sdpLine) {
+  // Check for "b=<bwtype>:<bandwidth>" line
+  // RTP applications are expected to use bwtype="AS"
+  return sscanf(sdpLine, "b=AS:%u", &fBandwidth) == 1;
+}
+
+// Parses a SDP "a=rtpmap:" attribute.  If its payload format number matches
+// ours, records the (upper-cased) codec name, the RTP timestamp frequency,
+// and the number of channels.  Returns True iff the line parsed as a rtpmap
+// attribute (whether or not the payload format matched ours).
+Boolean MediaSubsession::parseSDPAttribute_rtpmap(char const* sdpLine) {
+  // Check for a "a=rtpmap:<fmt> <codec>/<freq>" line:
+  // (Also check without the "/<freq>"; RealNetworks omits this)
+  // Also check for a trailing "/<numChannels>".
+  Boolean parseSuccess = False;
+
+  unsigned rtpmapPayloadFormat;
+  char* codecName = strDupSize(sdpLine); // ensures we have enough space
+  unsigned rtpTimestampFrequency = 0;
+  unsigned numChannels = 1;
+  // Try the most specific form first, then progressively laxer ones:
+  if (sscanf(sdpLine, "a=rtpmap: %u %[^/]/%u/%u",
+	     &rtpmapPayloadFormat, codecName, &rtpTimestampFrequency,
+	     &numChannels) == 4
+      || sscanf(sdpLine, "a=rtpmap: %u %[^/]/%u",
+	     &rtpmapPayloadFormat, codecName, &rtpTimestampFrequency) == 3
+      || sscanf(sdpLine, "a=rtpmap: %u %s",
+		&rtpmapPayloadFormat, codecName) == 2) {
+    parseSuccess = True;
+    if (rtpmapPayloadFormat == fRTPPayloadFormat) {
+      // This "rtpmap" matches our payload format, so set our
+      // codec name and timestamp frequency:
+      // (First, make sure the codec name is upper case)
+      {
+	// Use the "POSIX" locale so "toupper()" is locale-independent:
+	Locale l("POSIX");
+	for (char* p = codecName; *p != '\0'; ++p) *p = toupper(*p);
+      }
+      delete[] fCodecName; fCodecName = strDup(codecName);
+      fRTPTimestampFrequency = rtpTimestampFrequency;
+      fNumChannels = numChannels;
+    }
+  }
+  delete[] codecName;
+
+  return parseSuccess;
+}
+
+// Parses a SDP "a=rtcp-mux" attribute: when present, RTCP is multiplexed on
+// the same port as RTP.  Returns True iff the line matched.
+Boolean MediaSubsession::parseSDPAttribute_rtcpmux(char const* sdpLine) {
+  if (strncmp(sdpLine, "a=rtcp-mux", 10) == 0) {
+    fMultiplexRTCPWithRTP = True;
+    return True;
+  }
+
+  return False;
+}
+
+// Parses a SDP "a=control:" attribute into "fControlPath".
+// Returns True iff the line matched.
+Boolean MediaSubsession::parseSDPAttribute_control(char const* sdpLine) {
+  // Check for a "a=control:<control-path>" line:
+  return parseStringValue(sdpLine, "a=control: %s", fControlPath);
+}
+
+// Parses a SDP "a=range:" attribute.  NPT ranges update our play start/end
+// times — and widen the parent session's, so the session range covers all
+// subsessions.  Failing that, an absolute-time range is tried (stored via
+// "_absStartTime()"/"_absEndTime()").  Returns True iff either form parsed.
+Boolean MediaSubsession::parseSDPAttribute_range(char const* sdpLine) {
+  // Check for a "a=range:npt=<startTime>-<endTime>" line:
+  // (Later handle other kinds of "a=range" attributes also???#####)
+  Boolean parseSuccess = False;
+
+  double playStartTime;
+  double playEndTime;
+  if (parseRangeAttribute(sdpLine, playStartTime, playEndTime)) {
+    parseSuccess = True;
+    // Only ever widen the known range; never shrink it:
+    if (playStartTime > fPlayStartTime) {
+      fPlayStartTime = playStartTime;
+      if (playStartTime > fParent.playStartTime()) {
+	fParent.playStartTime() = playStartTime;
+      }
+    }
+    if (playEndTime > fPlayEndTime) {
+      fPlayEndTime = playEndTime;
+      if (playEndTime > fParent.playEndTime()) {
+	fParent.playEndTime() = playEndTime;
+      }
+    }
+  } else if (parseRangeAttribute(sdpLine, _absStartTime(), _absEndTime())) {
+    parseSuccess = True;
+  }
+
+  return parseSuccess;
+}
+
+// Parses a SDP "a=fmtp:" attribute, storing each "<name>=<value>" (or bare
+// "<name>", treated as a Boolean) parameter into our attribute table via
+// "setAttribute()".  Names are lower-cased for case-insensitive lookup.
+// Returns True iff the line is a fmtp attribute.
+Boolean MediaSubsession::parseSDPAttribute_fmtp(char const* sdpLine) {
+  // Check for a "a=fmtp:" line:
+  // Later: Check that payload format number matches; #####
+  do {
+    if (strncmp(sdpLine, "a=fmtp:", 7) != 0) break; sdpLine += 7;
+    // Skip the payload format number that follows "a=fmtp:":
+    while (isdigit(*sdpLine)) ++sdpLine;
+
+    // The remaining "sdpLine" should be a sequence of
+    //     <name>=<value>;
+    // or
+    //     <name>;
+    // parameter assignments.  Look at each of these.
+    // Scratch buffers sized to the whole line, so sscanf can never overflow:
+    unsigned const sdpLineLen = strlen(sdpLine);
+    char* nameStr = new char[sdpLineLen+1];
+    char* valueStr = new char[sdpLineLen+1];
+
+    while (*sdpLine != '\0' && *sdpLine != '\r' && *sdpLine != '\n') {
+      int sscanfResult = sscanf(sdpLine, " %[^=; \t\r\n] = %[^; \t\r\n]", nameStr, valueStr);
+      if (sscanfResult >= 1) {
+	// <name> or <name>=<value>
+	// Convert <name> to lower-case, to ease comparison:
+	Locale l("POSIX");
+	for (char* c = nameStr; *c != '\0'; ++c) *c = tolower(*c);
+
+	if (sscanfResult == 1) {
+	  // <name>
+	  setAttribute(nameStr);
+	} else {
+	  // <name>=<value>
+	  setAttribute(nameStr, valueStr);
+	}
+      }
+
+      // Move to the next parameter assignment string:
+      while (*sdpLine != '\0' && *sdpLine != '\r' && *sdpLine != '\n' && *sdpLine != ';') ++sdpLine;
+      while (*sdpLine == ';') ++sdpLine;
+    }
+    delete[] nameStr; delete[] valueStr;
+    return True;
+  } while (0);
+
+  return False;
+}
+
+// Parses a SDP "a=source-filter:" attribute (used for SSM sessions),
+// storing the source address in "fSourceFilterAddr".
+Boolean MediaSubsession
+::parseSDPAttribute_source_filter(char const* sdpLine) {
+  return parseSourceFilterAttribute(sdpLine, fSourceFilterAddr);
+}
+
+// Parses a (non-standard) SDP "a=x-dimensions:" attribute into our video
+// width and height.  Returns True iff the line matched.
+Boolean MediaSubsession::parseSDPAttribute_x_dimensions(char const* sdpLine) {
+  // Check for a "a=x-dimensions:<width>,<height>" line:
+  Boolean parseSuccess = False;
+
+  int width, height;
+  if (sscanf(sdpLine, "a=x-dimensions:%d,%d", &width, &height) == 2) {
+    parseSuccess = True;
+    fVideoWidth = (unsigned short)width;
+    fVideoHeight = (unsigned short)height;
+  }
+
+  return parseSuccess;
+}
+
+// Parses a SDP "a=framerate:" (floating-point) or "a=x-framerate:" (integer)
+// attribute into "fVideoFPS" (truncated to an unsigned integer).
+// Returns True iff either form matched.
+Boolean MediaSubsession::parseSDPAttribute_framerate(char const* sdpLine) {
+  // Check for a "a=framerate: <fps>" or "a=x-framerate: <fps>" line:
+  Boolean parseSuccess = False;
+
+  float frate;
+  int rate;
+  if (sscanf(sdpLine, "a=framerate: %f", &frate) == 1 || sscanf(sdpLine, "a=framerate:%f", &frate) == 1) {
+    parseSuccess = True;
+    fVideoFPS = (unsigned)frate;
+  } else if (sscanf(sdpLine, "a=x-framerate: %d", &rate) == 1) {
+    parseSuccess = True;
+    fVideoFPS = (unsigned)rate;
+  }
+
+  return parseSuccess;
+}
+
+// Parses a SDP "a=key-mgmt:" attribute into a MIKEY state, replacing any
+// previous MIKEY state and SRTP cryptographic context with ones derived from
+// the new attribute.  Returns False (leaving existing state untouched) if
+// the line doesn't parse.
+Boolean MediaSubsession::parseSDPAttribute_key_mgmt(char const* sdpLine) {
+  MIKEYState* newMIKEYState = parseSDPAttribute_key_mgmtToMIKEY(sdpLine);
+  if (newMIKEYState == NULL) return False;
+
+  // Discard the old crypto context/MIKEY state before rebuilding from the
+  // newly parsed state:
+  delete fCrypto; delete fMIKEYState;
+  fMIKEYState = newMIKEYState;
+  fCrypto = new SRTPCryptographicContext(*fMIKEYState);
+
+  return True;
+}
+
+// Creates "fRTPSource" and "fReadSource" appropriate to this subsession's
+// protocol name ("UDP" vs. RTP) and codec name.  For most codecs the two
+// point at the same object; for some (e.g. QCELP, AMR, MPA-ROBUST, MP2T)
+// "fReadSource" is a filter chained after "fRTPSource".  Unrecognized codecs
+// fall back to a generic "SimpleRTPSource" when "useSpecialRTPoffset" >= 0.
+// Returns True on success, False on error (with an error message set in the
+// environment for unknown payload formats).
+Boolean MediaSubsession::createSourceObjects(int useSpecialRTPoffset) {
+  do {
+    // First, check "fProtocolName"
+    if (strcmp(fProtocolName, "UDP") == 0) {
+      // A UDP-packetized stream (*not* a RTP stream)
+      fReadSource = BasicUDPSource::createNew(env(), fRTPSocket);
+      fRTPSource = NULL; // Note!
+
+      if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream
+	fReadSource = MPEG2TransportStreamFramer::createNew(env(), fReadSource);
+	// this sets "durationInMicroseconds" correctly, based on the PCR values
+      }
+    } else {
+      // Check "fCodecName" against the set of codecs that we support,
+      // and create our RTP source accordingly
+      // (Later make this code more efficient, as this set grows #####)
+      // (Also, add more fmts that can be implemented by SimpleRTPSource#####)
+      Boolean createSimpleRTPSource = False; // by default; can be changed below
+      Boolean doNormalMBitRule = False; // default behavior if "createSimpleRTPSource" is True
+      if (strcmp(fCodecName, "QCELP") == 0) { // QCELP audio
+	fReadSource =
+	  QCELPAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
+					 fRTPPayloadFormat,
+					 fRTPTimestampFrequency);
+	// Note that fReadSource will differ from fRTPSource in this case
+      } else if (strcmp(fCodecName, "AMR") == 0) { // AMR audio (narrowband)
+	fReadSource =
+	  AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
+				       fRTPPayloadFormat, False /*isWideband*/,
+				       fNumChannels, attrVal_bool("octet-align"),
+				       attrVal_unsigned("interleaving"),
+				       attrVal_bool("robust-sorting"),
+				       attrVal_bool("crc"));
+	// Note that fReadSource will differ from fRTPSource in this case
+      } else if (strcmp(fCodecName, "AMR-WB") == 0) { // AMR audio (wideband)
+	fReadSource =
+	  AMRAudioRTPSource::createNew(env(), fRTPSocket, fRTPSource,
+				       fRTPPayloadFormat, True /*isWideband*/,
+				       fNumChannels, attrVal_bool("octet-align"),
+				       attrVal_unsigned("interleaving"),
+				       attrVal_bool("robust-sorting"),
+				       attrVal_bool("crc"));
+	// Note that fReadSource will differ from fRTPSource in this case
+      } else if (strcmp(fCodecName, "MPA") == 0) { // MPEG-1 or 2 audio
+	fReadSource = fRTPSource
+	  = MPEG1or2AudioRTPSource::createNew(env(), fRTPSocket,
+					      fRTPPayloadFormat,
+					      fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "MPA-ROBUST") == 0) { // robust MP3 audio
+	fReadSource = fRTPSource
+	  = MP3ADURTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
+				       fRTPTimestampFrequency);
+	if (fRTPSource == NULL) break;
+
+	if (!fReceiveRawMP3ADUs) {
+	  // Add a filter that deinterleaves the ADUs after depacketizing them:
+	  MP3ADUdeinterleaver* deinterleaver
+	    = MP3ADUdeinterleaver::createNew(env(), fRTPSource);
+	  if (deinterleaver == NULL) break;
+
+	  // Add another filter that converts these ADUs to MP3 frames:
+	  fReadSource = MP3FromADUSource::createNew(env(), deinterleaver);
+	}
+      } else if (strcmp(fCodecName, "X-MP3-DRAFT-00") == 0) {
+	// a non-standard variant of "MPA-ROBUST" used by RealNetworks
+	// (one 'ADU'ized MP3 frame per packet; no headers)
+	fRTPSource
+	  = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
+				       fRTPTimestampFrequency,
+				       "audio/MPA-ROBUST" /*hack*/);
+	if (fRTPSource == NULL) break;
+
+	// Add a filter that converts these ADUs to MP3 frames:
+	fReadSource = MP3FromADUSource::createNew(env(), fRTPSource,
+						  False /*no ADU header*/);
+      } else if (strcmp(fCodecName, "MP4A-LATM") == 0) { // MPEG-4 LATM audio
+	fReadSource = fRTPSource
+	  = MPEG4LATMAudioRTPSource::createNew(env(), fRTPSocket,
+					       fRTPPayloadFormat,
+					       fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "VORBIS") == 0) { // Vorbis audio
+	fReadSource = fRTPSource
+	  = VorbisAudioRTPSource::createNew(env(), fRTPSocket,
+					    fRTPPayloadFormat,
+					    fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "THEORA") == 0) { // Theora video
+	fReadSource = fRTPSource
+	  = TheoraVideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat);
+      } else if (strcmp(fCodecName, "RAW") == 0) { // Uncompressed raw video (RFC 4175)
+	fReadSource = fRTPSource
+	  = RawVideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat, fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "VP8") == 0) { // VP8 video
+	fReadSource = fRTPSource
+	  = VP8VideoRTPSource::createNew(env(), fRTPSocket,
+					 fRTPPayloadFormat,
+					 fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "VP9") == 0) { // VP9 video
+	fReadSource = fRTPSource
+	  = VP9VideoRTPSource::createNew(env(), fRTPSocket,
+					 fRTPPayloadFormat,
+					 fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "AC3") == 0 || strcmp(fCodecName, "EAC3") == 0) { // AC3 audio
+	fReadSource = fRTPSource
+	  = AC3AudioRTPSource::createNew(env(), fRTPSocket,
+					 fRTPPayloadFormat,
+					 fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "MP4V-ES") == 0) { // MPEG-4 Elementary Stream video
+	fReadSource = fRTPSource
+	  = MPEG4ESVideoRTPSource::createNew(env(), fRTPSocket,
+					     fRTPPayloadFormat,
+					     fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) {
+	fReadSource = fRTPSource
+	  = MPEG4GenericRTPSource::createNew(env(), fRTPSocket,
+					     fRTPPayloadFormat,
+					     fRTPTimestampFrequency,
+					     fMediumName, attrVal_strToLower("mode"),
+					     attrVal_unsigned("sizelength"),
+					     attrVal_unsigned("indexlength"),
+					     attrVal_unsigned("indexdeltalength"));
+      } else if (strcmp(fCodecName, "MPV") == 0) { // MPEG-1 or 2 video
+	fReadSource = fRTPSource
+	  = MPEG1or2VideoRTPSource::createNew(env(), fRTPSocket,
+					      fRTPPayloadFormat,
+					      fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "MP2T") == 0) { // MPEG-2 Transport Stream
+	fRTPSource = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
+						fRTPTimestampFrequency, "video/MP2T",
+						0, False);
+	fReadSource = MPEG2TransportStreamFramer::createNew(env(), fRTPSource);
+	// this sets "durationInMicroseconds" correctly, based on the PCR values
+      } else if (strcmp(fCodecName, "H261") == 0) { // H.261
+	fReadSource = fRTPSource
+	  = H261VideoRTPSource::createNew(env(), fRTPSocket,
+					  fRTPPayloadFormat,
+					  fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "H263-1998") == 0 ||
+		 strcmp(fCodecName, "H263-2000") == 0) { // H.263+
+	fReadSource = fRTPSource
+	  = H263plusVideoRTPSource::createNew(env(), fRTPSocket,
+					      fRTPPayloadFormat,
+					      fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "H264") == 0) {
+	fReadSource = fRTPSource
+	  = H264VideoRTPSource::createNew(env(), fRTPSocket,
+					  fRTPPayloadFormat,
+					  fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "H265") == 0) {
+	Boolean expectDONFields = attrVal_unsigned("sprop-depack-buf-nalus") > 0;
+	fReadSource = fRTPSource
+	  = H265VideoRTPSource::createNew(env(), fRTPSocket,
+					  fRTPPayloadFormat,
+					  expectDONFields,
+					  fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "DV") == 0) {
+	fReadSource = fRTPSource
+	  = DVVideoRTPSource::createNew(env(), fRTPSocket,
+					fRTPPayloadFormat,
+					fRTPTimestampFrequency);
+      } else if (strcmp(fCodecName, "JPEG") == 0) { // motion JPEG
+	if (fReceiveRawJPEGFrames) {
+	  // Special case (used when proxying JPEG/RTP streams): Receive each JPEG/RTP packet, including the special RTP headers:
+	  fReadSource = fRTPSource
+	    = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
+					 fRTPTimestampFrequency, "video/JPEG",
+					 0/*special offset*/, False/*doNormalMBitRule => ignore the 'M' bit*/);
+	} else {
+	  // Normal case: Receive each JPEG frame as a complete, displayable JPEG image:
+	  fReadSource = fRTPSource
+	    = JPEGVideoRTPSource::createNew(env(), fRTPSocket,
+					    fRTPPayloadFormat,
+					    fRTPTimestampFrequency,
+					    videoWidth(),
+					    videoHeight());
+	}
+      } else if (strcmp(fCodecName, "JPEG2000") == 0) { // JPEG 2000 video
+	fReadSource = fRTPSource
+	  = JPEG2000VideoRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
+					      fRTPTimestampFrequency,
+					      attrVal_str("sampling"));
+      } else if (strcmp(fCodecName, "X-QT") == 0
+		 || strcmp(fCodecName, "X-QUICKTIME") == 0) {
+	// Generic QuickTime streams, as defined in
+	// <http://developer.apple.com/quicktime/icefloe/dispatch026.html>
+	char* mimeType
+	  = new char[strlen(mediumName()) + strlen(codecName()) + 2] ;
+	sprintf(mimeType, "%s/%s", mediumName(), codecName());
+	fReadSource = fRTPSource
+	  = QuickTimeGenericRTPSource::createNew(env(), fRTPSocket,
+						 fRTPPayloadFormat,
+						 fRTPTimestampFrequency,
+						 mimeType);
+	delete[] mimeType;
+      } else if (  strcmp(fCodecName, "PCMU") == 0 // PCM u-law audio
+		 || strcmp(fCodecName, "GSM") == 0 // GSM audio
+		 || strcmp(fCodecName, "DVI4") == 0 // DVI4 (IMA ADPCM) audio
+		 || strcmp(fCodecName, "PCMA") == 0 // PCM a-law audio
+		 || strcmp(fCodecName, "MP1S") == 0 // MPEG-1 System Stream
+		 || strcmp(fCodecName, "MP2P") == 0 // MPEG-2 Program Stream
+		 || strcmp(fCodecName, "L8") == 0 // 8-bit linear audio
+		 || strcmp(fCodecName, "L16") == 0 // 16-bit linear audio
+		 || strcmp(fCodecName, "L20") == 0 // 20-bit linear audio (RFC 3190)
+		 || strcmp(fCodecName, "L24") == 0 // 24-bit linear audio (RFC 3190)
+		 || strcmp(fCodecName, "G722") == 0 // G.722 audio (RFC 3551)
+		 || strcmp(fCodecName, "G726-16") == 0 // G.726, 16 kbps
+		 || strcmp(fCodecName, "G726-24") == 0 // G.726, 24 kbps
+		 || strcmp(fCodecName, "G726-32") == 0 // G.726, 32 kbps
+		 || strcmp(fCodecName, "G726-40") == 0 // G.726, 40 kbps
+		 || strcmp(fCodecName, "SPEEX") == 0 // SPEEX audio
+		 || strcmp(fCodecName, "ILBC") == 0 // iLBC audio
+		 || strcmp(fCodecName, "OPUS") == 0 // Opus audio
+		 || strcmp(fCodecName, "T140") == 0 // T.140 text (RFC 4103)
+		 || strcmp(fCodecName, "DAT12") == 0 // 12-bit nonlinear audio (RFC 3190)
+		 || strcmp(fCodecName, "VND.ONVIF.METADATA") == 0 // 'ONVIF' 'metadata' (a XML document)
+		 ) {
+	createSimpleRTPSource = True;
+	useSpecialRTPoffset = 0;
+	if (strcmp(fCodecName, "VND.ONVIF.METADATA") == 0) {
+	  // This RTP payload format uses the RTP "M" bit to indicate the end of the content (a XML document):
+	  doNormalMBitRule = True;
+	}
+      } else if (useSpecialRTPoffset >= 0) {
+	// We don't know this RTP payload format, but try to receive
+	// it using a 'SimpleRTPSource' with the specified header offset:
+	createSimpleRTPSource = True;
+      } else {
+	env().setResultMsg("RTP payload format unknown or not supported");
+	break;
+      }
+
+      if (createSimpleRTPSource) {
+	// Build a "<medium>/<codec>" MIME type string for the generic source:
+	char* mimeType
+	  = new char[strlen(mediumName()) + strlen(codecName()) + 2] ;
+	sprintf(mimeType, "%s/%s", mediumName(), codecName());
+	fReadSource = fRTPSource
+	  = SimpleRTPSource::createNew(env(), fRTPSocket, fRTPPayloadFormat,
+				       fRTPTimestampFrequency, mimeType,
+				       (unsigned)useSpecialRTPoffset,
+				       doNormalMBitRule);
+	delete[] mimeType;
+      }
+    }
+
+    return True;
+  } while (0);
+
+  return False; // an error occurred
+}
+
+
+////////// SDPAttribute implementation //////////
+
+// Constructs a SDP attribute record from an (optionally NULL) string value.
+// A NULL value denotes a valueless ("Boolean") attribute, whose integer
+// value is taken to be 1 (True).  Otherwise, a lower-cased copy of the value
+// is pre-computed, and an integer value is parsed from it (as hex if
+// "valueIsHexadecimal"), defaulting to 0 when it isn't numeric.
+SDPAttribute::SDPAttribute(char const* strValue, Boolean valueIsHexadecimal)
+  : fStrValue(strDup(strValue)), fStrValueToLower(NULL), fValueIsHexadecimal(valueIsHexadecimal) {
+  if (fStrValue == NULL) {
+    // No value was given for this attribute, so consider it to be a Boolean, with value True:
+    fIntValue = 1;
+  } else {
+    // Create a 'tolower' version of "fStrValue", in case it's needed:
+    // (use the "POSIX" locale so "tolower()" is locale-independent)
+    Locale l("POSIX");
+    size_t strSize;
+
+    // "strDupSize" allocates a buffer and reports its size (incl. the NUL):
+    fStrValueToLower = strDupSize(fStrValue, strSize);
+    for (unsigned i = 0; i < strSize-1; ++i) fStrValueToLower[i] = tolower(fStrValue[i]);
+    fStrValueToLower[strSize-1] = '\0';
+
+    // Try to parse "fStrValueToLower" as an integer.  If we can't, assume an integer value of 0:
+    if (sscanf(fStrValueToLower, valueIsHexadecimal ? "%x" : "%d", &fIntValue) != 1) {
+      fIntValue = 0;
+    }
+  }
+}
+
+// Frees both stored copies of the attribute's string value.
+SDPAttribute::~SDPAttribute() {
+  delete[] fStrValue;
+  delete[] fStrValueToLower;
+}
diff --git a/liveMedia/MediaSink.cpp b/liveMedia/MediaSink.cpp
new file mode 100644
index 0000000..ab144ad
--- /dev/null
+++ b/liveMedia/MediaSink.cpp
@@ -0,0 +1,225 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Media Sinks
+// Implementation
+
+#include "MediaSink.hh"
+#include "GroupsockHelper.hh"
+#include <string.h>
+
+////////// MediaSink //////////
+
+// A sink starts with no source attached; "startPlaying()" sets one.
+MediaSink::MediaSink(UsageEnvironment& env)
+  : Medium(env), fSource(NULL) {
+}
+
+// Stops any playback in progress before the object is destroyed.
+MediaSink::~MediaSink() {
+  stopPlaying();
+}
+
+// Type predicate used by "lookupByName()" to verify a medium is a sink.
+Boolean MediaSink::isSink() const {
+  return True;
+}
+
+// Looks up a previously-created medium by name and checks that it is a sink.
+// On success, sets "resultSink" and returns True; on failure, leaves
+// "resultSink" NULL and returns False (with an error message in "env").
+Boolean MediaSink::lookupByName(UsageEnvironment& env, char const* sinkName,
+				MediaSink*& resultSink) {
+  resultSink = NULL; // unless we succeed
+
+  Medium* medium;
+  if (!Medium::lookupByName(env, sinkName, medium)) return False;
+
+  if (!medium->isSink()) {
+    env.setResultMsg(sinkName, " is not a media sink");
+    return False;
+  }
+
+  resultSink = (MediaSink*)medium;
+  return True;
+}
+
+// Default compatibility check, used by "startPlaying()"; subclasses can
+// override to impose stricter requirements.
+Boolean MediaSink::sourceIsCompatibleWithUs(MediaSource& source) {
+  // We currently support only framed sources.
+  return source.isFramedSource();
+}
+
+// Begins pulling data from "source".  "afterFunc" (if non-NULL) is called
+// with "afterClientData" when playback completes.  Fails (returning False,
+// with an error message set) if we're already playing, or if the source is
+// not compatible with this sink.  Returns the result of the subclass's
+// "continuePlaying()".
+Boolean MediaSink::startPlaying(MediaSource& source,
+				afterPlayingFunc* afterFunc,
+				void* afterClientData) {
+  // Make sure we're not already being played:
+  if (fSource != NULL) {
+    envir().setResultMsg("This sink is already being played");
+    return False;
+  }
+
+  // Make sure our source is compatible:
+  if (!sourceIsCompatibleWithUs(source)) {
+    envir().setResultMsg("MediaSink::startPlaying(): source is not compatible!");
+    return False;
+  }
+  // The cast is safe: "sourceIsCompatibleWithUs()" verified it's framed.
+  fSource = (FramedSource*)&source;
+
+  fAfterFunc = afterFunc;
+  fAfterClientData = afterClientData;
+  return continuePlaying();
+}
+
+// Stops playback: detaches from the source, cancels our pending task, and
+// clears the completion callback.  Note that — unlike "onSourceClosure()" —
+// the after-playing callback is NOT invoked here.
+void MediaSink::stopPlaying() {
+  // First, tell the source that we're no longer interested:
+  if (fSource != NULL) fSource->stopGettingFrames();
+
+  // Cancel any pending tasks:
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+
+  fSource = NULL; // indicates that we can be played again
+  fAfterFunc = NULL;
+}
+
+// Static trampoline: forwards a source-closure notification ("clientData"
+// is the MediaSink*) to the member function below.
+void MediaSink::onSourceClosure(void* clientData) {
+  MediaSink* sink = (MediaSink*)clientData;
+  sink->onSourceClosure();
+}
+
+// Handles our source closing: cancels our pending task, detaches, and then
+// invokes the after-playing callback (if one was registered).
+void MediaSink::onSourceClosure() {
+  // Cancel any pending tasks:
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+
+  fSource = NULL; // indicates that we can be played again
+  if (fAfterFunc != NULL) {
+    (*fAfterFunc)(fAfterClientData);
+  }
+}
+
+// Type predicate; overridden by RTP sink subclasses to return True.
+Boolean MediaSink::isRTPSink() const {
+  return False; // default implementation
+}
+
+////////// OutPacketBuffer //////////
+
+// Default upper bound on total buffer size, used when the constructor's
+// "maxBufferSize" parameter is 0.  May be raised by applications that need
+// to buffer large frames.
+unsigned OutPacketBuffer::maxSize = 60000; // by default
+
+// Allocates a buffer large enough for a whole number of maximum-size
+// packets covering "maxBufferSize" bytes (rounding the packet count up),
+// and resets all positions/overflow state.
+OutPacketBuffer
+::OutPacketBuffer(unsigned preferredPacketSize, unsigned maxPacketSize, unsigned maxBufferSize)
+  : fPreferred(preferredPacketSize), fMax(maxPacketSize),
+    fOverflowDataSize(0) {
+  if (maxBufferSize == 0) maxBufferSize = maxSize;
+  unsigned maxNumPackets = (maxBufferSize + (maxPacketSize-1))/maxPacketSize;
+  fLimit = maxNumPackets*maxPacketSize;
+  fBuf = new unsigned char[fLimit];
+  resetPacketStart();
+  resetOffset();
+  resetOverflowData();
+}
+
+OutPacketBuffer::~OutPacketBuffer() {
+  delete[] fBuf;
+}
+
+// Appends "numBytes" from "from" at the current write position, silently
+// truncating to the space remaining (with a warning in DEBUG builds), then
+// advances the write offset.
+void OutPacketBuffer::enqueue(unsigned char const* from, unsigned numBytes) {
+  if (numBytes > totalBytesAvailable()) {
+#ifdef DEBUG
+    fprintf(stderr, "OutPacketBuffer::enqueue() warning: %d > %d\n", numBytes, totalBytesAvailable());
+#endif
+    numBytes = totalBytesAvailable();
+  }
+
+  // "memmove" (not "memcpy") because "from" may overlap our buffer; skip
+  // the copy entirely if the data is already in place:
+  if (curPtr() != from) memmove(curPtr(), from, numBytes);
+  increment(numBytes);
+}
+
+// Appends a 32-bit word in network byte order.
+void OutPacketBuffer::enqueueWord(u_int32_t word) {
+  u_int32_t nWord = htonl(word);
+  enqueue((unsigned char*)&nWord, 4);
+}
+
+// Writes "numBytes" at offset "toPosition" within the current packet
+// (i.e. relative to "fPacketStart"), truncating at the buffer limit, and
+// extends the current write offset if the write goes past it.
+void OutPacketBuffer::insert(unsigned char const* from, unsigned numBytes,
+			     unsigned toPosition) {
+  unsigned realToPosition = fPacketStart + toPosition;
+  if (realToPosition + numBytes > fLimit) {
+    if (realToPosition > fLimit) return; // we can't do this
+    numBytes = fLimit - realToPosition;
+  }
+
+  memmove(&fBuf[realToPosition], from, numBytes);
+  if (toPosition + numBytes > fCurOffset) {
+    fCurOffset = toPosition + numBytes;
+  }
+}
+
+// Writes a 32-bit word (network byte order) at packet-relative "toPosition".
+void OutPacketBuffer::insertWord(u_int32_t word, unsigned toPosition) {
+  u_int32_t nWord = htonl(word);
+  insert((unsigned char*)&nWord, 4, toPosition);
+}
+
+// Copies "numBytes" out of the buffer from packet-relative "fromPosition"
+// into "to", truncating the read at the buffer limit.
+void OutPacketBuffer::extract(unsigned char* to, unsigned numBytes,
+			      unsigned fromPosition) {
+  unsigned realFromPosition = fPacketStart + fromPosition;
+  if (realFromPosition + numBytes > fLimit) { // sanity check
+    if (realFromPosition > fLimit) return; // we can't do this
+    numBytes = fLimit - realFromPosition;
+  }
+
+  memmove(to, &fBuf[realFromPosition], numBytes);
+}
+
+// Reads a 32-bit word at packet-relative "fromPosition", converting it from
+// network to host byte order.
+u_int32_t OutPacketBuffer::extractWord(unsigned fromPosition) {
+  u_int32_t nWord;
+  extract((unsigned char*)&nWord, 4, fromPosition);
+  return ntohl(nWord);
+}
+
+// Advances the write offset by "numBytes" without writing anything,
+// clamped to the space remaining.
+void OutPacketBuffer::skipBytes(unsigned numBytes) {
+  if (numBytes > totalBytesAvailable()) {
+    numBytes = totalBytesAvailable();
+  }
+
+  increment(numBytes);
+}
+
+// Records data that didn't fit in the current packet (its packet-relative
+// offset, size, and timing), so it can be re-enqueued into the next packet
+// by "useOverflowData()".
+void OutPacketBuffer
+::setOverflowData(unsigned overflowDataOffset,
+		  unsigned overflowDataSize,
+		  struct timeval const& presentationTime,
+		  unsigned durationInMicroseconds) {
+  fOverflowDataOffset = overflowDataOffset;
+  fOverflowDataSize = overflowDataSize;
+  fOverflowPresentationTime = presentationTime;
+  fOverflowDurationInMicroseconds = durationInMicroseconds;
+}
+
+// Re-enqueues the saved overflow data at the current write position (the
+// data is moved in place, not appended — the offset advance is undone), then
+// clears the overflow state.
+void OutPacketBuffer::useOverflowData() {
+  enqueue(&fBuf[fPacketStart + fOverflowDataOffset], fOverflowDataSize);
+  fCurOffset -= fOverflowDataSize; // undoes increment performed by "enqueue"
+  resetOverflowData();
+}
+
+// Moves the packet start forward by "numBytes", keeping the (packet-
+// relative) overflow offset pointing at the same absolute position.  If the
+// overflow data would now precede the packet start, it is discarded.
+void OutPacketBuffer::adjustPacketStart(unsigned numBytes) {
+  fPacketStart += numBytes;
+  if (fOverflowDataOffset >= numBytes) {
+    fOverflowDataOffset -= numBytes;
+  } else {
+    fOverflowDataOffset = 0;
+    fOverflowDataSize = 0; // an error otherwise
+  }
+}
+
+// Resets the packet start to the beginning of the buffer, converting any
+// pending overflow offset from packet-relative back to absolute.
+void OutPacketBuffer::resetPacketStart() {
+  if (fOverflowDataSize > 0) {
+    fOverflowDataOffset += fPacketStart;
+  }
+  fPacketStart = 0;
+}
diff --git a/liveMedia/MediaSource.cpp b/liveMedia/MediaSource.cpp
new file mode 100644
index 0000000..0bf5ee8
--- /dev/null
+++ b/liveMedia/MediaSource.cpp
@@ -0,0 +1,91 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Media Sources
+// Implementation
+
+#include "MediaSource.hh"
+
+////////// MediaSource //////////
+
+MediaSource::MediaSource(UsageEnvironment& env)
+  : Medium(env) {
+}
+
+MediaSource::~MediaSource() {
+}
+
+// Type predicate used by "lookupByName()" to verify a medium is a source.
+Boolean MediaSource::isSource() const {
+  return True;
+}
+
+// Generic MIME type; subclasses override with their specific type.
+char const* MediaSource::MIMEtype() const {
+  return "application/OCTET-STREAM"; // default type
+}
+
+// The following RTTI-style predicates all default to False here; each
+// corresponding subclass overrides its own predicate to return True.
+Boolean MediaSource::isFramedSource() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isRTPSource() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isMPEG1or2VideoStreamFramer() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isMPEG4VideoStreamFramer() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isH264VideoStreamFramer() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isH265VideoStreamFramer() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isDVVideoStreamFramer() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isJPEGVideoSource() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isAMRAudioSource() const {
+  return False; // default implementation
+}
+Boolean MediaSource::isMPEG2TransportStreamMultiplexor() const {
+  return False; // default implementation
+}
+
+// Looks up a previously-created medium by name and checks that it is a
+// source.  On success, sets "resultSource" and returns True; on failure,
+// leaves "resultSource" NULL and returns False (with an error message set).
+Boolean MediaSource::lookupByName(UsageEnvironment& env,
+				  char const* sourceName,
+				  MediaSource*& resultSource) {
+  resultSource = NULL; // unless we succeed
+
+  Medium* medium;
+  if (!Medium::lookupByName(env, sourceName, medium)) return False;
+
+  if (!medium->isSource()) {
+    env.setResultMsg(sourceName, " is not a media source");
+    return False;
+  }
+
+  resultSource = (MediaSource*)medium;
+  return True;
+}
+
+// Default: no attributes; just clears the environment's result message.
+void MediaSource::getAttributes() const {
+  // Default implementation
+  envir().setResultMsg("");
+}
diff --git a/liveMedia/MultiFramedRTPSink.cpp b/liveMedia/MultiFramedRTPSink.cpp
new file mode 100644
index 0000000..e00407e
--- /dev/null
+++ b/liveMedia/MultiFramedRTPSink.cpp
@@ -0,0 +1,431 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for a common kind of payload format: Those which pack multiple,
+// complete codec frames (as many as possible) into each RTP packet.
+// Implementation
+
+#include "MultiFramedRTPSink.hh"
+#include "GroupsockHelper.hh"
+
+////////// MultiFramedRTPSink //////////
+
+// Replaces the output packet buffer with one built for the given preferred
+// and maximum packet sizes (in bytes).  Inconsistent arguments (a preferred
+// size of 0, or one larger than the maximum) are silently ignored.
+void MultiFramedRTPSink::setPacketSizes(unsigned preferredPacketSize,
+					unsigned maxPacketSize) {
+  if (preferredPacketSize > maxPacketSize || preferredPacketSize == 0) return;
+      // sanity check
+
+  delete fOutBuf;
+  fOutBuf = new OutPacketBuffer(preferredPacketSize, maxPacketSize);
+  fOurMaxPacketSize = maxPacketSize; // save value, in case subclasses need it
+}
+
+#ifndef RTP_PAYLOAD_MAX_SIZE
+#define RTP_PAYLOAD_MAX_SIZE 1456
+  // Default max packet size (1500, minus allowance for IP, UDP, UMTP headers)
+  // (Also, make it a multiple of 4 bytes, just in case that matters.)
+#endif
+#ifndef RTP_PAYLOAD_PREFERRED_SIZE
+#define RTP_PAYLOAD_PREFERRED_SIZE ((RTP_PAYLOAD_MAX_SIZE) < 1000 ? (RTP_PAYLOAD_MAX_SIZE) : 1000)
+#endif
+
+// Constructs the sink with no output buffer yet; "setPacketSizes()" then
+// allocates "fOutBuf" using the default preferred/max payload sizes above.
+MultiFramedRTPSink::MultiFramedRTPSink(UsageEnvironment& env,
+				       Groupsock* rtpGS,
+				       unsigned char rtpPayloadType,
+				       unsigned rtpTimestampFrequency,
+				       char const* rtpPayloadFormatName,
+				       unsigned numChannels)
+  : RTPSink(env, rtpGS, rtpPayloadType, rtpTimestampFrequency,
+	    rtpPayloadFormatName, numChannels),
+    fOutBuf(NULL), fCurFragmentationOffset(0), fPreviousFrameEndedFragmentation(False),
+    fOnSendErrorFunc(NULL), fOnSendErrorData(NULL) {
+  setPacketSizes((RTP_PAYLOAD_PREFERRED_SIZE), (RTP_PAYLOAD_MAX_SIZE));
+}
+
+MultiFramedRTPSink::~MultiFramedRTPSink() {
+  delete fOutBuf;
+}
+
+// Hook for payload-format-specific per-frame processing, called once for
+// each frame (or frame fragment) packed into the outgoing packet.
+// "fragmentationOffset" is this fragment's byte offset within its frame;
+// "numRemainingBytes" is how much of the frame is left over for later packets.
+void MultiFramedRTPSink
+::doSpecialFrameHandling(unsigned /*fragmentationOffset*/,
+			 unsigned char* /*frameStart*/,
+			 unsigned /*numBytesInFrame*/,
+			 struct timeval framePresentationTime,
+			 unsigned /*numRemainingBytes*/) {
+  // default implementation: If this is the first frame in the packet,
+  // use its presentationTime for the RTP timestamp:
+  if (isFirstFrameInPacket()) {
+    setTimestamp(framePresentationTime);
+  }
+}
+
+// Policy hooks controlling how frames may be packed into packets.
+// Subclasses override these to match their payload format's rules.
+
+// May a frame be fragmented even when it's not the first frame in the packet?
+Boolean MultiFramedRTPSink::allowFragmentationAfterStart() const {
+  return False; // by default
+}
+
+// May further frames follow the final fragment of a fragmented frame?
+Boolean MultiFramedRTPSink::allowOtherFramesAfterLastFragment() const {
+  return False; // by default
+}
+
+// May this particular frame be added to a packet that already has data?
+Boolean MultiFramedRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+				 unsigned /*numBytesInFrame*/) const {
+  return True; // by default
+}
+
+// Size (bytes) of a payload-format-specific header that follows the RTP
+// header, once per packet:
+unsigned MultiFramedRTPSink::specialHeaderSize() const {
+  // default implementation: Assume no special header:
+  return 0;
+}
+
+// Size (bytes) of a header that precedes each frame within the packet:
+unsigned MultiFramedRTPSink::frameSpecificHeaderSize() const {
+  // default implementation: Assume no frame-specific header:
+  return 0;
+}
+
+// How many bytes of a new, too-big frame should be held over for the next
+// packet.  Subclasses may override e.g. to fragment on codec boundaries.
+unsigned MultiFramedRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const {
+  // default implementation: Just call numOverflowBytes()
+  return fOutBuf->numOverflowBytes(newFrameSize);
+}
+
+// Sets the RTP 'M' (marker) bit - bit 0x00800000 of the first 32-bit word
+// of the RTP header - in the packet currently being built:
+void MultiFramedRTPSink::setMarkerBit() {
+  unsigned rtpHdr = fOutBuf->extractWord(0);
+  rtpHdr |= 0x00800000;
+  fOutBuf->insertWord(rtpHdr, 0);
+}
+
+// Converts "framePresentationTime" to a 32-bit RTP timestamp and writes it
+// into the hole left for it ("fTimestampPosition") in the RTP header:
+void MultiFramedRTPSink::setTimestamp(struct timeval framePresentationTime) {
+  // First, convert the presentation time to a 32-bit RTP timestamp:
+  fCurrentTimestamp = convertToRTPTimestamp(framePresentationTime);
+
+  // Then, insert it into the RTP packet:
+  fOutBuf->insertWord(fCurrentTimestamp, fTimestampPosition);
+}
+
+// Helpers for subclasses to fill in the per-packet 'special' header and the
+// per-frame header, at positions recorded while the packet was laid out.
+// "wordPosition"/"bytePosition" are offsets relative to the start of the
+// respective header (words are 4 bytes).
+void MultiFramedRTPSink::setSpecialHeaderWord(unsigned word,
+					      unsigned wordPosition) {
+  fOutBuf->insertWord(word, fSpecialHeaderPosition + 4*wordPosition);
+}
+
+void MultiFramedRTPSink::setSpecialHeaderBytes(unsigned char const* bytes,
+					       unsigned numBytes,
+					       unsigned bytePosition) {
+  fOutBuf->insert(bytes, numBytes, fSpecialHeaderPosition + bytePosition);
+}
+
+void MultiFramedRTPSink::setFrameSpecificHeaderWord(unsigned word,
+						    unsigned wordPosition) {
+  fOutBuf->insertWord(word, fCurFrameSpecificHeaderPosition + 4*wordPosition);
+}
+
+void MultiFramedRTPSink::setFrameSpecificHeaderBytes(unsigned char const* bytes,
+						     unsigned numBytes,
+						     unsigned bytePosition) {
+  fOutBuf->insert(bytes, numBytes, fCurFrameSpecificHeaderPosition + bytePosition);
+}
+
+// Appends RTP padding to the current packet, per RFC 3550 section 5.1: the
+// padding octets are zero except for the last, which holds the padding count;
+// the RTP header's 'P' bit (0x20000000 in the first header word) is then set.
+// "numPaddingBytes" is clamped to 255 - the largest count representable in
+// the one-byte padding-length field, and the size of our local buffer.
+// (Previously an unchecked value > 255 would have overflowed "paddingBuffer".)
+void MultiFramedRTPSink::setFramePadding(unsigned numPaddingBytes) {
+  if (numPaddingBytes > 255) numPaddingBytes = 255; // avoid overflowing "paddingBuffer"
+  if (numPaddingBytes > 0) {
+    // Add the padding bytes (with the last one being the padding size):
+    unsigned char paddingBuffer[255]; //max padding
+    memset(paddingBuffer, 0, numPaddingBytes);
+    paddingBuffer[numPaddingBytes-1] = numPaddingBytes;
+    fOutBuf->enqueue(paddingBuffer, numPaddingBytes);
+
+    // Set the RTP padding bit:
+    unsigned rtpHdr = fOutBuf->extractWord(0);
+    rtpHdr |= 0x20000000;
+    fOutBuf->insertWord(rtpHdr, 0);
+  }
+}
+
+// Starts (or resumes) streaming: builds and sends the first packet, which in
+// turn schedules all subsequent sends.  Always reports success.
+Boolean MultiFramedRTPSink::continuePlaying() {
+  // Send the first packet.
+  // (This will also schedule any future sends.)
+  buildAndSendPacket(True);
+  return True;
+}
+
+// Stops streaming: discards any partially-built packet and buffered overflow
+// data, then performs the generic MediaSink teardown.
+void MultiFramedRTPSink::stopPlaying() {
+  fOutBuf->resetPacketStart();
+  fOutBuf->resetOffset();
+  fOutBuf->resetOverflowData();
+
+  // Then call the default "stopPlaying()" function:
+  MediaSink::stopPlaying();
+}
+
+// Lays out the fixed RTP header (and any payload-specific special header)
+// for a new outgoing packet, then starts packing frames into it via
+// "packFrame()".  "isFirstPacket" is True only for the first packet of the
+// stream (used to initialize the send-time clock).
+void MultiFramedRTPSink::buildAndSendPacket(Boolean isFirstPacket) {
+  nextTask() = NULL;
+  fIsFirstPacket = isFirstPacket;
+
+  // Set up the RTP header:
+  unsigned rtpHdr = 0x80000000; // RTP version 2; marker ('M') bit not set (by default; it can be set later)
+  rtpHdr |= (fRTPPayloadType<<16);
+  rtpHdr |= fSeqNo; // sequence number
+  fOutBuf->enqueueWord(rtpHdr);
+
+  // Note where the RTP timestamp will go.
+  // (We can't fill this in until we start packing payload frames.)
+  fTimestampPosition = fOutBuf->curPacketSize();
+  fOutBuf->skipBytes(4); // leave a hole for the timestamp
+
+  fOutBuf->enqueueWord(SSRC());
+
+  // Allow for a special, payload-format-specific header following the
+  // RTP header:
+  fSpecialHeaderPosition = fOutBuf->curPacketSize();
+  fSpecialHeaderSize = specialHeaderSize();
+  fOutBuf->skipBytes(fSpecialHeaderSize);
+
+  // Begin packing as many (complete) frames into the packet as we can:
+  fTotalFrameSpecificHeaderSizes = 0;
+  fNoFramesLeft = False;
+  fNumFramesUsedSoFar = 0;
+  packFrame();
+}
+
+// Obtains the next frame to pack into the current packet: either reuses
+// overflow data saved from the previous packet, or requests a new frame from
+// the upstream source.  Either way, delivery continues asynchronously in
+// "afterGettingFrame1()".
+void MultiFramedRTPSink::packFrame() {
+  // Get the next frame.
+
+  // First, skip over the space we'll use for any frame-specific header:
+  fCurFrameSpecificHeaderPosition = fOutBuf->curPacketSize();
+  fCurFrameSpecificHeaderSize = frameSpecificHeaderSize();
+  fOutBuf->skipBytes(fCurFrameSpecificHeaderSize);
+  fTotalFrameSpecificHeaderSizes += fCurFrameSpecificHeaderSize;
+
+  // See if we have an overflow frame that was too big for the last pkt
+  if (fOutBuf->haveOverflowData()) {
+    // Use this frame before reading a new one from the source
+    unsigned frameSize = fOutBuf->overflowDataSize();
+    struct timeval presentationTime = fOutBuf->overflowPresentationTime();
+    unsigned durationInMicroseconds = fOutBuf->overflowDurationInMicroseconds();
+    fOutBuf->useOverflowData();
+
+    // Note: no bytes were truncated, hence the 0 second argument:
+    afterGettingFrame1(frameSize, 0, presentationTime, durationInMicroseconds);
+  } else {
+    // Normal case: we need to read a new frame from the source
+    if (fSource == NULL) return;
+    fSource->getNextFrame(fOutBuf->curPtr(), fOutBuf->totalBytesAvailable(),
+			  afterGettingFrame, this, ourHandleClosure, this);
+  }
+}
+
+// Static callback handed to "FramedSource::getNextFrame()".  Recovers the
+// sink object from "clientData", then continues in the member function
+// "afterGettingFrame1()".
+void MultiFramedRTPSink
+::afterGettingFrame(void* clientData, unsigned numBytesRead,
+		    unsigned numTruncatedBytes,
+		    struct timeval presentationTime,
+		    unsigned durationInMicroseconds) {
+  MultiFramedRTPSink* ourSink = static_cast<MultiFramedRTPSink*>(clientData);
+  ourSink->afterGettingFrame1(numBytesRead, numTruncatedBytes,
+			      presentationTime, durationInMicroseconds);
+}
+
+// Continuation of "packFrame()", invoked once frame data is in "fOutBuf".
+// Decides whether the new frame may/can be packed into the current packet,
+// handles fragmentation and overflow bookkeeping, and either sends the
+// packet or loops back to pack further frames into it.
+void MultiFramedRTPSink
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+		     struct timeval presentationTime,
+		     unsigned durationInMicroseconds) {
+  if (fIsFirstPacket) {
+    // Record the fact that we're starting to play now:
+    gettimeofday(&fNextSendTime, NULL);
+  }
+
+  fMostRecentPresentationTime = presentationTime;
+  if (fInitialPresentationTime.tv_sec == 0 && fInitialPresentationTime.tv_usec == 0) {
+    fInitialPresentationTime = presentationTime;
+  }
+
+  if (numTruncatedBytes > 0) {
+    unsigned const bufferSize = fOutBuf->totalBytesAvailable();
+    envir() << "MultiFramedRTPSink::afterGettingFrame1(): The input frame data was too large for our buffer size ("
+	    << bufferSize << ").  "
+	    << numTruncatedBytes << " bytes of trailing data was dropped!  Correct this by increasing \"OutPacketBuffer::maxSize\" to at least "
+	    << OutPacketBuffer::maxSize + numTruncatedBytes << ", *before* creating this 'RTPSink'.  (Current value is "
+	    << OutPacketBuffer::maxSize << ".)\n";
+  }
+  unsigned curFragmentationOffset = fCurFragmentationOffset;
+  unsigned numFrameBytesToUse = frameSize;
+  unsigned overflowBytes = 0;
+
+  // If we have already packed one or more frames into this packet,
+  // check whether this new frame is eligible to be packed after them.
+  // (This is independent of whether the packet has enough room for this
+  // new frame; that check comes later.)
+  if (fNumFramesUsedSoFar > 0) {
+    if ((fPreviousFrameEndedFragmentation
+	 && !allowOtherFramesAfterLastFragment())
+	|| !frameCanAppearAfterPacketStart(fOutBuf->curPtr(), frameSize)) {
+      // Save away this frame for next time:
+      numFrameBytesToUse = 0;
+      fOutBuf->setOverflowData(fOutBuf->curPacketSize(), frameSize,
+			       presentationTime, durationInMicroseconds);
+    }
+  }
+  fPreviousFrameEndedFragmentation = False;
+
+  if (numFrameBytesToUse > 0) {
+    // Check whether this frame overflows the packet
+    if (fOutBuf->wouldOverflow(frameSize)) {
+      // Don't use this frame now; instead, save it as overflow data, and
+      // send it in the next packet instead.  However, if the frame is too
+      // big to fit in a packet by itself, then we need to fragment it (and
+      // use some of it in this packet, if the payload format permits this.)
+      if (isTooBigForAPacket(frameSize)
+          && (fNumFramesUsedSoFar == 0 || allowFragmentationAfterStart())) {
+        // We need to fragment this frame, and use some of it now:
+        overflowBytes = computeOverflowForNewFrame(frameSize);
+        numFrameBytesToUse -= overflowBytes;
+        fCurFragmentationOffset += numFrameBytesToUse;
+      } else {
+        // We don't use any of this frame now:
+        overflowBytes = frameSize;
+        numFrameBytesToUse = 0;
+      }
+      fOutBuf->setOverflowData(fOutBuf->curPacketSize() + numFrameBytesToUse,
+			       overflowBytes, presentationTime, durationInMicroseconds);
+    } else if (fCurFragmentationOffset > 0) {
+      // This is the last fragment of a frame that was fragmented over
+      // more than one packet.  Do any special handling for this case:
+      fCurFragmentationOffset = 0;
+      fPreviousFrameEndedFragmentation = True;
+    }
+  }
+
+  if (numFrameBytesToUse == 0 && frameSize > 0) {
+    // Send our packet now, because we have filled it up:
+    sendPacketIfNecessary();
+  } else {
+    // Use this frame in our outgoing packet:
+    unsigned char* frameStart = fOutBuf->curPtr();
+    fOutBuf->increment(numFrameBytesToUse);
+        // do this now, in case "doSpecialFrameHandling()" calls "setFramePadding()" to append padding bytes
+
+    // Here's where any payload format specific processing gets done:
+    doSpecialFrameHandling(curFragmentationOffset, frameStart,
+			   numFrameBytesToUse, presentationTime,
+			   overflowBytes);
+
+    ++fNumFramesUsedSoFar;
+
+    // Update the time at which the next packet should be sent, based
+    // on the duration of the frame that we just packed into it.
+    // However, if this frame has overflow data remaining, then don't
+    // count its duration yet.
+    if (overflowBytes == 0) {
+      fNextSendTime.tv_usec += durationInMicroseconds;
+      fNextSendTime.tv_sec += fNextSendTime.tv_usec/1000000;
+      fNextSendTime.tv_usec %= 1000000;
+    }
+
+    // Send our packet now if (i) it's already at our preferred size, or
+    // (ii) (heuristic) another frame of the same size as the one we just
+    //      read would overflow the packet, or
+    // (iii) it contains the last fragment of a fragmented frame, and we
+    //      don't allow anything else to follow this or
+    // (iv) only one frame per packet is allowed:
+    if (fOutBuf->isPreferredSize()
+        || fOutBuf->wouldOverflow(numFrameBytesToUse)
+        || (fPreviousFrameEndedFragmentation &&
+            !allowOtherFramesAfterLastFragment())
+        || !frameCanAppearAfterPacketStart(fOutBuf->curPtr() - frameSize,
+					   frameSize) ) {
+      // The packet is ready to be sent now
+      sendPacketIfNecessary();
+    } else {
+      // There's room for more frames; try getting another:
+      packFrame();
+    }
+  }
+}
+
+// Size of the fixed RTP header (version 2, no CSRCs): 12 bytes.
+static unsigned const rtpHeaderSize = 12;
+
+// Would a frame of "numBytes" bytes - plus the RTP header and all
+// per-packet/per-frame special headers - exceed the maximum packet size?
+Boolean MultiFramedRTPSink::isTooBigForAPacket(unsigned numBytes) const {
+  // Check whether a 'numBytes'-byte frame - together with a RTP header and
+  // (possible) special headers - would be too big for an output packet:
+  // (Later allow for RTP extension header!) #####
+  numBytes += rtpHeaderSize + specialHeaderSize() + frameSpecificHeaderSize();
+  return fOutBuf->isTooBigForAPacket(numBytes);
+}
+
+// Sends the packet currently built up in "fOutBuf" (if it holds any frames),
+// updates the send statistics, repositions the buffer for the next packet,
+// and then either reports source closure (if no frames remain) or schedules
+// the next send at "fNextSendTime".
+void MultiFramedRTPSink::sendPacketIfNecessary() {
+  if (fNumFramesUsedSoFar > 0) {
+    // Send the packet:
+#ifdef TEST_LOSS
+    if ((our_random()%10) != 0) // simulate 10% packet loss #####
+#endif
+      if (!fRTPInterface.sendPacket(fOutBuf->packet(), fOutBuf->curPacketSize())) {
+	// if failure handler has been specified, call it
+	if (fOnSendErrorFunc != NULL) (*fOnSendErrorFunc)(fOnSendErrorData);
+      }
+    ++fPacketCount;
+    fTotalOctetCount += fOutBuf->curPacketSize();
+    // "fOctetCount" counts payload octets only (excluding all headers):
+    fOctetCount += fOutBuf->curPacketSize()
+      - rtpHeaderSize - fSpecialHeaderSize - fTotalFrameSpecificHeaderSizes;
+
+    ++fSeqNo; // for next time
+  }
+
+  if (fOutBuf->haveOverflowData()
+      && fOutBuf->totalBytesAvailable() > fOutBuf->totalBufferSize()/2) {
+    // Efficiency hack: Reset the packet start pointer to just in front of
+    // the overflow data (allowing for the RTP header and special headers),
+    // so that we probably don't have to "memmove()" the overflow data
+    // into place when building the next packet:
+    unsigned newPacketStart = fOutBuf->curPacketSize()
+      - (rtpHeaderSize + fSpecialHeaderSize + frameSpecificHeaderSize());
+    fOutBuf->adjustPacketStart(newPacketStart);
+  } else {
+    // Normal case: Reset the packet start pointer back to the start:
+    fOutBuf->resetPacketStart();
+  }
+  fOutBuf->resetOffset();
+  fNumFramesUsedSoFar = 0;
+
+  if (fNoFramesLeft) {
+    // We're done:
+    onSourceClosure();
+  } else {
+    // We have more frames left to send.  Figure out when the next frame
+    // is due to start playing, then make sure that we wait this long before
+    // sending the next packet.
+    struct timeval timeNow;
+    gettimeofday(&timeNow, NULL);
+    int secsDiff = fNextSendTime.tv_sec - timeNow.tv_sec;
+    // Widen to 64 bits *before* multiplying: "secsDiff*1000000" as a plain
+    // "int" product could overflow (undefined behavior) after a large clock
+    // jump, whereas the 64-bit product cannot:
+    int64_t uSecondsToGo = secsDiff*(int64_t)1000000 + (fNextSendTime.tv_usec - timeNow.tv_usec);
+    if (uSecondsToGo < 0 || secsDiff < 0) { // sanity check: Make sure that the time-to-delay is non-negative:
+      uSecondsToGo = 0;
+    }
+
+    // Delay this amount of time:
+    nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecondsToGo, (TaskFunc*)sendNext, this);
+  }
+}
+
+// Scheduled-task callback, invoked after the inter-packet delay has elapsed;
+// continues the stream by building and sending the next (non-first) packet.
+void MultiFramedRTPSink::sendNext(void* firstArg) {
+  static_cast<MultiFramedRTPSink*>(firstArg)->buildAndSendPacket(False);
+}
+
+// Called when the upstream source closes: record that no more frames will
+// arrive, then flush any partially-built packet that is still pending.
+void MultiFramedRTPSink::ourHandleClosure(void* clientData) {
+  MultiFramedRTPSink* ourSink = static_cast<MultiFramedRTPSink*>(clientData);
+  ourSink->fNoFramesLeft = True;
+  ourSink->sendPacketIfNecessary();
+}
diff --git a/liveMedia/MultiFramedRTPSource.cpp b/liveMedia/MultiFramedRTPSource.cpp
new file mode 100644
index 0000000..15f8cd0
--- /dev/null
+++ b/liveMedia/MultiFramedRTPSource.cpp
@@ -0,0 +1,638 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP source for a common kind of payload format: Those that pack multiple,
+// complete codec frames (as many as possible) into each RTP packet.
+// Implementation
+
+#include "MultiFramedRTPSource.hh"
+#include "RTCP.hh"
+#include "GroupsockHelper.hh"
+#include <string.h>
+
+////////// ReorderingPacketBuffer definition //////////
+
+// Buffers incoming RTP packets (as a singly-linked list, ordered by sequence
+// number) so that out-of-order arrivals can be delivered in order.  A packet
+// whose predecessors are missing is held back until either they arrive or
+// the reordering threshold time expires.
+class ReorderingPacketBuffer {
+public:
+  ReorderingPacketBuffer(BufferedPacketFactory* packetFactory);
+  virtual ~ReorderingPacketBuffer();
+  void reset();
+
+  // Obtains an empty packet descriptor for reading new network data into:
+  BufferedPacket* getFreePacket(MultiFramedRTPSource* ourSource);
+  Boolean storePacket(BufferedPacket* bPacket);
+  BufferedPacket* getNextCompletedPacket(Boolean& packetLossPreceded);
+  void releaseUsedPacket(BufferedPacket* packet);
+  // Returns a packet descriptor to the free pool ("fSavedPacket" is kept
+  // around and merely flagged free; any other packet is deleted):
+  void freePacket(BufferedPacket* packet) {
+    if (packet != fSavedPacket) {
+      delete packet;
+    } else {
+      fSavedPacketFree = True;
+    }
+  }
+  Boolean isEmpty() const { return fHeadPacket == NULL; }
+
+  void setThresholdTime(unsigned uSeconds) { fThresholdTime = uSeconds; }
+  void resetHaveSeenFirstPacket() { fHaveSeenFirstPacket = False; }
+
+private:
+  BufferedPacketFactory* fPacketFactory; // creates (possibly subclassed) packet descriptors
+  unsigned fThresholdTime; // uSeconds
+  Boolean fHaveSeenFirstPacket; // used to set initial "fNextExpectedSeqNo"
+  unsigned short fNextExpectedSeqNo; // next RTP sequence number due for delivery
+  BufferedPacket* fHeadPacket; // first (lowest seq no) queued packet
+  BufferedPacket* fTailPacket; // last (highest seq no) queued packet
+  BufferedPacket* fSavedPacket;
+      // to avoid calling new/free in the common case
+  Boolean fSavedPacketFree;
+};
+
+
+////////// MultiFramedRTPSource implementation //////////
+
+// Constructs the source, initializes delivery state via "reset()", creates
+// the reordering buffer (using "packetFactory" to create packet descriptors,
+// or a default factory if NULL), and enlarges the socket's receive buffer.
+MultiFramedRTPSource
+::MultiFramedRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+		       unsigned char rtpPayloadFormat,
+		       unsigned rtpTimestampFrequency,
+		       BufferedPacketFactory* packetFactory)
+  : RTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) {
+  reset();
+  fReorderingBuffer = new ReorderingPacketBuffer(packetFactory);
+
+  // Try to use a big receive buffer for RTP:
+  increaseReceiveBufferTo(env, RTPgs->socketNum(), 50*1024);
+}
+
+// Restores all per-stream delivery state to its initial values.  (Called
+// from the constructor, and again whenever reading stops.)
+void MultiFramedRTPSource::reset() {
+  // No network read is active or pending:
+  fAreDoingNetworkReads = False;
+  fPacketReadInProgress = NULL;
+  fNeedDelivery = False;
+
+  // Assume one whole frame per packet until a subclass notes otherwise:
+  fCurrentPacketBeginsFrame = True;
+  fCurrentPacketCompletesFrame = True;
+  fPacketLossInFragmentedFrame = False;
+}
+
+MultiFramedRTPSource::~MultiFramedRTPSource() {
+  delete fReorderingBuffer;
+}
+
+// Hook for payload formats with a header between the RTP header and the
+// payload.  Returns False to reject the packet; on success, sets
+// "resultSpecialHeaderSize" to the number of header bytes to skip.
+Boolean MultiFramedRTPSource
+::processSpecialHeader(BufferedPacket* /*packet*/,
+		       unsigned& resultSpecialHeaderSize) {
+  // Default implementation: Assume no special header:
+  resultSpecialHeaderSize = 0;
+  return True;
+}
+
+// Hook allowing subclasses to exclude certain packets (e.g. retransmissions)
+// from the interarrival-jitter computation:
+Boolean MultiFramedRTPSource
+::packetIsUsableInJitterCalculation(unsigned char* /*packet*/,
+				    unsigned /*packetSize*/) {
+  // Default implementation:
+  return True;
+}
+
+// Stops frame delivery: abandons any half-read packet, cancels the pending
+// delivery task, turns off background network reads, and clears both the
+// reordering buffer and our own delivery state.
+void MultiFramedRTPSource::doStopGettingFrames() {
+  if (fPacketReadInProgress != NULL) {
+    fReorderingBuffer->freePacket(fPacketReadInProgress);
+    fPacketReadInProgress = NULL;
+  }
+  envir().taskScheduler().unscheduleDelayedTask(nextTask());
+  fRTPInterface.stopNetworkReading();
+  fReorderingBuffer->reset();
+  reset();
+}
+
+// Called (via "getNextFrame()") when a downstream object wants data.  Turns
+// on background packet reading if not already active, saves the original
+// destination pointer/size (so a partial multi-packet frame can be restarted
+// after loss), then attempts immediate delivery from buffered packets.
+void MultiFramedRTPSource::doGetNextFrame() {
+  if (!fAreDoingNetworkReads) {
+    // Turn on background read handling of incoming packets:
+    fAreDoingNetworkReads = True;
+    TaskScheduler::BackgroundHandlerProc* handler
+      = (TaskScheduler::BackgroundHandlerProc*)&networkReadHandler;
+    fRTPInterface.startNetworkReading(handler);
+  }
+
+  fSavedTo = fTo;
+  fSavedMaxSize = fMaxSize;
+  fFrameSize = 0; // for now
+  fNeedDelivery = True;
+  doGetNextFrame1();
+}
+
+// Delivery loop: pulls completed packets from the reordering buffer and
+// copies their payload (possibly spanning several packets per frame) into
+// the client's buffer, until a complete frame has been delivered or no more
+// packets are available.  Runs both from "doGetNextFrame()" and from the
+// network read handler when new packets arrive.
+void MultiFramedRTPSource::doGetNextFrame1() {
+  while (fNeedDelivery) {
+    // If we already have packet data available, then deliver it now.
+    Boolean packetLossPrecededThis;
+    BufferedPacket* nextPacket
+      = fReorderingBuffer->getNextCompletedPacket(packetLossPrecededThis);
+    if (nextPacket == NULL) break;
+
+    fNeedDelivery = False;
+
+    if (nextPacket->useCount() == 0) {
+      // Before using the packet, check whether it has a special header
+      // that needs to be processed:
+      unsigned specialHeaderSize;
+      if (!processSpecialHeader(nextPacket, specialHeaderSize)) {
+	// Something's wrong with the header; reject the packet:
+	fReorderingBuffer->releaseUsedPacket(nextPacket);
+	fNeedDelivery = True;
+	continue;
+      }
+      nextPacket->skip(specialHeaderSize);
+    }
+
+    // Check whether we're part of a multi-packet frame, and whether
+    // there was packet loss that would render this packet unusable:
+    if (fCurrentPacketBeginsFrame) {
+      if (packetLossPrecededThis || fPacketLossInFragmentedFrame) {
+	// We didn't get all of the previous frame.
+	// Forget any data that we used from it:
+	fTo = fSavedTo; fMaxSize = fSavedMaxSize;
+	fFrameSize = 0;
+      }
+      fPacketLossInFragmentedFrame = False;
+    } else if (packetLossPrecededThis) {
+      // We're in a multi-packet frame, with preceding packet loss
+      fPacketLossInFragmentedFrame = True;
+    }
+    if (fPacketLossInFragmentedFrame) {
+      // This packet is unusable; reject it:
+      fReorderingBuffer->releaseUsedPacket(nextPacket);
+      fNeedDelivery = True;
+      continue;
+    }
+
+    // The packet is usable.  Deliver all or part of it to our caller:
+    unsigned frameSize;
+    nextPacket->use(fTo, fMaxSize, frameSize, fNumTruncatedBytes,
+		    fCurPacketRTPSeqNum, fCurPacketRTPTimestamp,
+		    fPresentationTime, fCurPacketHasBeenSynchronizedUsingRTCP,
+		    fCurPacketMarkerBit);
+    fFrameSize += frameSize;
+
+    if (!nextPacket->hasUsableData()) {
+      // We're completely done with this packet now
+      fReorderingBuffer->releaseUsedPacket(nextPacket);
+    }
+
+    if (fCurrentPacketCompletesFrame && fFrameSize > 0) {
+      // We have all the data that the client wants.
+      if (fNumTruncatedBytes > 0) {
+	envir() << "MultiFramedRTPSource::doGetNextFrame1(): The total received frame size exceeds the client's buffer size ("
+		<< fSavedMaxSize << ").  "
+		<< fNumTruncatedBytes << " bytes of trailing data will be dropped!\n";
+      }
+      // Call our own 'after getting' function, so that the downstream object can consume the data:
+      if (fReorderingBuffer->isEmpty()) {
+	// Common case optimization: There are no more queued incoming packets, so this code will not get
+	// executed again without having first returned to the event loop.  Call our 'after getting' function
+	// directly, because there's no risk of a long chain of recursion (and thus stack overflow):
+	afterGetting(this);
+      } else {
+	// Special case: Call our 'after getting' function via the event loop.
+	nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
+								 (TaskFunc*)FramedSource::afterGetting, this);
+      }
+    } else {
+      // This packet contained fragmented data, and does not complete
+      // the data that the client wants.  Keep getting data:
+      fTo += frameSize; fMaxSize -= frameSize;
+      fNeedDelivery = True;
+    }
+  }
+}
+
+// Sets how long (in microseconds) the reordering buffer waits for a missing
+// packet before treating it as lost:
+void MultiFramedRTPSource
+::setPacketReorderingThresholdTime(unsigned uSeconds) {
+  fReorderingBuffer->setThresholdTime(uSeconds);
+}
+
+// Advances the current packet's read pointer "n" bytes (used while parsing
+// the RTP header in "networkReadHandler1()"):
+#define ADVANCE(n) do { bPacket->skip(n); } while (0)
+
+// Static trampoline registered as the socket's background read handler:
+void MultiFramedRTPSource::networkReadHandler(MultiFramedRTPSource* source, int /*mask*/) {
+  source->networkReadHandler1();
+}
+
+// Reads one incoming network packet, validates and parses its RTP header
+// (version, payload type, CSRCs, extension, padding), records reception
+// statistics, and stores the packet in the reordering buffer.  On any parse
+// failure the packet is dropped (the descriptor is recycled).  Finally
+// re-runs the delivery loop, since new data may now be deliverable.
+void MultiFramedRTPSource::networkReadHandler1() {
+  BufferedPacket* bPacket = fPacketReadInProgress;
+  if (bPacket == NULL) {
+    // Normal case: Get a free BufferedPacket descriptor to hold the new network packet:
+    bPacket = fReorderingBuffer->getFreePacket(this);
+  }
+
+  // Read the network packet, and perform sanity checks on the RTP header:
+  Boolean readSuccess = False;
+  do {
+    struct sockaddr_in fromAddress;
+    Boolean packetReadWasIncomplete = fPacketReadInProgress != NULL;
+    if (!bPacket->fillInData(fRTPInterface, fromAddress, packetReadWasIncomplete)) {
+      if (bPacket->bytesAvailable() == 0) { // should not happen??
+	envir() << "MultiFramedRTPSource internal error: Hit limit when reading incoming packet over TCP\n";
+      }
+      fPacketReadInProgress = NULL;
+      break;
+    }
+    if (packetReadWasIncomplete) {
+      // We need additional read(s) before we can process the incoming packet:
+      fPacketReadInProgress = bPacket;
+      return;
+    } else {
+      fPacketReadInProgress = NULL;
+    }
+#ifdef TEST_LOSS
+    setPacketReorderingThresholdTime(0);
+       // don't wait for 'lost' packets to arrive out-of-order later
+    if ((our_random()%10) == 0) break; // simulate 10% packet loss
+#endif
+
+    if (fCrypto != NULL) { // The packet is SRTP; authenticate/decrypt it first
+      unsigned newPacketSize;
+      if (!fCrypto->processIncomingSRTPPacket(bPacket->data(), bPacket->dataSize(), newPacketSize)) break;
+      if (newPacketSize > bPacket->dataSize()) break; // sanity check; shouldn't happen
+      bPacket->removePadding(bPacket->dataSize() - newPacketSize); // treat MKI+auth as padding
+    }
+
+    // Check for the 12-byte RTP header:
+    if (bPacket->dataSize() < 12) break;
+    unsigned rtpHdr = ntohl(*(u_int32_t*)(bPacket->data())); ADVANCE(4);
+    Boolean rtpMarkerBit = (rtpHdr&0x00800000) != 0;
+    unsigned rtpTimestamp = ntohl(*(u_int32_t*)(bPacket->data()));ADVANCE(4);
+    unsigned rtpSSRC = ntohl(*(u_int32_t*)(bPacket->data())); ADVANCE(4);
+
+    // Check the RTP version number (it should be 2):
+    if ((rtpHdr&0xC0000000) != 0x80000000) break;
+
+    // Check the Payload Type.
+    unsigned char rtpPayloadType = (unsigned char)((rtpHdr&0x007F0000)>>16);
+    if (rtpPayloadType != rtpPayloadFormat()) {
+      if (fRTCPInstanceForMultiplexedRTCPPackets != NULL
+	  && rtpPayloadType >= 64 && rtpPayloadType <= 95) {
+	// This is a multiplexed RTCP packet, and we've been asked to deliver such packets.
+	// Do so now:
+	fRTCPInstanceForMultiplexedRTCPPackets
+	  ->injectReport(bPacket->data()-12, bPacket->dataSize()+12, fromAddress);
+      }
+      break;
+    }
+
+    // Skip over any CSRC identifiers in the header:
+    unsigned cc = (rtpHdr>>24)&0x0F;
+    if (bPacket->dataSize() < cc*4) break;
+    ADVANCE(cc*4);
+
+    // Check for (& ignore) any RTP header extension
+    if (rtpHdr&0x10000000) {
+      if (bPacket->dataSize() < 4) break;
+      unsigned extHdr = ntohl(*(u_int32_t*)(bPacket->data())); ADVANCE(4);
+      unsigned remExtSize = 4*(extHdr&0xFFFF);
+      if (bPacket->dataSize() < remExtSize) break;
+      ADVANCE(remExtSize);
+    }
+
+    // Discard any padding bytes:
+    if (rtpHdr&0x20000000) {
+      if (bPacket->dataSize() == 0) break;
+      unsigned numPaddingBytes
+	= (unsigned)(bPacket->data())[bPacket->dataSize()-1];
+      if (bPacket->dataSize() < numPaddingBytes) break;
+      bPacket->removePadding(numPaddingBytes);
+    }
+
+    // The rest of the packet is the usable data.  Record and save it:
+    if (rtpSSRC != fLastReceivedSSRC) {
+      // The SSRC of incoming packets has changed.  Unfortunately we don't yet handle streams that contain multiple SSRCs,
+      // but we can handle a single-SSRC stream where the SSRC changes occasionally:
+      fLastReceivedSSRC = rtpSSRC;
+      fReorderingBuffer->resetHaveSeenFirstPacket();
+    }
+    unsigned short rtpSeqNo = (unsigned short)(rtpHdr&0xFFFF);
+    Boolean usableInJitterCalculation
+      = packetIsUsableInJitterCalculation((bPacket->data()),
+					  bPacket->dataSize());
+    struct timeval presentationTime; // computed by:
+    Boolean hasBeenSyncedUsingRTCP; // computed by:
+    receptionStatsDB()
+      .noteIncomingPacket(rtpSSRC, rtpSeqNo, rtpTimestamp,
+			  timestampFrequency(),
+			  usableInJitterCalculation, presentationTime,
+			  hasBeenSyncedUsingRTCP, bPacket->dataSize());
+
+    // Fill in the rest of the packet descriptor, and store it:
+    struct timeval timeNow;
+    gettimeofday(&timeNow, NULL);
+    bPacket->assignMiscParams(rtpSeqNo, rtpTimestamp, presentationTime,
+			      hasBeenSyncedUsingRTCP, rtpMarkerBit,
+			      timeNow);
+    if (!fReorderingBuffer->storePacket(bPacket)) break;
+
+    readSuccess = True;
+  } while (0);
+  // On any failure above, recycle the packet descriptor:
+  if (!readSuccess) fReorderingBuffer->freePacket(bPacket);
+
+  doGetNextFrame1();
+  // If we didn't get proper data this time, we'll get another chance
+}
+
+
+////////// BufferedPacket and BufferedPacketFactory implementation /////
+
+// Size (bytes) of each packet descriptor's data buffer; large enough for the
+// biggest possible UDP datagram:
+#define MAX_PACKET_SIZE 65536
+
+BufferedPacket::BufferedPacket()
+  : fPacketSize(MAX_PACKET_SIZE),
+    fBuf(new unsigned char[MAX_PACKET_SIZE]),
+    fNextPacket(NULL) {
+}
+
+// NOTE: "delete fNextPacket" recursively destroys the rest of the linked
+// list of packets that follows this one.
+BufferedPacket::~BufferedPacket() {
+  delete fNextPacket;
+  delete[] fBuf;
+}
+
+// Empties the packet: resets the read ("fHead") and write ("fTail") offsets
+// and the use count, ready for the buffer to be refilled.
+void BufferedPacket::reset() {
+  fHead = fTail = 0;
+  fUseCount = 0;
+  fIsFirstPacket = False; // by default
+}
+
+// The following function has been deprecated:
+// Returns the size of the next frame enclosed in the buffered data, possibly
+// updating "framePtr" to skip a frame header.
+unsigned BufferedPacket
+::nextEnclosedFrameSize(unsigned char*& /*framePtr*/, unsigned dataSize) {
+  // By default, use the entire buffered data, even though it may consist
+  // of more than one frame, on the assumption that the client doesn't
+  // care.  (This is more efficient than delivering a frame at a time)
+  return dataSize;
+}
+
+// Determines the extent (and duration) of the next enclosed frame, for
+// payload formats that pack several frames into one packet.  "framePtr" may
+// be advanced past any frame header; "frameSize" and
+// "frameDurationInMicroseconds" are the outputs.
+void BufferedPacket
+::getNextEnclosedFrameParameters(unsigned char*& framePtr, unsigned dataSize,
+				 unsigned& frameSize,
+				 unsigned& frameDurationInMicroseconds) {
+  // By default, use the entire buffered data, even though it may consist
+  // of more than one frame, on the assumption that the client doesn't
+  // care.  (This is more efficient than delivering a frame at a time)
+
+  // For backwards-compatibility with existing uses of (the now deprecated)
+  // "nextEnclosedFrameSize()", call that function to implement this one:
+  frameSize = nextEnclosedFrameSize(framePtr, dataSize);
+
+  frameDurationInMicroseconds = 0; // by default.  Subclasses should correct this.
+}
+
+// Reads network data into this packet's buffer (appending at "fTail").
+// Returns False on a read failure or when the buffer is already full
+// (possible when a large packet arrives over TCP in several reads);
+// "packetReadWasIncomplete" is both an input (are we continuing a partial
+// read?) and an output (does more data remain to be read?).
+Boolean BufferedPacket::fillInData(RTPInterface& rtpInterface, struct sockaddr_in& fromAddress,
+				   Boolean& packetReadWasIncomplete) {
+  if (!packetReadWasIncomplete) reset();
+
+  unsigned const maxBytesToRead = bytesAvailable();
+  if (maxBytesToRead == 0) return False; // exceeded buffer size when reading over TCP
+
+  unsigned numBytesRead;
+  int tcpSocketNum; // not used
+  unsigned char tcpStreamChannelId; // not used
+  if (!rtpInterface.handleRead(&fBuf[fTail], maxBytesToRead,
+			       numBytesRead, fromAddress,
+			       tcpSocketNum, tcpStreamChannelId,
+			       packetReadWasIncomplete)) {
+    return False;
+  }
+  fTail += numBytesRead;
+  return True;
+}
+
+// Records the RTP-header-derived metadata and reception time for this
+// packet, for later retrieval when the packet's data is consumed ("use()").
+void BufferedPacket
+::assignMiscParams(unsigned short rtpSeqNo, unsigned rtpTimestamp,
+		   struct timeval presentationTime,
+		   Boolean hasBeenSyncedUsingRTCP, Boolean rtpMarkerBit,
+		   struct timeval timeReceived) {
+  fRTPSeqNo = rtpSeqNo;
+  fRTPTimestamp = rtpTimestamp;
+  fPresentationTime = presentationTime;
+  fHasBeenSyncedUsingRTCP = hasBeenSyncedUsingRTCP;
+  fRTPMarkerBit = rtpMarkerBit;
+  fTimeReceived = timeReceived;
+}
+
+// Advances the read pointer past "numBytes" bytes, clamping so that it
+// never moves beyond the end of the stored data.
+void BufferedPacket::skip(unsigned numBytes) {
+  unsigned const newHead = fHead + numBytes;
+  fHead = (newHead > fTail) ? fTail : newHead;
+}
+
+// Trims up to "numBytes" bytes from the end of the stored data (but never
+// more than is actually stored).
+void BufferedPacket::removePadding(unsigned numBytes) {
+  unsigned const bytesStored = fTail - fHead;
+  fTail -= (numBytes > bytesStored) ? bytesStored : numBytes;
+}
+
+// Appends up to "numBytes" bytes of "newData" to the stored data, silently
+// truncating whatever does not fit in the remaining buffer space.
+void BufferedPacket::appendData(unsigned char* newData, unsigned numBytes) {
+  unsigned const roomLeft = fPacketSize - fTail;
+  unsigned const bytesToCopy = (numBytes > roomLeft) ? roomLeft : numBytes;
+  memmove(&fBuf[fTail], newData, bytesToCopy);
+  fTail += bytesToCopy;
+}
+
+// Copies the next enclosed frame (as determined by a subclass's
+// "getNextEnclosedFrameParameters()") out of the buffer into "to", along
+// with the packet's RTP parameters.  If the frame doesn't fit in "toSize",
+// the excess is truncated.  Advances "fHead" past the consumed frame, and
+// updates "fPresentationTime" for any subsequent frame in this packet.
+void BufferedPacket::use(unsigned char* to, unsigned toSize,
+			 unsigned& bytesUsed, unsigned& bytesTruncated,
+			 unsigned short& rtpSeqNo, unsigned& rtpTimestamp,
+			 struct timeval& presentationTime,
+			 Boolean& hasBeenSyncedUsingRTCP,
+			 Boolean& rtpMarkerBit) {
+  unsigned char* origFramePtr = &fBuf[fHead];
+  unsigned char* newFramePtr = origFramePtr; // may change in the call below
+  unsigned frameSize, frameDurationInMicroseconds;
+  getNextEnclosedFrameParameters(newFramePtr, fTail - fHead,
+				 frameSize, frameDurationInMicroseconds);
+  if (frameSize > toSize) {
+    // Note: "bytesTruncated" is accumulated (+=) into the caller's running
+    // total, whereas the no-truncation branch resets it to 0.
+    bytesTruncated += frameSize - toSize;
+    bytesUsed = toSize;
+  } else {
+    bytesTruncated = 0;
+    bytesUsed = frameSize;
+  }
+
+  memmove(to, newFramePtr, bytesUsed);
+  // Skip past any prefix bytes that the subclass moved the frame pointer over,
+  // as well as the frame itself:
+  fHead += (newFramePtr - origFramePtr) + frameSize;
+  ++fUseCount;
+
+  rtpSeqNo = fRTPSeqNo;
+  rtpTimestamp = fRTPTimestamp;
+  presentationTime = fPresentationTime;
+  hasBeenSyncedUsingRTCP = fHasBeenSyncedUsingRTCP;
+  rtpMarkerBit = fRTPMarkerBit;
+
+  // Update "fPresentationTime" for the next enclosed frame (if any):
+  fPresentationTime.tv_usec += frameDurationInMicroseconds;
+  if (fPresentationTime.tv_usec >= 1000000) {
+    fPresentationTime.tv_sec += fPresentationTime.tv_usec/1000000;
+    fPresentationTime.tv_usec = fPresentationTime.tv_usec%1000000;
+  }
+}
+
+BufferedPacketFactory::BufferedPacketFactory() { // no state to initialize
+}
+
+BufferedPacketFactory::~BufferedPacketFactory() { // no state to clean up
+}
+
+// Default factory: creates a plain "BufferedPacket".  Subclassed factories
+// override this to create packet subclasses for specific payload formats.
+BufferedPacket* BufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
+  return new BufferedPacket;
+}
+
+
+////////// ReorderingPacketBuffer implementation //////////
+
+// If no factory is supplied, we create (and therefore own) a default one;
+// note that the destructor unconditionally deletes "fPacketFactory", so a
+// caller-supplied factory is also owned by this object from here on.
+ReorderingPacketBuffer
+::ReorderingPacketBuffer(BufferedPacketFactory* packetFactory)
+  : fThresholdTime(100000) /* default reordering threshold: 100 ms */,
+    fHaveSeenFirstPacket(False), fHeadPacket(NULL), fTailPacket(NULL), fSavedPacket(NULL), fSavedPacketFree(True) {
+  fPacketFactory = (packetFactory == NULL)
+    ? (new BufferedPacketFactory)
+    : packetFactory;
+}
+
+ReorderingPacketBuffer::~ReorderingPacketBuffer() {
+  reset(); // frees all queued packets (and the saved packet, if unqueued)
+  delete fPacketFactory;
+}
+
+// Frees all buffered packets and returns to the 'no packets seen' state.
+// (Deleting "fHeadPacket" deletes the whole chain via the packets' links.)
+void ReorderingPacketBuffer::reset() {
+  if (fSavedPacketFree) delete fSavedPacket; // because fSavedPacket is not in the list
+  delete fHeadPacket; // will also delete fSavedPacket if it's in the list
+  resetHaveSeenFirstPacket();
+  fHeadPacket = fTailPacket = fSavedPacket = NULL;
+}
+
+// Returns a packet for the caller to fill in.  We keep one 'saved' packet
+// around so that, in the common no-reordering case, no allocation is needed;
+// a fresh packet is allocated only while the saved one is still in use.
+BufferedPacket* ReorderingPacketBuffer::getFreePacket(MultiFramedRTPSource* ourSource) {
+  if (fSavedPacket == NULL) { // we're being called for the first time
+    fSavedPacket = fPacketFactory->createNewPacket(ourSource);
+    fSavedPacketFree = True;
+  }
+
+  if (fSavedPacketFree == True) {
+    fSavedPacketFree = False;
+    return fSavedPacket;
+  } else {
+    return fPacketFactory->createNewPacket(ourSource);
+  }
+}
+
+// Inserts "bPacket" into the queue, kept sorted by RTP sequence number
+// (using modular comparison, "seqNumLT()").  Returns False - meaning the
+// caller should discard/reuse the packet - if it is a duplicate, or arrived
+// too late (its sequence number precedes the next one we expect).
+Boolean ReorderingPacketBuffer::storePacket(BufferedPacket* bPacket) {
+  unsigned short rtpSeqNo = bPacket->rtpSeqNo();
+
+  if (!fHaveSeenFirstPacket) {
+    fNextExpectedSeqNo = rtpSeqNo; // initialization
+    bPacket->isFirstPacket() = True;
+    fHaveSeenFirstPacket = True;
+  }
+
+  // Ignore this packet if its sequence number is less than the one
+  // that we're looking for (in this case, it's been excessively delayed).
+  if (seqNumLT(rtpSeqNo, fNextExpectedSeqNo)) return False;
+
+  if (fTailPacket == NULL) {
+    // Common case: There are no packets in the queue; this will be the first one:
+    bPacket->nextPacket() = NULL;
+    fHeadPacket = fTailPacket = bPacket;
+    return True;
+  }
+
+  if (seqNumLT(fTailPacket->rtpSeqNo(), rtpSeqNo)) {
+    // The next-most common case: There are packets already in the queue; this packet arrived in order => put it at the tail:
+    bPacket->nextPacket() = NULL;
+    fTailPacket->nextPacket() = bPacket;
+    fTailPacket = bPacket;
+    return True;
+  }
+
+  if (rtpSeqNo == fTailPacket->rtpSeqNo()) {
+    // This is a duplicate packet - ignore it
+    return False;
+  }
+
+  // Rare case: This packet is out-of-order.  Run through the list (from the head), to figure out where it belongs:
+  BufferedPacket* beforePtr = NULL;
+  BufferedPacket* afterPtr = fHeadPacket;
+  while (afterPtr != NULL) {
+    if (seqNumLT(rtpSeqNo, afterPtr->rtpSeqNo())) break; // it comes here
+    if (rtpSeqNo == afterPtr->rtpSeqNo()) {
+      // This is a duplicate packet - ignore it
+      return False;
+    }
+
+    beforePtr = afterPtr;
+    afterPtr = afterPtr->nextPacket();
+  }
+
+  // Link our new packet between "beforePtr" and "afterPtr":
+  // (Note: "afterPtr" cannot be NULL here, because the in-order and duplicate
+  //  tail cases were already handled above, so "fTailPacket" compares greater.)
+  bPacket->nextPacket() = afterPtr;
+  if (beforePtr == NULL) {
+    fHeadPacket = bPacket;
+  } else {
+    beforePtr->nextPacket() = bPacket;
+  }
+
+  return True;
+}
+
+// Unlinks the (fully-consumed) head packet from the queue, bumps the next
+// expected sequence number, and hands the packet back via "freePacket()"
+// (which either marks the 'saved' packet as free again, or deletes it).
+void ReorderingPacketBuffer::releaseUsedPacket(BufferedPacket* packet) {
+  // ASSERT: packet == fHeadPacket
+  // ASSERT: fNextExpectedSeqNo == packet->rtpSeqNo()
+  ++fNextExpectedSeqNo; // because we're finished with this packet now
+
+  fHeadPacket = fHeadPacket->nextPacket();
+  if (!fHeadPacket) {
+    fTailPacket = NULL;
+  }
+  packet->nextPacket() = NULL;
+
+  freePacket(packet);
+}
+
+// Returns the next packet that is ready to be delivered, or NULL if we're
+// still waiting for an earlier packet to arrive (reordering).  If the head
+// packet has waited longer than "fThresholdTime" microseconds, we give up
+// on the missing packet(s) and deliver the head anyway, reporting loss via
+// "packetLossPreceded".
+BufferedPacket* ReorderingPacketBuffer
+::getNextCompletedPacket(Boolean& packetLossPreceded) {
+  if (fHeadPacket == NULL) return NULL;
+
+  // Check whether the next packet we want is already at the head
+  // of the queue:
+  // ASSERT: fHeadPacket->rtpSeqNo() >= fNextExpectedSeqNo
+  if (fHeadPacket->rtpSeqNo() == fNextExpectedSeqNo) {
+    packetLossPreceded = fHeadPacket->isFirstPacket();
+    // (The very first packet is treated as if there was packet loss beforehand.)
+    return fHeadPacket;
+  }
+
+  // We're still waiting for our desired packet to arrive.  However, if
+  // our time threshold has been exceeded, then forget it, and return
+  // the head packet instead:
+  Boolean timeThresholdHasBeenExceeded;
+  if (fThresholdTime == 0) {
+    timeThresholdHasBeenExceeded = True; // optimization
+  } else {
+    struct timeval timeNow;
+    gettimeofday(&timeNow, NULL);
+    // NOTE(review): this elapsed-time computation is done in unsigned
+    // arithmetic; it assumes "timeNow" >= the packet's receive time.
+    unsigned uSecondsSinceReceived
+      = (timeNow.tv_sec - fHeadPacket->timeReceived().tv_sec)*1000000
+      + (timeNow.tv_usec - fHeadPacket->timeReceived().tv_usec);
+    timeThresholdHasBeenExceeded = uSecondsSinceReceived > fThresholdTime;
+  }
+  if (timeThresholdHasBeenExceeded) {
+    fNextExpectedSeqNo = fHeadPacket->rtpSeqNo();
+        // we've given up on earlier packets now
+    packetLossPreceded = True;
+    return fHeadPacket;
+  }
+
+  // Otherwise, keep waiting for our desired packet to arrive:
+  return NULL;
+}
diff --git a/liveMedia/OggDemuxedTrack.cpp b/liveMedia/OggDemuxedTrack.cpp
new file mode 100644
index 0000000..f57071b
--- /dev/null
+++ b/liveMedia/OggDemuxedTrack.cpp
@@ -0,0 +1,43 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media track, demultiplexed from an Ogg file
+// Implementation
+
+#include "OggDemuxedTrack.hh"
+#include "OggFile.hh"
+
+OggDemuxedTrack::OggDemuxedTrack(UsageEnvironment& env, unsigned trackNumber, OggDemux& sourceDemux)
+  : FramedSource(env),
+    fOurTrackNumber(trackNumber), fOurSourceDemux(sourceDemux),
+    fCurrentPageIsContinuation(False) {
+  fNextPresentationTime.tv_sec = 0; fNextPresentationTime.tv_usec = 0; // not yet known
+}
+
+OggDemuxedTrack::~OggDemuxedTrack() {
+  // Note: "removeTrack()" may delete the demux itself once its last track is gone.
+  fOurSourceDemux.removeTrack(fOurTrackNumber);
+}
+
+// A read request on a track simply drives the shared demux's parser; the
+// parser delivers data into whichever track the next parsed packet belongs to.
+void OggDemuxedTrack::doGetNextFrame() {
+  fOurSourceDemux.continueReading();
+}
+
+// Returns this track's MIME type, as recorded in the file's track table.
+char const* OggDemuxedTrack::MIMEtype() const {
+  OggTrack* track = fOurSourceDemux.fOurFile.lookup(fOurTrackNumber);
+  if (track == NULL) return "(unknown)"; // shouldn't happen
+  return track->mimeType;
+}
diff --git a/liveMedia/OggDemuxedTrack.hh b/liveMedia/OggDemuxedTrack.hh
new file mode 100644
index 0000000..836a299
--- /dev/null
+++ b/liveMedia/OggDemuxedTrack.hh
@@ -0,0 +1,58 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media track, demultiplexed from an Ogg file
+// C++ header
+
+#ifndef _OGG_DEMUXED_TRACK_HH
+#define _OGG_DEMUXED_TRACK_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class OggDemux; // forward
+
+// A "FramedSource" representing one elementary track demultiplexed from an
+// Ogg file.  Instances are created/owned by an "OggDemux"; "OggFileParser"
+// writes delivered frame data directly through the accessors below.
+class OggDemuxedTrack: public FramedSource {
+private: // We are created only by a OggDemux (a friend)
+  friend class OggDemux;
+  OggDemuxedTrack(UsageEnvironment& env, unsigned trackNumber, OggDemux& sourceDemux);
+  virtual ~OggDemuxedTrack();
+
+private:
+  // redefined virtual functions:
+  virtual void doGetNextFrame();
+  virtual char const* MIMEtype() const;
+
+private: // We are accessed only by OggDemux and by OggFileParser (a friend)
+  friend class OggFileParser;
+  // These accessors expose the protected "FramedSource" delivery state
+  // (destination buffer, sizes, timing) by non-const reference:
+  unsigned char*& to() { return fTo; }
+  unsigned& maxSize() { return fMaxSize; }
+  unsigned& frameSize() { return fFrameSize; }
+  unsigned& numTruncatedBytes() { return fNumTruncatedBytes; }
+  struct timeval& presentationTime() { return fPresentationTime; }
+  unsigned& durationInMicroseconds() { return fDurationInMicroseconds; }
+  struct timeval& nextPresentationTime() { return fNextPresentationTime; }
+
+private:
+  unsigned fOurTrackNumber;
+  OggDemux& fOurSourceDemux;
+  Boolean fCurrentPageIsContinuation;
+  struct timeval fNextPresentationTime;
+};
+
+#endif
diff --git a/liveMedia/OggFile.cpp b/liveMedia/OggFile.cpp
new file mode 100644
index 0000000..3c7a8c3
--- /dev/null
+++ b/liveMedia/OggFile.cpp
@@ -0,0 +1,328 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that encapsulates an Ogg file.
+// Implementation
+
+#include "OggFileParser.hh"
+#include "OggDemuxedTrack.hh"
+#include "ByteStreamFileSource.hh"
+#include "VorbisAudioRTPSink.hh"
+#include "SimpleRTPSink.hh"
+#include "TheoraVideoRTPSink.hh"
+
+////////// OggTrackTable definition /////////
+
+// For looking up and iterating over the file's tracks:
+
+// A thin wrapper around a "HashTable", mapping track numbers to "OggTrack"
+// descriptors.  It owns the descriptors (they're deleted in its destructor).
+class OggTrackTable {
+public:
+  OggTrackTable();
+  virtual ~OggTrackTable();
+
+  void add(OggTrack* newTrack);
+  OggTrack* lookup(u_int32_t trackNumber);
+
+  unsigned numTracks() const;
+
+private:
+  friend class OggTrackTableIterator;
+  HashTable* fTable;
+};
+
+
+////////// OggFile implementation //////////
+
+// Asynchronously creates an "OggFile".  "onCreation" is invoked (with
+// "onCreationClientData") once the file's initial headers have been parsed.
+void OggFile::createNew(UsageEnvironment& env, char const* fileName,
+			onCreationFunc* onCreation, void* onCreationClientData) {
+  new OggFile(env, fileName, onCreation, onCreationClientData);
+}
+
+// Returns the track descriptor for "trackNumber", or NULL if unknown.
+OggTrack* OggFile::lookup(u_int32_t trackNumber) {
+  return fTrackTable->lookup(trackNumber);
+}
+
+// Creates a new demultiplexor for this file, and registers it so that we can
+// clean it up in our destructor (it deregisters itself via "removeDemux()").
+OggDemux* OggFile::newDemux() {
+  OggDemux* demux = new OggDemux(*this);
+  fDemuxesTable->Add((char const*)demux, demux);
+
+  return demux;
+}
+
+// Returns the number of tracks found in the file.
+unsigned OggFile::numTracks() const {
+  return fTrackTable->numTracks();
+}
+
+// Wraps "baseSource" (a demuxed track) as appropriate for streaming, and
+// reports the track's estimated bitrate (for RTCP).  Currently no extra
+// 'framer' filters are added, so the source is returned unchanged and
+// "numFiltersInFrontOfTrack" is always 0.
+FramedSource* OggFile
+::createSourceForStreaming(FramedSource* baseSource, u_int32_t trackNumber,
+			   unsigned& estBitrate, unsigned& numFiltersInFrontOfTrack) {
+  if (baseSource == NULL) return NULL;
+
+  FramedSource* result = baseSource; // by default
+  numFiltersInFrontOfTrack = 0; // by default
+
+  // Look at the track's MIME type to set its estimated bitrate (for use by RTCP).
+  // (Later, try to be smarter about figuring out the bitrate.) #####
+  // Some MIME types also require adding a special 'framer' in front of the source.
+  OggTrack* track = lookup(trackNumber);
+  if (track != NULL) { // should always be true
+    estBitrate = track->estBitrate;
+  }
+
+  return result;
+}
+
+// Creates an "RTPSink" suitable for streaming the given track, based on its
+// MIME type (Vorbis, Opus, or Theora).  Returns NULL for unknown media types.
+RTPSink* OggFile
+::createRTPSinkForTrackNumber(u_int32_t trackNumber, Groupsock* rtpGroupsock,
+			      unsigned char rtpPayloadTypeIfDynamic) {
+  OggTrack* track = lookup(trackNumber);
+  if (track == NULL || track->mimeType == NULL) return NULL;
+
+  RTPSink* result = NULL; // default value for unknown media types
+
+  if (strcmp(track->mimeType, "audio/VORBIS") == 0) {
+    // For Vorbis audio, we use the special "identification", "comment", and "setup" headers
+    // that we read when we initially read the headers at the start of the file:
+    result = VorbisAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+					   track->samplingFrequency, track->numChannels,
+					   track->vtoHdrs.header[0], track->vtoHdrs.headerSize[0],
+					   track->vtoHdrs.header[1], track->vtoHdrs.headerSize[1],
+					   track->vtoHdrs.header[2], track->vtoHdrs.headerSize[2]);
+  } else if (strcmp(track->mimeType, "audio/OPUS") == 0) {
+    // Opus is always streamed as 48 kHz stereo at the RTP level:
+    result = SimpleRTPSink
+      ::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+		  48000, "audio", "OPUS", 2, False/*only 1 Opus 'packet' in each RTP packet*/);
+  } else if (strcmp(track->mimeType, "video/THEORA") == 0) {
+    // For Theora video, we use the special "identification", "comment", and "setup" headers
+    // that we read when we initially read the headers at the start of the file:
+    result = TheoraVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+					   track->vtoHdrs.header[0], track->vtoHdrs.headerSize[0],
+					   track->vtoHdrs.header[1], track->vtoHdrs.headerSize[1],
+					   track->vtoHdrs.header[2], track->vtoHdrs.headerSize[2]);
+  }
+
+  return result;
+}
+
+
+// Opens the file and kicks off asynchronous parsing of its initial (BOS)
+// pages; "handleEndOfBosPageParsing()" fires the creation callback when done.
+OggFile::OggFile(UsageEnvironment& env, char const* fileName,
+		 onCreationFunc* onCreation, void* onCreationClientData)
+  : Medium(env),
+    fFileName(strDup(fileName)),
+    fOnCreation(onCreation), fOnCreationClientData(onCreationClientData) {
+  fTrackTable = new OggTrackTable;
+  fDemuxesTable = HashTable::create(ONE_WORD_HASH_KEYS);
+
+  FramedSource* inputSource = ByteStreamFileSource::createNew(envir(), fileName);
+  if (inputSource == NULL) {
+    // The specified input file does not exist!
+    fParserForInitialization = NULL;
+    handleEndOfBosPageParsing(); // we have no file, and thus no tracks, but we still need to signal this
+  } else {
+    // Initialize ourselves by parsing the file's headers:
+    fParserForInitialization
+      = new OggFileParser(*this, inputSource, handleEndOfBosPageParsing, this);
+  }
+}
+
+OggFile::~OggFile() {
+  delete fParserForInitialization;
+
+  // Delete any outstanding "OggDemux"s, and the table for them:
+  // (Each demux's destructor calls "removeDemux()", which is why we drain
+  //  the table with "RemoveNext()" first, rather than iterating over it.)
+  OggDemux* demux;
+  while ((demux = (OggDemux*)fDemuxesTable->RemoveNext()) != NULL) {
+    delete demux;
+  }
+  delete fDemuxesTable;
+  delete fTrackTable;
+
+  delete[] (char*)fFileName;
+}
+
+// Static trampoline: forwards the parser's completion callback to the member function.
+void OggFile::handleEndOfBosPageParsing(void* clientData) {
+  ((OggFile*)clientData)->handleEndOfBosPageParsing();
+}
+
+// Called once the file's initial headers have been parsed (or when there is
+// no file at all); tears down the initialization parser and notifies our creator.
+void OggFile::handleEndOfBosPageParsing() {
+  // Delete our parser, because it's done its job now:
+  delete fParserForInitialization; fParserForInitialization = NULL;
+
+  // Finally, signal our caller that we've been created and initialized:
+  if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData);
+}
+
+// Records a newly-discovered track (called by the parser); takes ownership.
+void OggFile::addTrack(OggTrack* newTrack) {
+  fTrackTable->add(newTrack);
+}
+
+// Deregisters a demux (called from "~OggDemux()") so we won't double-delete it.
+void OggFile::removeDemux(OggDemux* demux) {
+  fDemuxesTable->Remove((char const*)demux);
+}
+
+
+////////// OggTrackTable implementation /////////
+
+OggTrackTable::OggTrackTable()
+  : fTable(HashTable::create(ONE_WORD_HASH_KEYS)) { // keyed by track number
+}
+
+OggTrackTable::~OggTrackTable() {
+  // Remove and delete all of our "OggTrack" descriptors, and the hash table itself:
+  OggTrack* track;
+  while ((track = (OggTrack*)fTable->RemoveNext()) != NULL) {
+    delete track;
+  }
+  delete fTable;
+}
+
+// Adds "newTrack" keyed by its track number; any previously-stored track
+// with the same number is replaced (and deleted).
+void OggTrackTable::add(OggTrack* newTrack) {
+  OggTrack* existingTrack
+    = (OggTrack*)fTable->Add((char const*)newTrack->trackNumber, newTrack);
+  delete existingTrack; // if any
+}
+
+// Returns the track for "trackNumber", or NULL if none is recorded.
+OggTrack* OggTrackTable::lookup(u_int32_t trackNumber) {
+  return (OggTrack*)fTable->Lookup((char const*)trackNumber);
+}
+
+unsigned OggTrackTable::numTracks() const { return fTable->numEntries(); } // # tracks recorded
+
+OggTrackTableIterator::OggTrackTableIterator(OggTrackTable& ourTable) {
+  fIter = HashTable::Iterator::create(*(ourTable.fTable)); // we own the iterator
+}
+
+OggTrackTableIterator::~OggTrackTableIterator() {
+  delete fIter;
+}
+
+// Returns the next track in the table, or NULL when the iteration is exhausted.
+OggTrack* OggTrackTableIterator::next() {
+  char const* key; // the track number; not needed by our callers
+  return (OggTrack*)fIter->next(key);
+}
+
+
+////////// OggTrack implementation //////////
+
+// Initializes a track descriptor with placeholder defaults; real values are
+// filled in later, when the track's headers are parsed.
+OggTrack::OggTrack()
+  : trackNumber(0), mimeType(NULL),
+    samplingFrequency(48000), numChannels(2), estBitrate(100) { // default settings
+  vtoHdrs.header[0] = vtoHdrs.header[1] = vtoHdrs.header[2] = NULL;
+  vtoHdrs.headerSize[0] = vtoHdrs.headerSize[1] = vtoHdrs.headerSize[2] = 0;
+
+  vtoHdrs.vorbis_mode_count = 0;
+  vtoHdrs.vorbis_mode_blockflag = NULL;
+}
+
+OggTrack::~OggTrack() {
+  // Free the saved Vorbis/Theora/Opus headers, and any Vorbis mode flags:
+  delete[] vtoHdrs.header[0]; delete[] vtoHdrs.header[1]; delete[] vtoHdrs.header[2];
+  delete[] vtoHdrs.vorbis_mode_blockflag;
+}
+
+
+///////// OggDemux implementation /////////
+
+// Creates a "FramedSource" for the next track (in table-iteration order)
+// whose MIME type we recognized; returns NULL - and sets "resultTrackNumber"
+// to 0 - when there are no more such tracks.
+FramedSource* OggDemux::newDemuxedTrack(u_int32_t& resultTrackNumber) {
+  OggTrack* nextTrack;
+  do {
+    nextTrack = fIter->next();
+  } while (nextTrack != NULL && nextTrack->mimeType == NULL); // skip unrecognized tracks
+
+  if (nextTrack == NULL) { // no more tracks
+    resultTrackNumber = 0;
+    return NULL;
+  }
+
+  resultTrackNumber = nextTrack->trackNumber;
+  FramedSource* trackSource = new OggDemuxedTrack(envir(), resultTrackNumber, *this);
+  fDemuxedTracksTable->Add((char const*)resultTrackNumber, trackSource);
+  return trackSource;
+}
+
+// Creates a "FramedSource" for a specific track number (0 is invalid).
+// Note: unlike "newDemuxedTrack()", this does not check that the track
+// number exists in the file's track table.
+FramedSource* OggDemux::newDemuxedTrackByTrackNumber(unsigned trackNumber) {
+  if (trackNumber == 0) return NULL;
+
+  FramedSource* trackSource = new OggDemuxedTrack(envir(), trackNumber, *this);
+  fDemuxedTracksTable->Add((char const*)trackNumber, trackSource);
+  return trackSource;
+}
+
+// Returns the demuxed-track source for "trackNumber", or NULL if none exists.
+OggDemuxedTrack* OggDemux::lookupDemuxedTrack(u_int32_t trackNumber) {
+  return (OggDemuxedTrack*)fDemuxedTracksTable->Lookup((char const*)trackNumber);
+}
+
+// Opens a fresh read of the file (independent of the initialization read) and
+// creates a parser that will deliver page data into our demuxed tracks.
+OggDemux::OggDemux(OggFile& ourFile)
+  : Medium(ourFile.envir()),
+    fOurFile(ourFile), fDemuxedTracksTable(HashTable::create(ONE_WORD_HASH_KEYS)),
+    fIter(new OggTrackTableIterator(*fOurFile.fTrackTable)) {
+  FramedSource* fileSource = ByteStreamFileSource::createNew(envir(), ourFile.fileName());
+  fOurParser = new OggFileParser(ourFile, fileSource, handleEndOfFile, this, this);
+}
+
+OggDemux::~OggDemux() {
+  // Begin by acting as if we've reached the end of the source file.
+  // This should cause all of our demuxed tracks to get closed.
+  handleEndOfFile();
+
+  // Then delete our table of "OggDemuxedTrack"s
+  // - but not the "OggDemuxedTrack"s themselves; that should have already happened:
+  delete fDemuxedTracksTable;
+
+  delete fIter;
+  delete fOurParser;
+  fOurFile.removeDemux(this); // so the file won't try to delete us again
+}
+
+// Called from "~OggDemuxedTrack()".  Once the last track is removed, the
+// demux deletes itself (self-managed lifetime).
+void OggDemux::removeTrack(u_int32_t trackNumber) {
+  fDemuxedTracksTable->Remove((char const*)trackNumber);
+  if (fDemuxedTracksTable->numEntries() == 0) {
+    // We no longer have any demuxed tracks, so delete ourselves now:
+    delete this;
+  }
+}
+
+// Drives the parser forward; called whenever a demuxed track wants more data.
+void OggDemux::continueReading() {
+  fOurParser->continueParsing();
+}
+
+// Static trampoline: forwards the parser's end-of-file callback to the member function.
+void OggDemux::handleEndOfFile(void* clientData) {
+  ((OggDemux*)clientData)->handleEndOfFile();
+}
+
+void OggDemux::handleEndOfFile() {
+  // Iterate through all of our 'demuxed tracks', handling 'end of input' on each one.
+  // Hack: Because this can cause the hash table to get modified underneath us,
+  // we don't call the handlers until after we've first iterated through all of the tracks.
+  // (Each "handleClosure()" can destroy its track, which removes it from the
+  //  table - and removing the last one deletes this demux itself.)
+  unsigned numTracks = fDemuxedTracksTable->numEntries();
+  if (numTracks == 0) return;
+  OggDemuxedTrack** tracks = new OggDemuxedTrack*[numTracks];
+
+  HashTable::Iterator* iter = HashTable::Iterator::create(*fDemuxedTracksTable);
+  unsigned i;
+  char const* trackNumber;
+
+  for (i = 0; i < numTracks; ++i) {
+    tracks[i] = (OggDemuxedTrack*)iter->next(trackNumber);
+  }
+  delete iter;
+
+  for (i = 0; i < numTracks; ++i) {
+    if (tracks[i] == NULL) continue; // sanity check; shouldn't happen
+    tracks[i]->handleClosure();
+  }
+
+  delete[] tracks;
+}
diff --git a/liveMedia/OggFileParser.cpp b/liveMedia/OggFileParser.cpp
new file mode 100644
index 0000000..7d02a80
--- /dev/null
+++ b/liveMedia/OggFileParser.cpp
@@ -0,0 +1,1032 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for an Ogg file.
+// Implementation
+
+#include "OggFileParser.hh"
+#include "OggDemuxedTrack.hh"
+#include <GroupsockHelper.hh> // for "gettimeofday()"
+
+// Allocates a per-page table with one (zero-initialized) size slot for each
+// of the page's lacing segments.
+PacketSizeTable::PacketSizeTable(unsigned number_page_segments)
+  : numCompletedPackets(0), totSizes(0), nextPacketNumToDeliver(0),
+    lastPacketIsIncomplete(False) {
+  size = new unsigned[number_page_segments];
+  for (unsigned i = 0; i < number_page_segments; ++i) size[i] = 0;
+}
+
+PacketSizeTable::~PacketSizeTable() {
+  delete[] size;
+}
+
+// The parser is used in two modes: with "ourDemux" NULL it performs the
+// initial header scan of the file (starting immediately); with a demux it
+// parses and delivers page data on demand, driven by client reads.
+OggFileParser::OggFileParser(OggFile& ourFile, FramedSource* inputSource,
+			     FramedSource::onCloseFunc* onEndFunc, void* onEndClientData,
+			     OggDemux* ourDemux)
+  : StreamParser(inputSource, onEndFunc, onEndClientData, continueParsing, this),
+    fOurFile(ourFile), fInputSource(inputSource),
+    fOnEndFunc(onEndFunc), fOnEndClientData(onEndClientData),
+    fOurDemux(ourDemux), fNumUnfulfilledTracks(0),
+    fPacketSizeTable(NULL), fCurrentTrackNumber(0), fSavedPacket(NULL) {
+  if (ourDemux == NULL) {
+    // Initialization
+    fCurrentParseState = PARSING_START_OF_FILE;
+    continueParsing();
+  } else {
+    fCurrentParseState = PARSING_AND_DELIVERING_PAGES;
+    // In this case, parsing (of page data) doesn't start until a client starts reading from a track.
+  }
+}
+
+OggFileParser::~OggFileParser() {
+  delete[] fSavedPacket;
+  delete fPacketSizeTable;
+  Medium::close(fInputSource); // we own the input source
+}
+
+// Static trampoline used as the "StreamParser" after-getting callback;
+// the data parameters are unused because parsing pulls from the stream itself.
+void OggFileParser::continueParsing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime) {
+  ((OggFileParser*)clientData)->continueParsing();
+}
+
+// Resumes parsing where we left off.  Only when "parse()" reports full
+// completion (True) do we invoke the 'end' callback.
+void OggFileParser::continueParsing() {
+  if (fInputSource != NULL) {
+    if (fInputSource->isCurrentlyAwaitingData()) return;
+      // Our input source is currently being read. Wait until that read completes
+
+    if (!parse()) {
+      // We didn't complete the parsing, because we had to read more data from the source,
+      // or because we're waiting for another read from downstream.
+      // Once that happens, we'll get called again.
+      return;
+    }
+  }
+
+  // We successfully parsed the file.  Call our 'done' function now:
+  if (fOnEndFunc != NULL) (*fOnEndFunc)(fOnEndClientData);
+}
+
+// The parser's state machine.  Returns True when parsing has fully completed;
+// False when it was interrupted (needing more input data, which is signaled
+// by the thrown integer from the underlying "StreamParser" byte-getters).
+// Note: the switch cases have no 'break's - each state deliberately falls
+// through into the next, so one call advances as far as the data allows.
+Boolean OggFileParser::parse() {
+  try {
+    while (1) {
+      switch (fCurrentParseState) {
+        case PARSING_START_OF_FILE: {
+	  if (parseStartOfFile()) return True;
+        }
+        case PARSING_AND_DELIVERING_PAGES: {
+	  parseAndDeliverPages();
+        }
+        case DELIVERING_PACKET_WITHIN_PAGE: {
+	  if (deliverPacketWithinPage()) return False;
+        }
+      }
+    }
+  } catch (int /*e*/) {
+#ifdef DEBUG
+    fprintf(stderr, "OggFileParser::parse() EXCEPTION (This is normal behavior - *not* an error)\n");
+#endif
+    return False; // the parsing got interrupted
+  }
+}
+
+// Scans the file's initial pages until we've passed all BOS (beginning-of-
+// stream) pages and collected the codec headers we still need.  Always
+// returns True on (normal) completion; an interruption for more data is
+// signaled by exception from the byte-getters instead.
+Boolean OggFileParser::parseStartOfFile() {
+#ifdef DEBUG
+  fprintf(stderr, "parsing start of file\n");
+#endif
+  // Read and parse each 'page', until we see the first non-BOS page, or until we have
+  // collected all required headers for Vorbis, Theora, or Opus track(s) (if any).
+  u_int8_t header_type_flag;
+  do {
+    header_type_flag = parseInitialPage();
+  } while ((header_type_flag&0x02) != 0 || needHeaders());
+
+#ifdef DEBUG
+  fprintf(stderr, "Finished parsing start of file\n");
+#endif
+  return True;
+}
+
+// Reverses the byte order of a 32-bit value (endianness conversion).
+static u_int32_t byteSwap(u_int32_t x) {
+  return (x<<24)|((x<<8)&0x00FF0000)|((x>>8)&0x0000FF00)|(x>>24);
+}
+
+// Parses one page during initialization.  For a BOS page, sniffs the first
+// packet to identify Vorbis/Opus/Theora tracks and registers a track
+// descriptor; for known codecs, copies out the 'identification', 'comment',
+// and 'setup' header packets as they appear.  Returns the page's
+// header_type_flag (bit 0x02 set => BOS page).
+u_int8_t OggFileParser::parseInitialPage() {
+  u_int8_t header_type_flag;
+  u_int32_t bitstream_serial_number;
+  parseStartOfPage(header_type_flag, bitstream_serial_number);
+
+  // If this is a BOS page, examine the first 8 bytes of the first 'packet', to see whether
+  // the track data type is one that we know how to stream:
+  OggTrack* track;
+  if ((header_type_flag&0x02) != 0) { // BOS
+    char const* mimeType = NULL; // if unknown
+    if (fPacketSizeTable != NULL && fPacketSizeTable->size[0] >= 8) { // sanity check
+      char buf[8];
+      testBytes((u_int8_t*)buf, 8); // peek without consuming
+
+      // Codec magic: Vorbis/Theora put a type byte before their name string;
+      // Opus uses the full 8-byte "OpusHead" signature:
+      if (strncmp(&buf[1], "vorbis", 6) == 0) {
+	mimeType = "audio/VORBIS";
+	++fNumUnfulfilledTracks;
+      } else if (strncmp(buf, "OpusHead", 8) == 0) {
+	mimeType = "audio/OPUS";
+	++fNumUnfulfilledTracks;
+      } else if (strncmp(&buf[1], "theora", 6) == 0) {
+	mimeType = "video/THEORA";
+	++fNumUnfulfilledTracks;
+      }
+    }
+
+    // Add a new track descriptor for this track:
+    track = new OggTrack;
+    track->trackNumber = bitstream_serial_number;
+    track->mimeType = mimeType;
+    fOurFile.addTrack(track);
+  } else { // not a BOS page
+    // Because this is not a BOS page, the specified track should already have been seen:
+    track = fOurFile.lookup(bitstream_serial_number);
+  }
+
+  if (track != NULL) { // sanity check
+#ifdef DEBUG
+    fprintf(stderr, "This track's MIME type: %s\n",
+	    track->mimeType == NULL ? "(unknown)" : track->mimeType);
+#endif
+    if (track->mimeType != NULL &&
+	(strcmp(track->mimeType, "audio/VORBIS") == 0 ||
+	 strcmp(track->mimeType, "video/THEORA") == 0 ||
+	 strcmp(track->mimeType, "audio/OPUS") == 0)) {
+      // Special-case handling of Vorbis, Theora, or Opus tracks:
+      // Make a copy of each packet, until we get the three special headers that we need:
+      Boolean isVorbis = strcmp(track->mimeType, "audio/VORBIS") == 0;
+      Boolean isTheora = strcmp(track->mimeType, "video/THEORA") == 0;
+
+      for (unsigned j = 0; j < fPacketSizeTable->numCompletedPackets && track->weNeedHeaders(); ++j) {
+	unsigned const packetSize = fPacketSizeTable->size[j];
+	if (packetSize == 0) continue; // sanity check
+
+	delete[] fSavedPacket/*if any*/; fSavedPacket = new u_int8_t[packetSize];
+	getBytes(fSavedPacket, packetSize);
+	fPacketSizeTable->totSizes -= packetSize;
+
+	// The start of the packet tells us whether it's a header that we know about:
+	Boolean headerIsKnown = False;
+	unsigned index = 0;
+	if (isVorbis) {
+	  u_int8_t const firstByte = fSavedPacket[0];
+
+	  headerIsKnown = firstByte == 1 || firstByte == 3 || firstByte == 5;
+	  index = (firstByte-1)/2; // 1, 3, or 5 => 0, 1, or 2
+	} else if (isTheora) {
+	  u_int8_t const firstByte = fSavedPacket[0];
+
+	  headerIsKnown = firstByte == 0x80 || firstByte == 0x81 || firstByte == 0x82;
+	  index = firstByte &~0x80; // 0x80, 0x81, or 0x82 => 0, 1, or 2
+	} else { // Opus
+	  if (strncmp((char const*)fSavedPacket, "OpusHead", 8) == 0) {
+	    headerIsKnown = True;
+	    index = 0; // "identification" header
+	  } else if (strncmp((char const*)fSavedPacket, "OpusTags", 8) == 0) {
+	    headerIsKnown = True;
+	    index = 1; // "comment" header
+	  }
+	}
+	if (headerIsKnown) {
+#ifdef DEBUG
+	  char const* headerName[3] = { "identification", "comment", "setup" };
+	  fprintf(stderr, "Saved %d-byte %s \"%s\" header\n", packetSize, track->mimeType,
+		  headerName[index]);
+#endif
+	  // This is a header, but first check it for validity:
+	  if (!validateHeader(track, fSavedPacket, packetSize)) continue;
+
+	  // Save this header (deleting any old header of the same type that we'd saved before)
+	  delete[] track->vtoHdrs.header[index];
+	  track->vtoHdrs.header[index] = fSavedPacket;
+	  fSavedPacket = NULL; // ownership transferred to the track
+	  track->vtoHdrs.headerSize[index] = packetSize;
+
+	  if (!track->weNeedHeaders()) {
+	    // We now have all of the needed Vorbis, Theora, or Opus headers for this track:
+	    --fNumUnfulfilledTracks;
+	  }
+	  // Note: The above code won't work if a required header is fragmented over
+	  // more than one 'page'.  We assume that that won't ever happen...
+	}
+      }
+    }
+  }
+
+  // Skip over any remaining packet data bytes:
+  if (fPacketSizeTable->totSizes > 0) {
+#ifdef DEBUG
+    fprintf(stderr, "Skipping %d remaining packet data bytes\n", fPacketSizeTable->totSizes);
+#endif
+    skipBytes(fPacketSizeTable->totSizes);
+  }
+
+  return header_type_flag;
+}
+
+// A simple bit vector class for reading bits in little-endian order.
+// (We can't use our usual "BitVector" class, because that's big-endian.)
+// A simple bit vector class for reading bits in little-endian order.
+// (We can't use our usual "BitVector" class, because that's big-endian.)
+class LEBitVector {
+public:
+  LEBitVector(u_int8_t const* p, unsigned numBytes)
+    : fPtr(p), fEnd(&p[numBytes]), fNumBitsRemainingInCurrentByte(8) {
+  }
+
+  // Reads the next "numBits" (<=32), least-significant-first.  Returns 0
+  // once the underlying data is exhausted (no error is signaled).
+  u_int32_t getBits(unsigned numBits/*<=32*/) {
+    if (noMoreBits()) {
+      return 0;
+    } else if (numBits == fNumBitsRemainingInCurrentByte) {
+      // Consume exactly the rest of the current byte:
+      u_int32_t result = (*fPtr++)>>(8-fNumBitsRemainingInCurrentByte);
+      fNumBitsRemainingInCurrentByte = 8;
+
+      return result;
+    } else if (numBits < fNumBitsRemainingInCurrentByte) {
+      // Consume part of the current byte:
+      u_int8_t mask = 0xFF>>(8-numBits);
+      u_int32_t result = ((*fPtr)>>(8-fNumBitsRemainingInCurrentByte)) & mask;
+      fNumBitsRemainingInCurrentByte -= numBits;
+
+      return result;
+    } else { // numBits > fNumBitsRemainingInCurrentByte
+      // Do two recursive calls to get the result:
+      unsigned nbr = fNumBitsRemainingInCurrentByte;
+      u_int32_t firstBits = getBits(nbr);
+      u_int32_t nextBits = getBits(numBits - nbr);
+
+      return (nextBits<<nbr) | firstBits;
+    }
+  }
+
+  // Discards "numBits" bits (32 at a time, since getBits() reads at most 32).
+  void skipBits(unsigned numBits) {
+    while (numBits > 32) {
+      (void)getBits(32);
+      numBits -= 32;
+    }
+    (void)getBits(numBits);
+  }
+
+  // NOTE(review): this count is meaningful only while bits remain
+  // (i.e., fPtr < fEnd); once exhausted the pointer difference underflows.
+  unsigned numBitsRemaining() { return (fEnd-fPtr-1)*8 + fNumBitsRemainingInCurrentByte; }
+  Boolean noMoreBits() const { return fPtr >= fEnd; }
+
+private:
+  u_int8_t const* fPtr;
+  u_int8_t const* fEnd;
+  unsigned fNumBitsRemainingInCurrentByte; // 1..8
+};
+
+// The Vorbis spec's "ilog" function: the number of bits needed to represent
+// "n" (i.e., the position of its highest set bit, 1-based); 0 for n <= 0.
+static unsigned ilog(int n) {
+  if (n < 0) return 0;
+
+  unsigned x = (unsigned)n;
+  unsigned result = 0;
+
+  while (x > 0) {
+    ++result;
+    x >>= 1;
+  }
+
+  return result;
+}
+
+// The Vorbis spec's "lookup1_values" function:
+// "the greatest integer value for which [return_value] to the power of [codebook_dimensions]
+// is less than or equal to [codebook_entries]"
+static unsigned lookup1_values(unsigned codebook_entries, unsigned codebook_dimensions) {
+  unsigned return_value = 0;
+  unsigned powerValue;
+
+  do {
+    ++return_value;
+    // Compute powerValue = return_value ** codebook_dimensions
+    if (return_value == 1) powerValue = 1; // optimization
+    else {
+      powerValue = 1;
+      for (unsigned i = 0; i < codebook_dimensions; ++i) {
+	powerValue *= return_value;
+      }
+    }
+  } while (powerValue <= codebook_entries);
+  return_value -= 1; // we overshot by one
+
+  return return_value;
+}
+
+static Boolean parseVorbisSetup_codebook(LEBitVector& bv) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned sync = bv.getBits(24);
+ if (sync != 0x564342) return False;
+ unsigned codebook_dimensions = bv.getBits(16);
+ unsigned codebook_entries = bv.getBits(24);
+ unsigned ordered = bv.getBits(1);
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\t\tcodebook_dimensions: %d; codebook_entries: %d, ordered: %d\n",
+ codebook_dimensions, codebook_entries, ordered);
+#endif
+ if (!ordered) {
+ unsigned sparse = bv.getBits(1);
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\t\t!ordered: sparse %d\n", sparse);
+#endif
+ for (unsigned i = 0; i < codebook_entries; ++i) {
+ unsigned codewordLength;
+
+ if (sparse) {
+ unsigned flag = bv.getBits(1);
+ if (flag) {
+ codewordLength = bv.getBits(5) + 1;
+ } else {
+ codewordLength = 0;
+ }
+ } else {
+ codewordLength = bv.getBits(5) + 1;
+ }
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\t\t\tcodeword length[%d]:\t%d\n", i, codewordLength);
+#else
+ codewordLength = codewordLength; // to prevent compiler warning
+#endif
+ }
+ } else { // ordered
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\t\tordered:\n");
+#endif
+ unsigned current_entry = 0;
+ unsigned current_length = bv.getBits(5) + 1;
+ do {
+ unsigned number = bv.getBits(ilog(codebook_entries - current_entry));
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\t\t\tcodeword length[%d..%d]:\t%d\n",
+ current_entry, current_entry + number - 1, current_length);
+#endif
+ current_entry += number;
+ if (current_entry > codebook_entries) {
+ fprintf(stderr, "Vorbis codebook parsing error: current_entry %d > codebook_entries %d!\n", current_entry, codebook_entries);
+ return False;
+ }
+ ++current_length;
+ } while (current_entry < codebook_entries);
+ }
+
+ unsigned codebook_lookup_type = bv.getBits(4);
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\t\tcodebook_lookup_type: %d\n", codebook_lookup_type);
+#endif
+ if (codebook_lookup_type > 2) {
+ fprintf(stderr, "Vorbis codebook parsing error: codebook_lookup_type %d!\n", codebook_lookup_type);
+ return False;
+ } else if (codebook_lookup_type > 0) { // 1 or 2
+ bv.skipBits(32+32); // "codebook_minimum_value" and "codebook_delta_value"
+ unsigned codebook_value_bits = bv.getBits(4) + 1;
+ bv.skipBits(1); // "codebook_lookup_p"
+ unsigned codebook_lookup_values;
+ if (codebook_lookup_type == 1) {
+ codebook_lookup_values = lookup1_values(codebook_entries, codebook_dimensions);
+ } else { // 2
+ codebook_lookup_values = codebook_entries*codebook_dimensions;
+ }
+
+ bv.skipBits(codebook_lookup_values*codebook_value_bits); // "codebook_multiplicands"
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetup_codebooks(LEBitVector& bv) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned vorbis_codebook_count = bv.getBits(8) + 1;
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\tCodebooks: vorbis_codebook_count: %d\n", vorbis_codebook_count);
+#endif
+ for (unsigned i = 0; i < vorbis_codebook_count; ++i) {
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\tCodebook %d:\n", i);
+#endif
+ if (!parseVorbisSetup_codebook(bv)) return False;
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetup_timeDomainTransforms(LEBitVector& bv) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned vorbis_time_count = bv.getBits(6) + 1;
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\tTime domain transforms: vorbis_time_count: %d\n", vorbis_time_count);
+#endif
+ for (unsigned i = 0; i < vorbis_time_count; ++i) {
+ unsigned val = bv.getBits(16);
+ if (val != 0) {
+ fprintf(stderr, "Vorbis Time domain transforms, read non-zero value %d\n", val);
+ return False;
+ }
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetup_floors(LEBitVector& bv) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned vorbis_floor_count = bv.getBits(6) + 1;
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\tFloors: vorbis_floor_count: %d\n", vorbis_floor_count);
+#endif
+ for (unsigned i = 0; i < vorbis_floor_count; ++i) {
+ unsigned floorType = bv.getBits(16);
+ if (floorType == 0) {
+ bv.skipBits(8+16+16+6+8);
+ unsigned floor0_number_of_books = bv.getBits(4) + 1;
+ bv.skipBits(floor0_number_of_books*8);
+ } else if (floorType == 1) {
+ unsigned floor1_partitions = bv.getBits(5);
+
+ unsigned* floor1_partition_class_list = new unsigned[floor1_partitions];
+ unsigned maximum_class = 0, j;
+ for (j = 0; j < floor1_partitions; ++j) {
+ floor1_partition_class_list[j] = bv.getBits(4);
+ if (floor1_partition_class_list[j] > maximum_class) maximum_class = floor1_partition_class_list[j];
+ }
+
+ unsigned* floor1_class_dimensions = new unsigned[maximum_class + 1];
+ for (j = 0; j <= maximum_class; ++j) {
+ floor1_class_dimensions[j] = bv.getBits(3) + 1;
+ unsigned floor1_class_subclasses = bv.getBits(2);
+ if (floor1_class_subclasses != 0) {
+ bv.skipBits(8); // "floor1_class_masterbooks[j]"
+ }
+
+ unsigned twoExp_floor1_class_subclasses = 1 << floor1_class_subclasses;
+ bv.skipBits(twoExp_floor1_class_subclasses*8); // "floor1_subclass_books[j][*]"
+ }
+
+ bv.skipBits(2); // "floor1_multiplier"
+ unsigned rangebits = bv.getBits(4);
+ for (j = 0; j < floor1_partitions; ++j) {
+ unsigned current_class_number = floor1_partition_class_list[j];
+ bv.skipBits(floor1_class_dimensions[current_class_number] * rangebits);
+ }
+
+ delete[] floor1_partition_class_list;
+ delete[] floor1_class_dimensions;
+ } else { // floorType > 1
+ fprintf(stderr, "Vorbis Floors, read bad floor type %d\n", floorType);
+ return False;
+ }
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetup_residues(LEBitVector& bv) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned vorbis_residue_count = bv.getBits(6) + 1;
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\tResidues: vorbis_residue_count: %d\n", vorbis_residue_count);
+#endif
+ for (unsigned i = 0; i < vorbis_residue_count; ++i) {
+ unsigned vorbis_residue_type = bv.getBits(16);
+ if (vorbis_residue_type > 2) {
+ fprintf(stderr, "Vorbis Residues, read bad vorbis_residue_type: %d\n", vorbis_residue_type);
+ return False;
+ } else {
+ bv.skipBits(24+24+24); // "residue_begin", "residue_end", "residue_partition_size"
+ unsigned residue_classifications = bv.getBits(6) + 1;
+ bv.skipBits(8); // "residue_classbook"
+
+ u_int8_t* residue_cascade = new u_int8_t[residue_classifications];
+ unsigned j;
+ for (j = 0; j < residue_classifications; ++j) {
+ u_int8_t high_bits = 0;
+ u_int8_t low_bits = bv.getBits(3);
+ unsigned bitflag = bv.getBits(1);
+ if (bitflag) {
+ high_bits = bv.getBits(5);
+ }
+
+ residue_cascade[j] = (high_bits<<3) | low_bits;
+ }
+
+ for (j = 0; j < residue_classifications; ++j) {
+ u_int8_t const cascade = residue_cascade[j];
+ u_int8_t mask = 0x80;
+ while (mask != 0) {
+ if ((cascade&mask) != 0) bv.skipBits(8); // "residue_books[j][*]"
+ mask >>= 1;
+ }
+ }
+
+ delete[] residue_cascade;
+ }
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetup_mappings(LEBitVector& bv, unsigned audio_channels) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned vorbis_mapping_count = bv.getBits(6) + 1;
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\tMappings: vorbis_mapping_count: %d\n", vorbis_mapping_count);
+#endif
+ for (unsigned i = 0; i < vorbis_mapping_count; ++i) {
+ unsigned vorbis_mapping_type = bv.getBits(16);
+ if (vorbis_mapping_type != 0) {
+ fprintf(stderr, "Vorbis Mappings, read bad vorbis_mapping_type: %d\n", vorbis_mapping_type);
+ return False;
+ }
+
+ unsigned vorbis_mapping_submaps = 1;
+ if (bv.getBits(1)) vorbis_mapping_submaps = bv.getBits(4) + 1;
+
+ if (bv.getBits(1)) { // "square polar channel mapping is in use"
+ unsigned vorbis_mapping_coupling_steps = bv.getBits(8) + 1;
+
+ for (unsigned j = 0; j < vorbis_mapping_coupling_steps; ++j) {
+ unsigned ilog_audio_channels_minus_1 = ilog(audio_channels - 1);
+ bv.skipBits(2*ilog_audio_channels_minus_1); // "vorbis_mapping_magnitude", "vorbis_mapping_angle"
+ }
+ }
+
+ unsigned reserved = bv.getBits(2);
+ if (reserved != 0) {
+ fprintf(stderr, "Vorbis Mappings, read bad 'reserved' field\n");
+ return False;
+ }
+
+ if (vorbis_mapping_submaps > 1) {
+ for (unsigned j = 0; j < audio_channels; ++j) {
+ unsigned vorbis_mapping_mux = bv.getBits(4);
+
+ fprintf(stderr, "\t\t\t\tvorbis_mapping_mux[%d]: %d\n", j, vorbis_mapping_mux);
+ if (vorbis_mapping_mux >= vorbis_mapping_submaps) {
+ fprintf(stderr, "Vorbis Mappings, read bad \"vorbis_mapping_mux\" %d (>= \"vorbis_mapping_submaps\" %d)\n", vorbis_mapping_mux, vorbis_mapping_submaps);
+ return False;
+ }
+ }
+ }
+
+ bv.skipBits(vorbis_mapping_submaps*(8+8+8)); // "the floor and residue numbers"
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetup_modes(LEBitVector& bv, OggTrack* track) {
+ if (bv.noMoreBits()) return False;
+
+ unsigned vorbis_mode_count = bv.getBits(6) + 1;
+ unsigned ilog_vorbis_mode_count_minus_1 = ilog(vorbis_mode_count - 1);
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\tModes: vorbis_mode_count: %d (ilog(%d-1):%d)\n",
+ vorbis_mode_count, vorbis_mode_count, ilog_vorbis_mode_count_minus_1);
+#endif
+ track->vtoHdrs.vorbis_mode_count = vorbis_mode_count;
+ track->vtoHdrs.ilog_vorbis_mode_count_minus_1 = ilog_vorbis_mode_count_minus_1;
+ track->vtoHdrs.vorbis_mode_blockflag = new u_int8_t[vorbis_mode_count];
+
+ for (unsigned i = 0; i < vorbis_mode_count; ++i) {
+ track->vtoHdrs.vorbis_mode_blockflag[i] = (u_int8_t)bv.getBits(1);
+#ifdef DEBUG_SETUP_HEADER
+ fprintf(stderr, "\t\tMode %d: vorbis_mode_blockflag: %d\n", i, track->vtoHdrs.vorbis_mode_blockflag[i]);
+#endif
+ bv.skipBits(16+16+8); // "vorbis_mode_windowtype", "vorbis_mode_transformtype", "vorbis_mode_mapping"
+ }
+
+ return True;
+}
+
+static Boolean parseVorbisSetupHeader(OggTrack* track, u_int8_t const* p, unsigned headerSize) {
+ LEBitVector bv(p, headerSize);
+ do {
+ if (!parseVorbisSetup_codebooks(bv)) break;
+ if (!parseVorbisSetup_timeDomainTransforms(bv)) break;
+ if (!parseVorbisSetup_floors(bv)) break;
+ if (!parseVorbisSetup_residues(bv)) break;
+ if (!parseVorbisSetup_mappings(bv, track->numChannels)) break;
+ if (!parseVorbisSetup_modes(bv, track)) break;
+ unsigned framingFlag = bv.getBits(1);
+ if (framingFlag == 0) {
+ fprintf(stderr, "Vorbis \"setup\" header did not end with a 'framing flag'!\n");
+ break;
+ }
+
+ return True;
+ } while (0);
+
+ // An error occurred:
+ return False;
+}
+
#ifdef DEBUG
// Helper macros used (only) by the DEBUG branch of "validateCommentHeader()";
// both assume locals "p" (cursor) and "pEnd" (buffer end) are in scope.
#define CHECK_PTR if (p >= pEnd) return False
#define printComment(p, len) do { for (unsigned k = 0; k < len; ++k) { CHECK_PTR; fprintf(stderr, "%c", *p++); } } while (0)
#endif

// Sanity-checks a Vorbis/Theora/Opus "comment" header.  "isOpus" (default 0)
// is the extra prefix length for Opus ("OpusTags" is 8 bytes vs. 7 for
// "\x03vorbis"/"\x81theora").  Only the overall size is validated; in DEBUG
// builds the vendor string and user comments are also printed to stderr.
// NOTE(review): the DEBUG path reads 4-byte length fields without first
// checking that 4 bytes remain (only the per-character reads are guarded by
// CHECK_PTR) - so a severely truncated header could be over-read; debug-only.
static Boolean validateCommentHeader(u_int8_t const *p, unsigned headerSize,
                                     unsigned isOpus = 0) {
  if (headerSize < 15+isOpus) { // need 7+isOpus + 4(vendor_length) + 4(user_comment_list_length)
    fprintf(stderr, "\"comment\" header is too short (%d bytes)\n", headerSize);
    return False;
  }

#ifdef DEBUG
  u_int8_t const* pEnd = &p[headerSize];
  p += 7+isOpus;

  // All length fields are little-endian:
  u_int32_t vendor_length = (p[3]<<24)|(p[2]<<16)|(p[1]<<8)|p[0]; p += 4;
  fprintf(stderr, "\tvendor_string:");
  printComment(p, vendor_length);
  fprintf(stderr, "\n");

  u_int32_t user_comment_list_length = (p[3]<<24)|(p[2]<<16)|(p[1]<<8)|p[0]; p += 4;
  for (unsigned i = 0; i < user_comment_list_length; ++i) {
    CHECK_PTR; u_int32_t length = (p[3]<<24)|(p[2]<<16)|(p[1]<<8)|p[0]; p += 4;
    fprintf(stderr, "\tuser_comment[%d]:", i);
    printComment(p, length);
    fprintf(stderr, "\n");
  }
#endif

  return True;
}
+
// Returns 2**"exponent".  (In practice "exponent" is a 4-bit value taken from
// the Vorbis "identification" header, so the result is at most 32768.)
static unsigned blocksizeFromExponent(unsigned exponent) {
  unsigned blocksize = 1;
  while (exponent-- > 0) blocksize *= 2;
  return blocksize;
}
+
+Boolean OggFileParser::validateHeader(OggTrack* track, u_int8_t const* p, unsigned headerSize) {
+ // Assert: headerSize >= 7 (because we've already checked "<packet_type>XXXXXX" or "OpusXXXX")
+ if (strcmp(track->mimeType, "audio/VORBIS") == 0) {
+ u_int8_t const firstByte = p[0];
+
+ if (firstByte == 1) { // "identification" header
+ if (headerSize < 30) {
+ fprintf(stderr, "Vorbis \"identification\" header is too short (%d bytes)\n", headerSize);
+ return False;
+ } else if ((p[29]&0x1) != 1) {
+ fprintf(stderr, "Vorbis \"identification\" header: 'framing_flag' is not set\n");
+ return False;
+ }
+
+ p += 7;
+ u_int32_t vorbis_version = (p[3]<<24)|(p[2]<<16)|(p[1]<<8)|p[0]; p += 4;
+ if (vorbis_version != 0) {
+ fprintf(stderr, "Vorbis \"identification\" header has a bad 'vorbis_version': 0x%08x\n", vorbis_version);
+ return False;
+ }
+
+ u_int8_t audio_channels = *p++;
+ if (audio_channels == 0) {
+ fprintf(stderr, "Vorbis \"identification\" header: 'audio_channels' is 0!\n");
+ return False;
+ }
+ track->numChannels = audio_channels;
+
+ u_int32_t audio_sample_rate = (p[3]<<24)|(p[2]<<16)|(p[1]<<8)|p[0]; p += 4;
+ if (audio_sample_rate == 0) {
+ fprintf(stderr, "Vorbis \"identification\" header: 'audio_sample_rate' is 0!\n");
+ return False;
+ }
+ track->samplingFrequency = audio_sample_rate;
+
+ p += 4; // skip over 'bitrate_maximum'
+ u_int32_t bitrate_nominal = (p[3]<<24)|(p[2]<<16)|(p[1]<<8)|p[0]; p += 4;
+ if (bitrate_nominal > 0) track->estBitrate = (bitrate_nominal+500)/1000; // round
+
+ p += 4; // skip over 'bitrate_maximum'
+
+ // Note the two 'block sizes' (samples per packet), and their durations in microseconds:
+ u_int8_t blocksizeBits = *p++;
+ unsigned& blocksize_0 = track->vtoHdrs.blocksize[0]; // alias
+ unsigned& blocksize_1 = track->vtoHdrs.blocksize[1]; // alias
+ blocksize_0 = blocksizeFromExponent(blocksizeBits&0x0F);
+ blocksize_1 = blocksizeFromExponent(blocksizeBits>>4);
+
+ double uSecsPerSample = 1000000.0/(track->samplingFrequency*2);
+ // Why the "2"? I don't know, but it seems to be necessary
+ track->vtoHdrs.uSecsPerPacket[0] = (unsigned)(uSecsPerSample*blocksize_0);
+ track->vtoHdrs.uSecsPerPacket[1] = (unsigned)(uSecsPerSample*blocksize_1);
+#ifdef DEBUG
+ fprintf(stderr, "\t%u Hz, %u-channel, %u kbps (est), block sizes: %u,%u (%u,%u us)\n",
+ track->samplingFrequency, track->numChannels, track->estBitrate,
+ blocksize_0, blocksize_1,
+ track->vtoHdrs.uSecsPerPacket[0], track->vtoHdrs.uSecsPerPacket[1]);
+#endif
+ // To be valid, "blocksize_0" must be <= "blocksize_1", and both must be in [64,8192]:
+ if (!(blocksize_0 <= blocksize_1 && blocksize_0 >= 64 && blocksize_1 <= 8192)) {
+ fprintf(stderr, "Invalid Vorbis \"blocksize_0\" (%d) and/or \"blocksize_1\" (%d)!\n",
+ blocksize_0, blocksize_1);
+ return False;
+ }
+ } else if (firstByte == 3) { // "comment" header
+ if (!validateCommentHeader(p, headerSize)) return False;
+ } else if (firstByte == 5) { // "setup" header
+ // Parse the "setup" header to get the values that we want:
+ // "vorbis_mode_count", and "vorbis_mode_blockflag" for each mode. Unfortunately these come
+ // near the end of the header, so we have to parse lots of other crap first.
+ p += 7;
+ if (!parseVorbisSetupHeader(track, p, headerSize)) {
+ fprintf(stderr, "Failed to parse Vorbis \"setup\" header!\n");
+ return False;
+ }
+ }
+ } else if (strcmp(track->mimeType, "video/THEORA") == 0) {
+ u_int8_t const firstByte = p[0];
+
+ if (firstByte == 0x80) { // "identification" header
+ if (headerSize < 42) {
+ fprintf(stderr, "Theora \"identification\" header is too short (%d bytes)\n", headerSize);
+ return False;
+ } else if ((p[41]&0x7) != 0) {
+ fprintf(stderr, "Theora \"identification\" header: 'res' bits are non-zero\n");
+ return False;
+ }
+
+ track->vtoHdrs.KFGSHIFT = ((p[40]&3)<<3) | (p[41]>>5);
+ u_int32_t FRN = (p[22]<<24) | (p[23]<<16) | (p[24]<<8) | p[25]; // Frame rate numerator
+ u_int32_t FRD = (p[26]<<24) | (p[27]<<16) | (p[28]<<8) | p[29]; // Frame rate numerator
+#ifdef DEBUG
+ fprintf(stderr, "\tKFGSHIFT %d, Frame rate numerator %d, Frame rate denominator %d\n", track->vtoHdrs.KFGSHIFT, FRN, FRD);
+#endif
+ if (FRN == 0 || FRD == 0) {
+ fprintf(stderr, "Theora \"identification\" header: Bad FRN and/or FRD values: %d, %d\n", FRN, FRD);
+ return False;
+ }
+ track->vtoHdrs.uSecsPerFrame = (unsigned)((1000000.0*FRD)/FRN);
+#ifdef DEBUG
+ fprintf(stderr, "\t\t=> %u microseconds per frame\n", track->vtoHdrs.uSecsPerFrame);
+#endif
+ } else if (firstByte == 0x81) { // "comment" header
+ if (!validateCommentHeader(p, headerSize)) return False;
+ } else if (firstByte == 0x82) { // "setup" header
+ // We don't care about the contents of the Theora "setup" header; just assume it's valid
+ }
+ } else { // Opus audio
+ if (strncmp((char const*)p, "OpusHead", 8) == 0) { // "identification" header
+ // Just check the size, and the 'major' number of the version byte:
+ if (headerSize < 19 || (p[8]&0xF0) != 0) return False;
+ } else { // comment header
+ if (!validateCommentHeader(p, headerSize, 1/*isOpus*/)) return False;
+ }
+ }
+
+ return True;
+}
+
+void OggFileParser::parseAndDeliverPages() {
+#ifdef DEBUG
+ fprintf(stderr, "parsing and delivering data\n");
+#endif
+ while (parseAndDeliverPage()) {}
+}
+
+Boolean OggFileParser::parseAndDeliverPage() {
+ u_int8_t header_type_flag;
+ u_int32_t bitstream_serial_number;
+ parseStartOfPage(header_type_flag, bitstream_serial_number);
+
+ OggDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(bitstream_serial_number);
+ if (demuxedTrack == NULL) { // this track is not being read
+#ifdef DEBUG
+ fprintf(stderr, "\tIgnoring page from unread track; skipping %d remaining packet data bytes\n",
+ fPacketSizeTable->totSizes);
+#endif
+ skipBytes(fPacketSizeTable->totSizes);
+ return True;
+ } else if (fPacketSizeTable->totSizes == 0) {
+ // This page is empty (has no packets). Skip it and continue
+#ifdef DEBUG
+ fprintf(stderr, "\t[track: %s] Skipping empty page\n", demuxedTrack->MIMEtype());
+#endif
+ return True;
+ }
+
+ // Start delivering packets next:
+ demuxedTrack->fCurrentPageIsContinuation = (header_type_flag&0x01) != 0;
+ fCurrentTrackNumber = bitstream_serial_number;
+ fCurrentParseState = DELIVERING_PACKET_WITHIN_PAGE;
+ saveParserState();
+ return False;
+}
+
// Delivers the next packet of the current page to its demuxed track: copies
// the packet data, computes its duration and presentation time, then either
// advances to the next packet or reverts to page-parsing state.  Returns
// False when delivery should pause (client not ready handled separately;
// False means an incomplete trailing packet ends the page, or no track).
Boolean OggFileParser::deliverPacketWithinPage() {
  OggDemuxedTrack* demuxedTrack = fOurDemux->lookupDemuxedTrack(fCurrentTrackNumber);
  if (demuxedTrack == NULL) return False; // should not happen

  unsigned packetNum = fPacketSizeTable->nextPacketNumToDeliver;
  unsigned packetSize = fPacketSizeTable->size[packetNum];

  if (!demuxedTrack->isCurrentlyAwaitingData()) {
    // Someone has been reading this stream, but isn't right now.
    // We can't deliver this frame until he asks for it, so punt for now.
    // The next time he asks for a frame, he'll get it.
#ifdef DEBUG
    fprintf(stderr, "\t[track: %s] Deferring delivery of packet %d (%d bytes%s)\n",
	    demuxedTrack->MIMEtype(), packetNum, packetSize,
	    packetNum == fPacketSizeTable->numCompletedPackets ? " (incomplete)" : "");
#endif
    return True;
  }

  // Deliver the next packet:
#ifdef DEBUG
  fprintf(stderr, "\t[track: %s] Delivering packet %d (%d bytes%s)\n", demuxedTrack->MIMEtype(),
	  packetNum, packetSize,
	  packetNum == fPacketSizeTable->numCompletedPackets ? " (incomplete)" : "");
#endif
  // Copy at most the client's remaining buffer space; excess bytes are counted
  // as truncated below:
  unsigned numBytesDelivered
    = packetSize < demuxedTrack->maxSize() ? packetSize : demuxedTrack->maxSize();
  getBytes(demuxedTrack->to(), numBytesDelivered);
  // Remember the packet's first two bytes; they're needed below to classify
  // the packet (header vs. data) and compute its duration:
  u_int8_t firstByte = numBytesDelivered > 0 ? demuxedTrack->to()[0] : 0x00;
  u_int8_t secondByte = numBytesDelivered > 1 ? demuxedTrack->to()[1] : 0x00;
  demuxedTrack->to() += numBytesDelivered;

  if (demuxedTrack->fCurrentPageIsContinuation) { // the previous page's read was incomplete
    demuxedTrack->frameSize() += numBytesDelivered;
  } else {
    // This is the first delivery for this "doGetNextFrame()" call.
    demuxedTrack->frameSize() = numBytesDelivered;
  }
  if (packetSize > demuxedTrack->maxSize()) {
    demuxedTrack->numTruncatedBytes() += packetSize - demuxedTrack->maxSize();
  }
  demuxedTrack->maxSize() -= numBytesDelivered;

  // Figure out the duration and presentation time of this frame.
  unsigned durationInMicroseconds;
  OggTrack* track = fOurFile.lookup(demuxedTrack->fOurTrackNumber);

  if (strcmp(track->mimeType, "audio/VORBIS") == 0) {
    if ((firstByte&0x01) != 0) { // This is a header packet
      durationInMicroseconds = 0;
    } else { // This is a data packet.
      // Parse the first byte to figure out its duration.
      // Extract the next "track->vtoHdrs.ilog_vorbis_mode_count_minus_1" bits of the first byte:
      u_int8_t const mask = 0xFE<<(track->vtoHdrs.ilog_vorbis_mode_count_minus_1);
      u_int8_t const modeNumber = (firstByte&~mask)>>1;
      if (modeNumber >= track->vtoHdrs.vorbis_mode_count) {
	fprintf(stderr, "Error: Bad mode number %d (>= vorbis_mode_count %d) in Vorbis packet!\n",
		modeNumber, track->vtoHdrs.vorbis_mode_count);
	durationInMicroseconds = 0;
      } else {
	// The mode's block flag selects the short (0) or long (1) block size:
	unsigned blockNumber = track->vtoHdrs.vorbis_mode_blockflag[modeNumber];
	durationInMicroseconds = track->vtoHdrs.uSecsPerPacket[blockNumber];
      }
    }
  } else if (strcmp(track->mimeType, "video/THEORA") == 0) {
    if ((firstByte&0x80) != 0) { // This is a header packet
      durationInMicroseconds = 0;
    } else { // This is a data packet.
      durationInMicroseconds = track->vtoHdrs.uSecsPerFrame;
    }
  } else { // "audio/OPUS"
    if (firstByte == 0x4F/*'O'*/ && secondByte == 0x70/*'p'*/) { // This is a header packet
      durationInMicroseconds = 0;
    } else { // This is a data packet.
      // Parse the first byte to figure out the duration of each frame, and then (if necessary)
      // parse the second byte to figure out how many frames are in this packet:
      u_int8_t config = firstByte >> 3;
      u_int8_t c = firstByte & 0x03;
      unsigned const configDuration[32] = { // in microseconds
	10000, 20000, 40000, 60000, // config 0..3
	10000, 20000, 40000, 60000, // config 4..7
	10000, 20000, 40000, 60000, // config 8..11
	10000, 20000, // config 12..13
	10000, 20000, // config 14..15
	2500, 5000, 10000, 20000, // config 16..19
	2500, 5000, 10000, 20000, // config 20..23
	2500, 5000, 10000, 20000, // config 24..27
	2500, 5000, 10000, 20000 // config 28..31
      };
      // NOTE(review): for code 3 packets, a frame count of 0 is invalid per the
      // Opus spec; such a packet would get a 0 duration here - confirm intended.
      unsigned const numFramesInPacket = c == 0 ? 1 : c == 3 ? (secondByte&0x3F) : 2;
      durationInMicroseconds = numFramesInPacket*configDuration[config];
    }
  }

  if (demuxedTrack->nextPresentationTime().tv_sec == 0 && demuxedTrack->nextPresentationTime().tv_usec == 0) {
    // This is the first delivery. Initialize "demuxedTrack->nextPresentationTime()":
    gettimeofday(&demuxedTrack->nextPresentationTime(), NULL);
  }
  demuxedTrack->presentationTime() = demuxedTrack->nextPresentationTime();
  demuxedTrack->durationInMicroseconds() = durationInMicroseconds;

  // Advance the next presentation time by this frame's duration, normalizing
  // the microseconds field:
  demuxedTrack->nextPresentationTime().tv_usec += durationInMicroseconds;
  while (demuxedTrack->nextPresentationTime().tv_usec >= 1000000) {
    ++demuxedTrack->nextPresentationTime().tv_sec;
    demuxedTrack->nextPresentationTime().tv_usec -= 1000000;
  }
  saveParserState();

  // And check whether there's a next packet in this page:
  if (packetNum == fPacketSizeTable->numCompletedPackets) {
    // This delivery was for an incomplete packet, at the end of the page.
    // Return without completing delivery:
    fCurrentParseState = PARSING_AND_DELIVERING_PAGES;
    return False;
  }

  if (packetNum < fPacketSizeTable->numCompletedPackets-1
      || fPacketSizeTable->lastPacketIsIncomplete) {
    // There is at least one more packet (possibly incomplete) left in this page.
    // Deliver it next:
    ++fPacketSizeTable->nextPacketNumToDeliver;
  } else {
    // Start parsing a new page next:
    fCurrentParseState = PARSING_AND_DELIVERING_PAGES;
  }

  FramedSource::afterGetting(demuxedTrack); // completes delivery
  return True;
}
+
// Parses an Ogg page header: resynchronizes on the "OggS" capture pattern,
// reads the fixed header fields (returning "header_type_flag" and
// "bitstream_serial_number" to the caller), and builds a fresh
// "fPacketSizeTable" from the page's segment table.  On return, the parse
// position is at the start of the page's packet data.
void OggFileParser::parseStartOfPage(u_int8_t& header_type_flag,
				     u_int32_t& bitstream_serial_number) {
  saveParserState();
  // First, make sure we start with the 'capture_pattern': 0x4F676753 ('OggS'):
  while (test4Bytes() != 0x4F676753) {
    skipBytes(1);
    saveParserState(); // ensures forward progress through the file
  }
  skipBytes(4);
#ifdef DEBUG
  fprintf(stderr, "\nSaw Ogg page header:\n");
#endif

  // An unknown version is noted, but deliberately not treated as fatal:
  u_int8_t stream_structure_version = get1Byte();
  if (stream_structure_version != 0) {
    fprintf(stderr, "Saw page with unknown Ogg file version number: 0x%02x\n", stream_structure_version);
  }

  header_type_flag = get1Byte();
#ifdef DEBUG
  fprintf(stderr, "\theader_type_flag: 0x%02x (", header_type_flag);
  if (header_type_flag&0x01) fprintf(stderr, "continuation ");
  if (header_type_flag&0x02) fprintf(stderr, "bos ");
  if (header_type_flag&0x04) fprintf(stderr, "eos ");
  fprintf(stderr, ")\n");
#endif

  // The Ogg header fields are little-endian on disk; byteSwap() presumably
  // converts from the big-endian result of get4Bytes() - TODO confirm.
  // Note: the 64-bit granule position is read as two 32-bit halves, and the
  // CRC_checksum is read but not verified here.
  u_int32_t granule_position1 = byteSwap(get4Bytes());
  u_int32_t granule_position2 = byteSwap(get4Bytes());
  bitstream_serial_number = byteSwap(get4Bytes());
  u_int32_t page_sequence_number = byteSwap(get4Bytes());
  u_int32_t CRC_checksum = byteSwap(get4Bytes());
  u_int8_t number_page_segments = get1Byte();
#ifdef DEBUG
  fprintf(stderr, "\tgranule_position 0x%08x%08x, bitstream_serial_number 0x%08x, page_sequence_number 0x%08x, CRC_checksum 0x%08x, number_page_segments %d\n", granule_position2, granule_position1, bitstream_serial_number, page_sequence_number, CRC_checksum, number_page_segments);
#else
  // Dummy statements to prevent 'unused variable' compiler warnings:
#define DUMMY_STATEMENT(x) do {x = x;} while (0)
  DUMMY_STATEMENT(granule_position1);
  DUMMY_STATEMENT(granule_position2);
  DUMMY_STATEMENT(page_sequence_number);
  DUMMY_STATEMENT(CRC_checksum);
#endif

  // Look at the "segment_table" to count the sizes of the packets in this page:
  delete fPacketSizeTable/*if any*/; fPacketSizeTable = new PacketSizeTable(number_page_segments);
  u_int8_t lacing_value = 0;
#ifdef DEBUG
  fprintf(stderr, "\tsegment_table\n");
#endif
  for (unsigned i = 0; i < number_page_segments; ++i) {
    lacing_value = get1Byte();
#ifdef DEBUG
    fprintf(stderr, "\t\t%d:\t%d", i, lacing_value);
#endif
    fPacketSizeTable->totSizes += lacing_value;
    fPacketSizeTable->size[fPacketSizeTable->numCompletedPackets] += lacing_value;
    if (lacing_value < 255) {
      // This completes a packet:
#ifdef DEBUG
      fprintf(stderr, " (->%d)", fPacketSizeTable->size[fPacketSizeTable->numCompletedPackets]);
#endif
      ++fPacketSizeTable->numCompletedPackets;
    }
#ifdef DEBUG
    fprintf(stderr, "\n");
#endif
  }

  // A final lacing value of 255 means the page's last packet continues on the next page:
  fPacketSizeTable->lastPacketIsIncomplete = lacing_value == 255;
}
diff --git a/liveMedia/OggFileParser.hh b/liveMedia/OggFileParser.hh
new file mode 100644
index 0000000..f25e97f
--- /dev/null
+++ b/liveMedia/OggFileParser.hh
@@ -0,0 +1,91 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A parser for an Ogg file
+// C++ header
+
#ifndef _OGG_FILE_PARSER_HH
#define _OGG_FILE_PARSER_HH
+
+#ifndef _STREAM_PARSER_HH
+#include "StreamParser.hh"
+#endif
+#ifndef _OGG_FILE_HH
+#include "OggFile.hh"
+#endif
+
+// An enum representing the current state of the parser:
enum OggParseState {
  PARSING_START_OF_FILE, // initial state: parsing the file's BOS pages and codec headers
  PARSING_AND_DELIVERING_PAGES, // scanning for the next page of packet data
  DELIVERING_PACKET_WITHIN_PAGE // delivering the current page's packets, one at a time
};
+
+// A structure that counts the sizes of 'packets' given by each page's "segment_table":
class PacketSizeTable {
public:
  PacketSizeTable(unsigned number_page_segments);
  ~PacketSizeTable();

  unsigned numCompletedPackets; // will be <= "number_page_segments"
  unsigned* size; // an array of sizes of each of the packets
  unsigned totSizes; // the sum of all packet sizes (the page's total payload size)
  unsigned nextPacketNumToDeliver; // index (into "size") of the next packet to deliver
  Boolean lastPacketIsIncomplete; // iff the last segment's 'lacing' was 255
};
+
class OggFileParser: public StreamParser {
public:
  OggFileParser(OggFile& ourFile, FramedSource* inputSource,
		FramedSource::onCloseFunc* onEndFunc, void* onEndClientData,
		OggDemux* ourDemux = NULL);
  virtual ~OggFileParser();

  // StreamParser 'client continue' function:
  static void continueParsing(void* clientData, unsigned char* ptr, unsigned size, struct timeval presentationTime);
  void continueParsing();

private:
  // True while some track's codec headers remain to be parsed:
  Boolean needHeaders() { return fNumUnfulfilledTracks > 0; }

  // Parsing functions:
  Boolean parse(); // returns True iff we have finished parsing all BOS pages (on initialization)

  Boolean parseStartOfFile();
  u_int8_t parseInitialPage(); // returns the 'header_type_flag' byte
  void parseAndDeliverPages();
  Boolean parseAndDeliverPage(); // returns True iff the caller should keep scanning for pages
  Boolean deliverPacketWithinPage();
  void parseStartOfPage(u_int8_t& header_type_flag, u_int32_t& bitstream_serial_number);

  // Validates one codec header packet for "track", recording its parameters:
  Boolean validateHeader(OggTrack* track, u_int8_t const* p, unsigned headerSize);

private:
  // General state for parsing:
  OggFile& fOurFile;
  FramedSource* fInputSource;
  FramedSource::onCloseFunc* fOnEndFunc;
  void* fOnEndClientData;
  OggDemux* fOurDemux; // may be NULL (e.g., while only reading headers)
  OggParseState fCurrentParseState;

  unsigned fNumUnfulfilledTracks; // # of tracks whose headers are still unparsed
  PacketSizeTable* fPacketSizeTable; // packet sizes for the current page
  u_int32_t fCurrentTrackNumber; // serial number of the page being delivered
  u_int8_t* fSavedPacket; // used to temporarily save a copy of a 'packet' from a page
};
+
+#endif
diff --git a/liveMedia/OggFileServerDemux.cpp b/liveMedia/OggFileServerDemux.cpp
new file mode 100644
index 0000000..a303893
--- /dev/null
+++ b/liveMedia/OggFileServerDemux.cpp
@@ -0,0 +1,109 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server demultiplexor for an Ogg file
+// Implementation
+
+#include "OggFileServerDemux.hh"
+#include "OggFileServerMediaSubsession.hh"
+
+void OggFileServerDemux
+::createNew(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData) {
+ (void)new OggFileServerDemux(env, fileName,
+ onCreation, onCreationClientData);
+}
+
+ServerMediaSubsession* OggFileServerDemux::newServerMediaSubsession() {
+ u_int32_t dummyResultTrackNumber;
+ return newServerMediaSubsession(dummyResultTrackNumber);
+}
+
+ServerMediaSubsession* OggFileServerDemux
+::newServerMediaSubsession(u_int32_t& resultTrackNumber) {
+ resultTrackNumber = 0;
+
+ OggTrack* nextTrack = fIter->next();
+ if (nextTrack == NULL) return NULL;
+
+ return newServerMediaSubsessionByTrackNumber(nextTrack->trackNumber);
+}
+
+ServerMediaSubsession* OggFileServerDemux
+::newServerMediaSubsessionByTrackNumber(u_int32_t trackNumber) {
+ OggTrack* track = fOurOggFile->lookup(trackNumber);
+ if (track == NULL) return NULL;
+
+ ServerMediaSubsession* result = OggFileServerMediaSubsession::createNew(*this, track);
+ if (result != NULL) {
+#ifdef DEBUG
+ fprintf(stderr, "Created 'ServerMediaSubsession' object for track #%d: (%s)\n", track->trackNumber, track->mimeType);
+#endif
+ }
+
+ return result;
+}
+
// Returns a demultiplexed source for "trackNumber", reusing the most recently
// created "OggDemux" when the request comes from the same (non-zero) client
// session, and creating a fresh demultiplexor otherwise.
FramedSource* OggFileServerDemux::newDemuxedTrack(unsigned clientSessionId, u_int32_t trackNumber) {
  OggDemux* demuxToUse = NULL;

  if (clientSessionId != 0 && clientSessionId == fLastClientSessionId) {
    demuxToUse = fLastCreatedDemux; // use the same demultiplexor as before
      // Note: This code relies upon the fact that the creation of streams for different
      // client sessions do not overlap - so all demuxed tracks are created for one "OggDemux" at a time.
      // Also, the "clientSessionId != 0" test is a hack, because 'session 0' is special; its audio and video streams
      // are created and destroyed one-at-a-time, rather than both streams being
      // created, and then (later) both streams being destroyed (as is the case
      // for other ('real') session ids).  Because of this, a separate demultiplexor is used for each 'session 0' track.
  }

  if (demuxToUse == NULL) demuxToUse = fOurOggFile->newDemux();

  // Remember this demux/session pairing so a follow-up request from the same
  // session (e.g. for the other media track) reuses the same demultiplexor:
  fLastClientSessionId = clientSessionId;
  fLastCreatedDemux = demuxToUse;

  return demuxToUse->newDemuxedTrackByTrackNumber(trackNumber);
}
+
+OggFileServerDemux
+::OggFileServerDemux(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData)
+ : Medium(env),
+ fFileName(fileName), fOnCreation(onCreation), fOnCreationClientData(onCreationClientData),
+ fIter(NULL/*until the OggFile is created*/),
+ fLastClientSessionId(0), fLastCreatedDemux(NULL) {
+ OggFile::createNew(env, fileName, onOggFileCreation, this);
+}
+
OggFileServerDemux::~OggFileServerDemux() {
  // Close the underlying "OggFile", then dispose of our track iterator:
  Medium::close(fOurOggFile);

  delete fIter;
}
+
+void OggFileServerDemux::onOggFileCreation(OggFile* newFile, void* clientData) {
+ ((OggFileServerDemux*)clientData)->onOggFileCreation(newFile);
+}
+
// Called once the "OggFile" has finished being created; records it, builds our
// track iterator, then notifies our own client that creation is complete.
void OggFileServerDemux::onOggFileCreation(OggFile* newFile) {
  fOurOggFile = newFile;

  fIter = new OggTrackTableIterator(fOurOggFile->trackTable());

  // Now, call our own creation notification function:
  if (fOnCreation != NULL) (*fOnCreation)(this, fOnCreationClientData);
}
diff --git a/liveMedia/OggFileServerMediaSubsession.cpp b/liveMedia/OggFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..c5a0f98
--- /dev/null
+++ b/liveMedia/OggFileServerMediaSubsession.cpp
@@ -0,0 +1,54 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a track within an Ogg file.
+// Implementation
+
+#include "OggFileServerMediaSubsession.hh"
+#include "OggDemuxedTrack.hh"
+#include "FramedFilter.hh"
+
+OggFileServerMediaSubsession* OggFileServerMediaSubsession
+::createNew(OggFileServerDemux& demux, OggTrack* track) {
+ return new OggFileServerMediaSubsession(demux, track);
+}
+
// Constructor; "track" is borrowed from (and remains owned by) the demux's "OggFile".
OggFileServerMediaSubsession
::OggFileServerMediaSubsession(OggFileServerDemux& demux, OggTrack* track)
  : FileServerMediaSubsession(demux.envir(), demux.fileName(), False),
    fOurDemux(demux), fTrack(track), fNumFiltersInFrontOfTrack(0) {
}
+
// Destructor; nothing to do — "fTrack" is owned by the "OggFile", not by us.
OggFileServerMediaSubsession::~OggFileServerMediaSubsession() {
}
+
// Creates the source chain for this track: a demuxed track source, wrapped by
// whatever streaming filters the "OggFile" deems necessary for the codec.
FramedSource* OggFileServerMediaSubsession
::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) {
  FramedSource* baseSource = fOurDemux.newDemuxedTrack(clientSessionId, fTrack->trackNumber);
  if (baseSource == NULL) return NULL;

  // "fNumFiltersInFrontOfTrack" is updated by this call to record how deeply
  // the base source got wrapped:
  return fOurDemux.ourOggFile()
    ->createSourceForStreaming(baseSource, fTrack->trackNumber,
			       estBitrate, fNumFiltersInFrontOfTrack);
}
+
+RTPSink* OggFileServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {
+ return fOurDemux.ourOggFile()
+ ->createRTPSinkForTrackNumber(fTrack->trackNumber, rtpGroupsock, rtpPayloadTypeIfDynamic);
+}
diff --git a/liveMedia/OggFileServerMediaSubsession.hh b/liveMedia/OggFileServerMediaSubsession.hh
new file mode 100644
index 0000000..27741c9
--- /dev/null
+++ b/liveMedia/OggFileServerMediaSubsession.hh
@@ -0,0 +1,53 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a track within an Ogg file.
+// C++ header
+
+#ifndef _OGG_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _OGG_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+#ifndef _OGG_FILE_SERVER_DEMUX_HH
+#include "OggFileServerDemux.hh"
+#endif
+
// A 'ServerMediaSubsession' for one track of an Ogg file; sources and RTP
// sinks are created on demand via the shared "OggFileServerDemux".
class OggFileServerMediaSubsession: public FileServerMediaSubsession {
public:
  static OggFileServerMediaSubsession*
  createNew(OggFileServerDemux& demux, OggTrack* track);

protected:
  OggFileServerMediaSubsession(OggFileServerDemux& demux, OggTrack* track);
      // called only by createNew(), or by subclass constructors
  virtual ~OggFileServerMediaSubsession();

protected: // redefined virtual functions
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
					      unsigned& estBitrate);
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);

protected:
  OggFileServerDemux& fOurDemux; // shared with our sibling subsessions
  OggTrack* fTrack; // owned by the demux's "OggFile", not by us
  unsigned fNumFiltersInFrontOfTrack; // set by "createNewStreamSource()"
};
+
+#endif
diff --git a/liveMedia/OggFileSink.cpp b/liveMedia/OggFileSink.cpp
new file mode 100644
index 0000000..dbfb1af
--- /dev/null
+++ b/liveMedia/OggFileSink.cpp
@@ -0,0 +1,274 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// 'Ogg' File Sink (recording a single media track only)
+// Implementation
+
+#include "OggFileSink.hh"
+#include "OutputFile.hh"
+#include "VorbisAudioRTPSource.hh" // for "parseVorbisOrTheoraConfigStr()"
+#include "MPEG2TransportStreamMultiplexor.hh" // for calculateCRC()
+#include "FramedSource.hh"
+
+OggFileSink* OggFileSink
+::createNew(UsageEnvironment& env, char const* fileName,
+ unsigned samplingFrequency, char const* configStr,
+ unsigned bufferSize, Boolean oneFilePerFrame) {
+ do {
+ FILE* fid;
+ char const* perFrameFileNamePrefix;
+ if (oneFilePerFrame) {
+ // Create the fid for each frame
+ fid = NULL;
+ perFrameFileNamePrefix = fileName;
+ } else {
+ // Normal case: create the fid once
+ fid = OpenOutputFile(env, fileName);
+ if (fid == NULL) break;
+ perFrameFileNamePrefix = NULL;
+ }
+
+ return new OggFileSink(env, fid, samplingFrequency, configStr, bufferSize, perFrameFileNamePrefix);
+ } while (0);
+
+ return NULL;
+}
+
// Constructor.  Besides the usual state, it pre-fills the 27-byte Ogg page
// header template ("fPageHeaderBytes") with the fields that never change;
// the per-page fields are patched in on every write (see "addData()").
OggFileSink::OggFileSink(UsageEnvironment& env, FILE* fid,
			 unsigned samplingFrequency, char const* configStr,
			 unsigned bufferSize, char const* perFrameFileNamePrefix)
  : FileSink(env, fid, bufferSize, perFrameFileNamePrefix),
    fSamplingFrequency(samplingFrequency), fConfigStr(strDup(configStr)),
    fHaveWrittenFirstFrame(False), fHaveSeenEOF(False),
    fGranulePosition(0), fGranulePositionAdjustment(0), fPageSequenceNumber(0),
    fIsTheora(False), fGranuleIncrementPerFrame(1),
    fAltFrameSize(0), fAltNumTruncatedBytes(0) {
  // "fAltBuffer" holds the previously-received frame (frames are written one
  // delivery late; see "afterGettingFrame()"):
  fAltBuffer = new unsigned char[bufferSize];

  // Initialize our 'Ogg page header' array with constant values:
  u_int8_t* p = fPageHeaderBytes;
  *p++=0x4f; *p++=0x67; *p++=0x67; *p++=0x53; // bytes 0..3: 'capture_pattern': "OggS"
  *p++=0; // byte 4: 'stream_structure_version': 0
  *p++=0; // byte 5: 'header_type_flag': set on each write
  *p++=0; *p++=0; *p++=0; *p++=0; *p++=0; *p++=0; *p++=0; *p++=0;
      // bytes 6..13: 'granule_position': set on each write
  *p++=1; *p++=0; *p++=0; *p++=0; // bytes 14..17: 'bitstream_serial_number': 1
  *p++=0; *p++=0; *p++=0; *p++=0; // bytes 18..21: 'page_sequence_number': set on each write
  *p++=0; *p++=0; *p++=0; *p++=0; // bytes 22..25: 'CRC_checksum': set on each write
  *p=0; // byte 26: 'number_page_segments': set on each write
}
+
OggFileSink::~OggFileSink() {
  // We still have the previously-arrived frame, so write it to the file before we end.
  // (Setting "fHaveSeenEOF" first makes "addData()" mark that page 'eos'.)
  fHaveSeenEOF = True;
  OggFileSink::addData(fAltBuffer, fAltFrameSize, fAltPresentationTime);

  delete[] fAltBuffer;
  delete[] (char*)fConfigStr; // cast needed: "strDup()" allocated it as char[]
}
+
// Request the next frame from our source.  Returns False iff there is no source.
Boolean OggFileSink::continuePlaying() {
  // Identical to "FileSink::continuePlaying()",
  // except that we use our own 'on source closure' function
  // (so that we can flush the final buffered frame and set 'eos'):
  if (fSource == NULL) return False;

  fSource->getNextFrame(fBuffer, fBufferSize,
			FileSink::afterGettingFrame, this,
			ourOnSourceClosure, this);
  return True;
}
+
+#define PAGE_DATA_MAX_SIZE (255*255)
+
// Writes one frame to the file as one or more complete Ogg pages:
// updates the granule position, then, for each page, patches the mutable
// header fields (flags, granule position, sequence number, CRC, lacing),
// and writes header + segment table + payload.
void OggFileSink::addData(unsigned char const* data, unsigned dataSize,
			  struct timeval presentationTime) {
  if (dataSize == 0) return;

  // Set "fGranulePosition" for this frame:
  if (fIsTheora) {
    // Special case for Theora: "fGranulePosition" is supposed to be made up of a pair:
    //   (frame count to last key frame) | (frame count since last key frame)
    // However, because there appears to be no easy way to figure out which frames are key frames,
    // we just assume that all frames are key frames.
    if (!(data[0] >= 0x80 && data[0] <= 0x82)) { // for header pages, "fGranulePosition" remains 0
      fGranulePosition += fGranuleIncrementPerFrame;
    }
  } else {
    // Normal (audio) case: the granule position is the sample count implied by
    // the presentation-time offset from the first frame:
    double ptDiff
      = (presentationTime.tv_sec - fFirstPresentationTime.tv_sec)
      + (presentationTime.tv_usec - fFirstPresentationTime.tv_usec)/1000000.0;
    int64_t newGranulePosition
      = (int64_t)(fSamplingFrequency*ptDiff) + fGranulePositionAdjustment;
    if (newGranulePosition < fGranulePosition) {
      // Update "fGranulePositionAdjustment" so that "fGranulePosition" remains monotonic
      fGranulePositionAdjustment += fGranulePosition - newGranulePosition;
    } else {
      fGranulePosition = newGranulePosition;
    }
  }

  // Write the frame to the file as a single Ogg 'page' (or perhaps as multiple pages
  // if it's too big for a single page).  We don't aggregate more than one frame within
  // an Ogg page because that's not legal for some headers, and because that would make
  // it difficult for us to properly set the 'eos' (end of stream) flag on the last page.

  // First, figure out how many pages to write here
  // (a page can contain no more than PAGE_DATA_MAX_SIZE bytes)
  unsigned numPagesToWrite = dataSize/PAGE_DATA_MAX_SIZE + 1;
      // Note that if "dataSize" is an integral multiple of PAGE_DATA_MAX_SIZE, there will
      // be an extra 0-size page at the end
  for (unsigned i = 0; i < numPagesToWrite; ++i) {
    // First, fill in the changeable parts of our 'page header' array;
    u_int8_t header_type_flag = 0x0;
    if (!fHaveWrittenFirstFrame && i == 0) {
      header_type_flag |= 0x02; // 'bos'
      fHaveWrittenFirstFrame = True; // for the future
    }
    if (i > 0) header_type_flag |= 0x01; // 'continuation'
    if (fHaveSeenEOF && i == numPagesToWrite-1) header_type_flag |= 0x04; // 'eos'
    fPageHeaderBytes[5] = header_type_flag;

    if (i < numPagesToWrite-1) {
      // For pages where the frame does not end, set 'granule_position' in the header to -1:
      fPageHeaderBytes[6] = fPageHeaderBytes[7] = fPageHeaderBytes[8] = fPageHeaderBytes[9] =
	fPageHeaderBytes[10] = fPageHeaderBytes[11] = fPageHeaderBytes[12] = fPageHeaderBytes[13]
	= 0xFF;
    } else {
      // Little-endian encoding of the 64-bit granule position:
      fPageHeaderBytes[6] = (u_int8_t)fGranulePosition;
      fPageHeaderBytes[7] = (u_int8_t)(fGranulePosition>>8);
      fPageHeaderBytes[8] = (u_int8_t)(fGranulePosition>>16);
      fPageHeaderBytes[9] = (u_int8_t)(fGranulePosition>>24);
      fPageHeaderBytes[10] = (u_int8_t)(fGranulePosition>>32);
      fPageHeaderBytes[11] = (u_int8_t)(fGranulePosition>>40);
      fPageHeaderBytes[12] = (u_int8_t)(fGranulePosition>>48);
      fPageHeaderBytes[13] = (u_int8_t)(fGranulePosition>>56);
    }

    // Little-endian encoding of the 32-bit page sequence number:
    fPageHeaderBytes[18] = (u_int8_t)fPageSequenceNumber;
    fPageHeaderBytes[19] = (u_int8_t)(fPageSequenceNumber>>8);
    fPageHeaderBytes[20] = (u_int8_t)(fPageSequenceNumber>>16);
    fPageHeaderBytes[21] = (u_int8_t)(fPageSequenceNumber>>24);
    ++fPageSequenceNumber;

    unsigned pageDataSize;
    u_int8_t number_page_segments;
    if (dataSize >= PAGE_DATA_MAX_SIZE) {
      pageDataSize = PAGE_DATA_MAX_SIZE;
      number_page_segments = 255;
    } else {
      pageDataSize = dataSize;
      number_page_segments = (pageDataSize+255)/255; // so that we don't end with a lacing of 255
    }
    fPageHeaderBytes[26] = number_page_segments;

    // Build the 'segment_table': all-255 lacings except the final one,
    // which encodes the remainder (and terminates the packet):
    u_int8_t segment_table[255];
    for (unsigned j = 0; j < (unsigned)(number_page_segments-1); ++j) {
      segment_table[j] = 255;
    }
    segment_table[number_page_segments-1] = pageDataSize%255;

    // Compute the CRC from the 'page header' array, the 'segment_table', and the frame data
    // (with the CRC field itself zeroed, as the Ogg spec requires):
    u_int32_t crc = 0;
    fPageHeaderBytes[22] = fPageHeaderBytes[23] = fPageHeaderBytes[24] = fPageHeaderBytes[25] = 0;
    crc = calculateCRC(fPageHeaderBytes, 27, 0);
    crc = calculateCRC(segment_table, number_page_segments, crc);
    crc = calculateCRC(data, pageDataSize, crc);
    fPageHeaderBytes[22] = (u_int8_t)crc;
    fPageHeaderBytes[23] = (u_int8_t)(crc>>8);
    fPageHeaderBytes[24] = (u_int8_t)(crc>>16);
    fPageHeaderBytes[25] = (u_int8_t)(crc>>24);

    // Then write out the 'page header' array:
    FileSink::addData(fPageHeaderBytes, 27, presentationTime);

    // Then write out the 'segment_table':
    FileSink::addData(segment_table, number_page_segments, presentationTime);

    // Then add frame data, to complete the page:
    FileSink::addData(data, pageDataSize, presentationTime);
    data += pageDataSize;
    dataSize -= pageDataSize;
  }
}
+
// Frame-arrival handler.  On the very first frame it (optionally) writes the
// unpacked configuration headers to the file.  Frames are then written one
// delivery late: the newly-arrived frame is stashed in "fAltBuffer", and the
// previously-stashed frame is written — this lets the final page be flagged
// 'eos' when the source closes (see "ourOnSourceClosure()").
void OggFileSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime) {
  if (!fHaveWrittenFirstFrame) {
    fFirstPresentationTime = presentationTime;

    // If we have a 'config string' representing 'packed configuration headers'
    // ("identification", "comment", "setup"), unpack them and prepend them to the file:
    if (fConfigStr != NULL && fConfigStr[0] != '\0') {
      u_int8_t* identificationHdr; unsigned identificationHdrSize;
      u_int8_t* commentHdr; unsigned commentHdrSize;
      u_int8_t* setupHdr; unsigned setupHdrSize;
      u_int32_t identField;
      parseVorbisOrTheoraConfigStr(fConfigStr,
				   identificationHdr, identificationHdrSize,
				   commentHdr, commentHdrSize,
				   setupHdr, setupHdrSize,
				   identField);
      if (identificationHdrSize >= 42
	  && strncmp((const char*)&identificationHdr[1], "theora", 6) == 0) {
	// Hack for Theora video: Parse the "identification" hdr to get the "KFGSHIFT" parameter:
	fIsTheora = True;
	u_int8_t const KFGSHIFT = ((identificationHdr[40]&3)<<3) | (identificationHdr[41]>>5);
	fGranuleIncrementPerFrame = (u_int64_t)(1 << KFGSHIFT);
      }
      OggFileSink::addData(identificationHdr, identificationHdrSize, presentationTime);
      OggFileSink::addData(commentHdr, commentHdrSize, presentationTime);

      // Hack: Handle the "setup" header as if had arrived in the previous delivery, so it'll get
      // written properly below:
      if (setupHdrSize > fBufferSize) {
	// The header won't fit in our buffer; copy what fits and record the overflow:
	fAltFrameSize = fBufferSize;
	fAltNumTruncatedBytes = setupHdrSize - fBufferSize;
      } else {
	fAltFrameSize = setupHdrSize;
	fAltNumTruncatedBytes = 0;
      }
      memmove(fAltBuffer, setupHdr, fAltFrameSize);
      fAltPresentationTime = presentationTime;

      // The three headers were heap-allocated by "parseVorbisOrTheoraConfigStr()":
      delete[] identificationHdr;
      delete[] commentHdr;
      delete[] setupHdr;
    }
  }

  // Save this input frame for next time, and instead write the previous input frame now:
  unsigned char* tmpPtr = fBuffer; fBuffer = fAltBuffer; fAltBuffer = tmpPtr;
  unsigned prevFrameSize = fAltFrameSize; fAltFrameSize = frameSize;
  unsigned prevNumTruncatedBytes = fAltNumTruncatedBytes; fAltNumTruncatedBytes = numTruncatedBytes;
  struct timeval prevPresentationTime = fAltPresentationTime; fAltPresentationTime = presentationTime;

  // Call the parent class to complete the normal file write with the (previous) input frame:
  FileSink::afterGettingFrame(prevFrameSize, prevNumTruncatedBytes, prevPresentationTime);
}
+
+void OggFileSink::ourOnSourceClosure(void* clientData) {
+ ((OggFileSink*)clientData)->ourOnSourceClosure();
+}
+
// Source-closure handler: flush the last buffered frame (marked 'eos' because
// "fHaveSeenEOF" is set first), then perform the normal closure handling.
void OggFileSink::ourOnSourceClosure() {
  fHaveSeenEOF = True;

  // We still have the previously-arrived frame, so write it to the file before we end:
  OggFileSink::addData(fAltBuffer, fAltFrameSize, fAltPresentationTime);

  // Handle the closure for real:
  onSourceClosure();
}
diff --git a/liveMedia/OnDemandServerMediaSubsession.cpp b/liveMedia/OnDemandServerMediaSubsession.cpp
new file mode 100644
index 0000000..0c9fb5d
--- /dev/null
+++ b/liveMedia/OnDemandServerMediaSubsession.cpp
@@ -0,0 +1,624 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand.
+// Implementation
+
+#include "OnDemandServerMediaSubsession.hh"
+#include <GroupsockHelper.hh>
+
// Constructor.  "reuseFirstSource" makes all clients share one source/sink;
// "initialPortNum" is where the search for free server ports begins.
OnDemandServerMediaSubsession
::OnDemandServerMediaSubsession(UsageEnvironment& env,
				Boolean reuseFirstSource,
				portNumBits initialPortNum,
				Boolean multiplexRTCPWithRTP)
  : ServerMediaSubsession(env),
    fSDPLines(NULL), fReuseFirstSource(reuseFirstSource),
    fMultiplexRTCPWithRTP(multiplexRTCPWithRTP), fLastStreamToken(NULL),
    fAppHandlerTask(NULL), fAppHandlerClientData(NULL) {
  // Maps clientSessionId -> "Destinations" for each active client:
  fDestinationsHashTable = HashTable::create(ONE_WORD_HASH_KEYS);
  if (fMultiplexRTCPWithRTP) {
    fInitialPortNum = initialPortNum;
  } else {
    // Make sure RTP ports are even-numbered:
    fInitialPortNum = (initialPortNum+1)&~1;
  }
  gethostname(fCNAME, sizeof fCNAME);
  fCNAME[sizeof fCNAME-1] = '\0'; // just in case
}
+
+OnDemandServerMediaSubsession::~OnDemandServerMediaSubsession() {
+ delete[] fSDPLines;
+
+ // Clean out the destinations hash table:
+ while (1) {
+ Destinations* destinations
+ = (Destinations*)(fDestinationsHashTable->RemoveNext());
+ if (destinations == NULL) break;
+ delete destinations;
+ }
+ delete fDestinationsHashTable;
+}
+
// Returns (lazily computing, then caching) the SDP description of this
// subsession.  Returns NULL if the underlying media could not be opened.
char const*
OnDemandServerMediaSubsession::sdpLines() {
  if (fSDPLines == NULL) {
    // We need to construct a set of SDP lines that describe this
    // subsession (as a unicast stream).  To do so, we first create
    // dummy (unused) source and "RTPSink" objects,
    // whose parameters we use for the SDP lines:
    unsigned estBitrate;
    FramedSource* inputSource = createNewStreamSource(0, estBitrate);
    if (inputSource == NULL) return NULL; // file not found

    struct in_addr dummyAddr;
    dummyAddr.s_addr = 0;
    Groupsock* dummyGroupsock = createGroupsock(dummyAddr, 0);
    unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic
    RTPSink* dummyRTPSink = createNewRTPSink(dummyGroupsock, rtpPayloadType, inputSource);
    // Prefer the sink's own bitrate estimate, when it has one:
    if (dummyRTPSink != NULL && dummyRTPSink->estimatedBitrate() > 0) estBitrate = dummyRTPSink->estimatedBitrate();

    setSDPLinesFromRTPSink(dummyRTPSink, inputSource, estBitrate);
    // Tear the dummy objects back down; only "fSDPLines" survives:
    Medium::close(dummyRTPSink);
    delete dummyGroupsock;
    closeStreamSource(inputSource);
  }

  return fSDPLines;
}
+
// Sets up (or reuses) the server-side state for one client's stream:
// chooses server RTP/RTCP ports, creates the media source and sink, and
// records the client's destination addresses/ports for later "startStream()".
void OnDemandServerMediaSubsession
::getStreamParameters(unsigned clientSessionId,
		      netAddressBits clientAddress,
		      Port const& clientRTPPort,
		      Port const& clientRTCPPort,
		      int tcpSocketNum,
		      unsigned char rtpChannelId,
		      unsigned char rtcpChannelId,
		      netAddressBits& destinationAddress,
		      u_int8_t& /*destinationTTL*/,
		      Boolean& isMulticast,
		      Port& serverRTPPort,
		      Port& serverRTCPPort,
		      void*& streamToken) {
  if (destinationAddress == 0) destinationAddress = clientAddress;
  struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress;
  isMulticast = False; // we are always a unicast subsession

  if (fLastStreamToken != NULL && fReuseFirstSource) {
    // Special case: Rather than creating a new 'StreamState',
    // we reuse the one that we've already created:
    serverRTPPort = ((StreamState*)fLastStreamToken)->serverRTPPort();
    serverRTCPPort = ((StreamState*)fLastStreamToken)->serverRTCPPort();
    ++((StreamState*)fLastStreamToken)->referenceCount();
    streamToken = fLastStreamToken;
  } else {
    // Normal case: Create a new media source:
    unsigned streamBitrate;
    FramedSource* mediaSource
      = createNewStreamSource(clientSessionId, streamBitrate);

    // Create 'groupsock' and 'sink' objects for the destination,
    // using previously unused server port numbers:
    RTPSink* rtpSink = NULL;
    BasicUDPSink* udpSink = NULL;
    Groupsock* rtpGroupsock = NULL;
    Groupsock* rtcpGroupsock = NULL;

    if (clientRTPPort.num() != 0 || tcpSocketNum >= 0) { // Normal case: Create destinations
      portNumBits serverPortNum;
      if (clientRTCPPort.num() == 0) {
	// We're streaming raw UDP (not RTP). Create a single groupsock:
	NoReuse dummy(envir()); // ensures that we skip over ports that are already in use
	for (serverPortNum = fInitialPortNum; ; ++serverPortNum) {
	  struct in_addr dummyAddr; dummyAddr.s_addr = 0;

	  serverRTPPort = serverPortNum;
	  rtpGroupsock = createGroupsock(dummyAddr, serverRTPPort);
	  if (rtpGroupsock->socketNum() >= 0) break; // success
	}

	udpSink = BasicUDPSink::createNew(envir(), rtpGroupsock);
      } else {
	// Normal case: We're streaming RTP (over UDP or TCP).  Create a pair of
	// groupsocks (RTP and RTCP), with adjacent port numbers (RTP port number even).
	// (If we're multiplexing RTCP and RTP over the same port number, it can be odd or even.)
	NoReuse dummy(envir()); // ensures that we skip over ports that are already in use
	for (portNumBits serverPortNum = fInitialPortNum; ; ++serverPortNum) {
	  struct in_addr dummyAddr; dummyAddr.s_addr = 0;

	  serverRTPPort = serverPortNum;
	  rtpGroupsock = createGroupsock(dummyAddr, serverRTPPort);
	  if (rtpGroupsock->socketNum() < 0) {
	    delete rtpGroupsock;
	    continue; // try again
	  }

	  if (fMultiplexRTCPWithRTP) {
	    // Use the RTP 'groupsock' object for RTCP as well:
	    serverRTCPPort = serverRTPPort;
	    rtcpGroupsock = rtpGroupsock;
	  } else {
	    // Create a separate 'groupsock' object (with the next (odd) port number) for RTCP:
	    serverRTCPPort = ++serverPortNum;
	    rtcpGroupsock = createGroupsock(dummyAddr, serverRTCPPort);
	    if (rtcpGroupsock->socketNum() < 0) {
	      delete rtpGroupsock;
	      delete rtcpGroupsock;
	      continue; // try again
	    }
	  }

	  break; // success
	}

	unsigned char rtpPayloadType = 96 + trackNumber()-1; // if dynamic
	rtpSink = createNewRTPSink(rtpGroupsock, rtpPayloadType, mediaSource);
	// Prefer the sink's own bitrate estimate, when it has one:
	if (rtpSink != NULL && rtpSink->estimatedBitrate() > 0) streamBitrate = rtpSink->estimatedBitrate();
      }

      // Turn off the destinations for each groupsock.  They'll get set later
      // (unless TCP is used instead):
      if (rtpGroupsock != NULL) rtpGroupsock->removeAllDestinations();
      if (rtcpGroupsock != NULL) rtcpGroupsock->removeAllDestinations();

      if (rtpGroupsock != NULL) {
	// Try to use a big send buffer for RTP -  at least 0.1 second of
	// specified bandwidth and at least 50 KB
	unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
	if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
	increaseSendBufferTo(envir(), rtpGroupsock->socketNum(), rtpBufSize);
      }
    }

    // Set up the state of the stream.  The stream will get started later:
    streamToken = fLastStreamToken
      = new StreamState(*this, serverRTPPort, serverRTCPPort, rtpSink, udpSink,
			streamBitrate, mediaSource,
			rtpGroupsock, rtcpGroupsock);
  }

  // Record these destinations as being for this client session id:
  Destinations* destinations;
  if (tcpSocketNum < 0) { // UDP
    destinations = new Destinations(destinationAddr, clientRTPPort, clientRTCPPort);
  } else { // TCP
    destinations = new Destinations(tcpSocketNum, rtpChannelId, rtcpChannelId);
  }
  fDestinationsHashTable->Add((char const*)clientSessionId, destinations);
}
+
// Starts streaming to the destinations previously recorded for this client
// session, and reports back the initial RTP sequence number and timestamp.
void OnDemandServerMediaSubsession::startStream(unsigned clientSessionId,
						void* streamToken,
						TaskFunc* rtcpRRHandler,
						void* rtcpRRHandlerClientData,
						unsigned short& rtpSeqNum,
						unsigned& rtpTimestamp,
						ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
						void* serverRequestAlternativeByteHandlerClientData) {
  StreamState* streamState = (StreamState*)streamToken;
  Destinations* destinations
    = (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId));
  if (streamState != NULL) {
    streamState->startPlaying(destinations, clientSessionId,
			      rtcpRRHandler, rtcpRRHandlerClientData,
			      serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
    RTPSink* rtpSink = streamState->rtpSink(); // alias
    if (rtpSink != NULL) {
      // Report the stream's initial RTP parameters (for the RTSP "PLAY" response):
      rtpSeqNum = rtpSink->currentSeqNo();
      rtpTimestamp = rtpSink->presetNextTimestamp();
    }
  }
}
+
+void OnDemandServerMediaSubsession::pauseStream(unsigned /*clientSessionId*/,
+ void* streamToken) {
+ // Pausing isn't allowed if multiple clients are receiving data from
+ // the same source:
+ if (fReuseFirstSource) return;
+
+ StreamState* streamState = (StreamState*)streamToken;
+ if (streamState != NULL) streamState->pause();
+}
+
// Seeks the stream's media source to "seekNPT" (normal play time, seconds);
// "numBytes" reports the byte count of the seek range, when known.
void OnDemandServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
					       void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes) {
  numBytes = 0; // by default: unknown

  // Seeking isn't allowed if multiple clients are receiving data from the same source:
  if (fReuseFirstSource) return;

  StreamState* streamState = (StreamState*)streamToken;
  if (streamState != NULL && streamState->mediaSource() != NULL) {
    seekStreamSource(streamState->mediaSource(), seekNPT, streamDuration, numBytes);

    // Record the (possibly clamped) seek position as the stream's new start NPT:
    streamState->startNPT() = (float)seekNPT;
    RTPSink* rtpSink = streamState->rtpSink(); // alias
    if (rtpSink != NULL) rtpSink->resetPresentationTimes();
  }
}
+
+void OnDemandServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
+ void* streamToken, char*& absStart, char*& absEnd) {
+ // Seeking isn't allowed if multiple clients are receiving data from the same source:
+ if (fReuseFirstSource) return;
+
+ StreamState* streamState = (StreamState*)streamToken;
+ if (streamState != NULL && streamState->mediaSource() != NULL) {
+ seekStreamSource(streamState->mediaSource(), absStart, absEnd);
+ }
+}
+
// Handles a 'PLAY' without seeking: re-anchors the stream's start NPT at the
// current position and limits the source's remaining duration accordingly.
void OnDemandServerMediaSubsession::nullSeekStream(unsigned /*clientSessionId*/, void* streamToken,
						   double streamEndTime, u_int64_t& numBytes) {
  numBytes = 0; // by default: unknown

  StreamState* streamState = (StreamState*)streamToken;
  if (streamState != NULL && streamState->mediaSource() != NULL) {
    // Because we're not seeking here, get the current NPT, and remember it as the new 'start' NPT:
    streamState->startNPT() = getCurrentNPT(streamToken);

    double duration = streamEndTime - streamState->startNPT();
    if (duration < 0.0) duration = 0.0; // clamp: the requested end may already have passed
    setStreamSourceDuration(streamState->mediaSource(), duration, numBytes);

    RTPSink* rtpSink = streamState->rtpSink(); // alias
    if (rtpSink != NULL) rtpSink->resetPresentationTimes();
  }
}
+
+// Apply a new playback scale (speed) factor to this client's stream source.
+void OnDemandServerMediaSubsession::setStreamScale(unsigned /*clientSessionId*/,
+ void* streamToken, float scale) {
+ // Changing the scale factor isn't allowed if multiple clients are receiving data
+ // from the same source:
+ if (fReuseFirstSource) return;
+
+ StreamState* theStreamState = (StreamState*)streamToken;
+ if (theStreamState == NULL) return;
+
+ FramedSource* source = theStreamState->mediaSource();
+ if (source == NULL) return;
+
+ setStreamSourceScale(source, scale);
+}
+
+// Report the stream's current NPT: its recorded start NPT, plus the presentation time
+// that has elapsed since the RTP sink began playing.  Returns 0.0 if the stream (or
+// its RTP sink) doesn't exist.
+float OnDemandServerMediaSubsession::getCurrentNPT(void* streamToken) {
+ StreamState* theStreamState = (StreamState*)streamToken;
+ if (theStreamState == NULL) return 0.0;
+
+ RTPSink* sink = theStreamState->rtpSink();
+ if (sink == NULL) return 0.0;
+
+ struct timeval latest = sink->mostRecentPresentationTime();
+ struct timeval initial = sink->initialPresentationTime();
+ return theStreamState->startNPT()
+   + (latest.tv_sec - initial.tv_sec)
+   + (latest.tv_usec - initial.tv_usec)/1000000.0f;
+}
+
+// Return the media source that underlies this stream, or NULL if there's no stream.
+FramedSource* OnDemandServerMediaSubsession::getStreamSource(void* streamToken) {
+ StreamState* theStreamState = (StreamState*)streamToken;
+ return theStreamState == NULL ? NULL : theStreamState->mediaSource();
+}
+
+// Expose the stream's RTP sink and RTCP instance (either may be NULL).
+void OnDemandServerMediaSubsession
+::getRTPSinkandRTCP(void* streamToken,
+     RTPSink const*& rtpSink, RTCPInstance const*& rtcp) {
+ rtpSink = NULL;
+ rtcp = NULL;
+
+ StreamState* theStreamState = (StreamState*)streamToken;
+ if (theStreamState == NULL) return;
+
+ rtpSink = theStreamState->rtpSink();
+ rtcp = theStreamState->rtcpInstance();
+}
+
+// Tear down this client session's stream: stop sending to its destinations, drop one
+// reference to the (possibly shared) "StreamState" - deleting it when the count hits
+// zero - and finally delete the client's "Destinations" record.
+void OnDemandServerMediaSubsession::deleteStream(unsigned clientSessionId,
+      void*& streamToken) {
+ StreamState* streamState = (StreamState*)streamToken;
+
+ // Look up (and remove) the destinations for this client session:
+ Destinations* destinations
+ = (Destinations*)(fDestinationsHashTable->Lookup((char const*)clientSessionId));
+ if (destinations != NULL) {
+ fDestinationsHashTable->Remove((char const*)clientSessionId);
+
+ // Stop streaming to these destinations:
+ if (streamState != NULL) streamState->endPlaying(destinations, clientSessionId);
+ }
+
+ // Delete the "StreamState" structure if it's no longer being used:
+ if (streamState != NULL) {
+ if (streamState->referenceCount() > 0) --streamState->referenceCount();
+ if (streamState->referenceCount() == 0) {
+ delete streamState;
+ streamToken = NULL; // clear the caller's token, because it's now dangling
+ }
+ }
+
+ // Finally, delete the destinations themselves:
+ delete destinations;
+}
+
+// Default implementation: defer to the RTP sink's own auxiliary SDP line (if any).
+char const* OnDemandServerMediaSubsession
+::getAuxSDPLine(RTPSink* rtpSink, FramedSource* /*inputSource*/) {
+ if (rtpSink == NULL) return NULL;
+ return rtpSink->auxSDPLine();
+}
+
+// Default implementation of NPT seeking on the underlying source: do nothing
+// (subsessions whose sources support seeking redefine this).
+void OnDemandServerMediaSubsession::seekStreamSource(FramedSource* /*inputSource*/,
+ double& /*seekNPT*/, double /*streamDuration*/, u_int64_t& numBytes) {
+ // Default implementation: Do nothing
+ numBytes = 0;
+}
+
+// Default implementation of absolute-time seeking on the underlying source:
+// unsupported, signalled to the caller by NULLing out both time strings.
+void OnDemandServerMediaSubsession::seekStreamSource(FramedSource* /*inputSource*/,
+ char*& absStart, char*& absEnd) {
+ // Default implementation: do nothing (but delete[] and assign "absStart" and "absEnd" to NULL, to show that we don't handle this)
+ delete[] absStart; absStart = NULL;
+ delete[] absEnd; absEnd = NULL;
+}
+
+// Default implementation of applying a scale factor to the underlying source:
+// do nothing (subsessions that support scale changes redefine this).
+void OnDemandServerMediaSubsession
+::setStreamSourceScale(FramedSource* /*inputSource*/, float /*scale*/) {
+ // Default implementation: Do nothing
+}
+
+// Default implementation of setting the underlying source's remaining duration:
+// do nothing, and report the byte count as unknown.
+void OnDemandServerMediaSubsession
+::setStreamSourceDuration(FramedSource* /*inputSource*/, double /*streamDuration*/, u_int64_t& numBytes) {
+ // Default implementation: Do nothing
+ numBytes = 0;
+}
+
+// Default implementation: close the input source.  Subclasses may redefine this
+// (e.g., to keep the source open for reuse).
+void OnDemandServerMediaSubsession::closeStreamSource(FramedSource *inputSource) {
+ Medium::close(inputSource);
+}
+
+// Create the "Groupsock" (socket wrapper) used for RTP or RTCP, with TTL 255.
+Groupsock* OnDemandServerMediaSubsession
+::createGroupsock(struct in_addr const& addr, Port port) {
+ // Default implementation; may be redefined by subclasses:
+ return new Groupsock(envir(), addr, port, 255);
+}
+
+// Create the "RTCPInstance" paired with "sink".  Passing NULL for the source
+// parameter marks us as a server (a sender, not a receiver).
+RTCPInstance* OnDemandServerMediaSubsession
+::createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
+      unsigned char const* cname, RTPSink* sink) {
+ // Default implementation; may be redefined by subclasses:
+ return RTCPInstance::createNew(envir(), RTCPgs, totSessionBW, cname, sink, NULL/*we're a server*/);
+}
+
+// Register a handler (plus its client data) to be called for incoming RTCP "APP"
+// packets; it is installed on each RTCP instance when a stream starts playing.
+void OnDemandServerMediaSubsession
+::setRTCPAppPacketHandler(RTCPAppHandlerFunc* handler, void* clientData) {
+ fAppHandlerTask = handler;
+ fAppHandlerClientData = clientData;
+}
+
+// Send an application-defined RTCP "APP" packet, via the most recently created
+// stream's state (a no-op if no stream has been set up yet).
+void OnDemandServerMediaSubsession
+::sendRTCPAppPacket(u_int8_t subtype, char const* name,
+      u_int8_t* appDependentData, unsigned appDependentDataSize) {
+ StreamState* theStreamState = (StreamState*)fLastStreamToken;
+ if (theStreamState == NULL) return;
+
+ theStreamState->sendRTCPAppPacket(subtype, name, appDependentData, appDependentDataSize);
+}
+
+// (Re)generate "fSDPLines" - the SDP description of this subsession - from the
+// properties of "rtpSink" (media type, payload type, rtpmap line, etc.).
+void OnDemandServerMediaSubsession
+::setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource, unsigned estBitrate) {
+ if (rtpSink == NULL) return;
+
+ char const* mediaType = rtpSink->sdpMediaType();
+ unsigned char rtpPayloadType = rtpSink->rtpPayloadType();
+ AddressString ipAddressStr(fServerAddressForSDP);
+ char* rtpmapLine = rtpSink->rtpmapLine();
+ char const* rtcpmuxLine = fMultiplexRTCPWithRTP ? "a=rtcp-mux\r\n" : "";
+ char const* rangeLine = rangeSDPLine();
+ char const* auxSDPLine = getAuxSDPLine(rtpSink, inputSource);
+ if (auxSDPLine == NULL) auxSDPLine = "";
+
+ char const* const sdpFmt =
+ "m=%s %u RTP/AVP %d\r\n"
+ "c=IN IP4 %s\r\n"
+ "b=AS:%u\r\n"
+ "%s"
+ "%s"
+ "%s"
+ "%s"
+ "a=control:%s\r\n";
+ // Size the buffer by summing the format string's length with an upper bound on the
+ // length of each substituted value (the unused %-directive bytes provide slack):
+ unsigned sdpFmtSize = strlen(sdpFmt)
+ + strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
+ + strlen(ipAddressStr.val())
+ + 20 /* max int len */
+ + strlen(rtpmapLine)
+ + strlen(rtcpmuxLine)
+ + strlen(rangeLine)
+ + strlen(auxSDPLine)
+ + strlen(trackId());
+ char* sdpLines = new char[sdpFmtSize];
+ sprintf(sdpLines, sdpFmt,
+  mediaType, // m= <media>
+  fPortNumForSDP, // m= <port>
+  rtpPayloadType, // m= <fmt list>
+  ipAddressStr.val(), // c= address
+  estBitrate, // b=AS:<bandwidth>
+  rtpmapLine, // a=rtpmap:... (if present)
+  rtcpmuxLine, // a=rtcp-mux:... (if present)
+  rangeLine, // a=range:... (if present)
+  auxSDPLine, // optional extra SDP line
+  trackId()); // a=control:<track-id>
+ delete[] (char*)rangeLine; delete[] rtpmapLine; // these two were dynamically allocated for us
+
+ delete[] fSDPLines; fSDPLines = strDup(sdpLines);
+ delete[] sdpLines;
+}
+
+
+////////// StreamState implementation //////////
+
+// Called (as an 'after playing' callback) when the input stream ends.
+static void afterPlayingStreamState(void* clientData) {
+ StreamState* streamState = (StreamState*)clientData;
+ if (streamState->streamDuration() == 0.0) {
+ // When the input stream ends, tear it down. This will cause a RTCP "BYE"
+ // to be sent to each client, telling it that the stream has ended.
+ // (Because the stream didn't have a known duration, there was no other
+ // way for clients to know when the stream ended.)
+ streamState->reclaim();
+ }
+ // Otherwise, keep the stream alive, in case a client wants to
+ // subsequently re-play the stream starting from somewhere other than the end.
+ // (This can be done only on streams that have a known duration.)
+}
+
+// Construct the shared per-stream state.  The reference count starts at 1 (for the
+// creating client session); the RTCP instance is created later, on first play.
+StreamState::StreamState(OnDemandServerMediaSubsession& master,
+    Port const& serverRTPPort, Port const& serverRTCPPort,
+    RTPSink* rtpSink, BasicUDPSink* udpSink,
+    unsigned totalBW, FramedSource* mediaSource,
+    Groupsock* rtpGS, Groupsock* rtcpGS)
+ : fMaster(master), fAreCurrentlyPlaying(False), fReferenceCount(1),
+ fServerRTPPort(serverRTPPort), fServerRTCPPort(serverRTCPPort),
+ fRTPSink(rtpSink), fUDPSink(udpSink), fStreamDuration(master.duration()),
+ fTotalBW(totalBW), fRTCPInstance(NULL) /* created later */,
+ fMediaSource(mediaSource), fStartNPT(0.0), fRTPgs(rtpGS), fRTCPgs(rtcpGS) {
+}
+
+// Destructor: release all media objects and sockets that we own.
+StreamState::~StreamState() {
+ reclaim();
+}
+
+// Begin (or continue) streaming to the given destinations: create the RTCP instance
+// on first use, wire RTP/RTCP either over the client's TCP socket or over UDP
+// groupsocks, send an initial RTCP "SR", and start the media source playing if it
+// isn't already.
+void StreamState
+::startPlaying(Destinations* dests, unsigned clientSessionId,
+        TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
+        ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+        void* serverRequestAlternativeByteHandlerClientData) {
+ if (dests == NULL) return;
+
+ if (fRTCPInstance == NULL && fRTPSink != NULL) {
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ fRTCPInstance = fMaster.createRTCP(fRTCPgs, fTotalBW, (unsigned char*)fMaster.fCNAME, fRTPSink);
+ // Note: This starts RTCP running automatically
+ fRTCPInstance->setAppHandler(fMaster.fAppHandlerTask, fMaster.fAppHandlerClientData);
+ }
+
+ if (dests->isTCP) {
+ // Change RTP and RTCP to use the TCP socket instead of UDP:
+ if (fRTPSink != NULL) {
+ fRTPSink->addStreamSocket(dests->tcpSocketNum, dests->rtpChannelId);
+ RTPInterface
+ ::setServerRequestAlternativeByteHandler(fRTPSink->envir(), dests->tcpSocketNum,
+      serverRequestAlternativeByteHandler, serverRequestAlternativeByteHandlerClientData);
+ // So that we continue to handle RTSP commands from the client
+ }
+ if (fRTCPInstance != NULL) {
+ fRTCPInstance->addStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId);
+ fRTCPInstance->setSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId,
+     rtcpRRHandler, rtcpRRHandlerClientData);
+ }
+ } else {
+ // Tell the RTP and RTCP 'groupsocks' about this destination
+ // (in case they don't already have it):
+ if (fRTPgs != NULL) fRTPgs->addDestination(dests->addr, dests->rtpPort, clientSessionId);
+ // Skip the RTCP destination if RTP and RTCP are multiplexed on the same socket/port:
+ if (fRTCPgs != NULL && !(fRTCPgs == fRTPgs && dests->rtcpPort.num() == dests->rtpPort.num())) {
+ fRTCPgs->addDestination(dests->addr, dests->rtcpPort, clientSessionId);
+ }
+ if (fRTCPInstance != NULL) {
+ fRTCPInstance->setSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort,
+     rtcpRRHandler, rtcpRRHandlerClientData);
+ }
+ }
+
+ if (fRTCPInstance != NULL) {
+ // Hack: Send an initial RTCP "SR" packet, before the initial RTP packet, so that receivers will (likely) be able to
+ // get RTCP-synchronized presentation times immediately:
+ fRTCPInstance->sendReport();
+ }
+
+ if (!fAreCurrentlyPlaying && fMediaSource != NULL) {
+ if (fRTPSink != NULL) {
+ fRTPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
+ fAreCurrentlyPlaying = True;
+ } else if (fUDPSink != NULL) {
+ fUDPSink->startPlaying(*fMediaSource, afterPlayingStreamState, this);
+ fAreCurrentlyPlaying = True;
+ }
+ }
+}
+
+// Pause the stream: stop the sinks sending, and stop pulling frames from the source,
+// but keep all state so that streaming can later be resumed.
+void StreamState::pause() {
+ if (fRTPSink != NULL) fRTPSink->stopPlaying();
+ if (fUDPSink != NULL) fUDPSink->stopPlaying();
+ if (fMediaSource != NULL) fMediaSource->stopGettingFrames();
+ fAreCurrentlyPlaying = False;
+}
+
+// Stop streaming to one client's destinations (without destroying this "StreamState",
+// which may still be shared with other clients).
+void StreamState::endPlaying(Destinations* dests, unsigned clientSessionId) {
+#if 0
+ // The following code is temporarily disabled, because it erroneously sends RTCP "BYE"s to all clients if multiple
+ // clients are streaming from the same data source (i.e., if "reuseFirstSource" is True), and we don't want that to happen
+ // if we're being called as a result of a single one of these clients having sent a "TEARDOWN" (rather than the whole stream
+ // having been closed, for all clients).
+ // This will be fixed for real later.
+ if (fRTCPInstance != NULL) {
+ // Hack: Explicitly send a RTCP "BYE" packet now, because the code below will prevent that from happening later,
+ // when "fRTCPInstance" gets deleted:
+ fRTCPInstance->sendBYE();
+ }
+#endif
+
+ if (dests->isTCP) {
+ if (fRTPSink != NULL) {
+ // Comment out the following, because it prevents the "RTSPClientConnection" object
+ // from being closed after handling a "TEARDOWN": #####
+ //RTPInterface::clearServerRequestAlternativeByteHandler(fRTPSink->envir(), dests->tcpSocketNum);
+ fRTPSink->removeStreamSocket(dests->tcpSocketNum, dests->rtpChannelId);
+ }
+ if (fRTCPInstance != NULL) {
+ fRTCPInstance->removeStreamSocket(dests->tcpSocketNum, dests->rtcpChannelId);
+ fRTCPInstance->unsetSpecificRRHandler(dests->tcpSocketNum, dests->rtcpChannelId);
+ }
+ } else {
+ // Tell the RTP and RTCP 'groupsocks' to stop using these destinations:
+ if (fRTPgs != NULL) fRTPgs->removeDestination(clientSessionId);
+ if (fRTCPgs != NULL && fRTCPgs != fRTPgs) fRTCPgs->removeDestination(clientSessionId);
+ if (fRTCPInstance != NULL) {
+ fRTCPInstance->unsetSpecificRRHandler(dests->addr.s_addr, dests->rtcpPort);
+ }
+ }
+}
+
+// Forward an application-defined RTCP "APP" packet to our RTCP instance, if any.
+void StreamState::sendRTCPAppPacket(u_int8_t subtype, char const* name,
+        u_int8_t* appDependentData, unsigned appDependentDataSize) {
+ if (fRTCPInstance == NULL) return;
+
+ fRTCPInstance->sendAppPacket(subtype, name, appDependentData, appDependentDataSize);
+}
+
+// Release everything this stream owns.  Closing "fRTCPInstance" first causes a final
+// RTCP "BYE" to be sent; the groupsocks are deleted last, after the media objects
+// that used them.
+void StreamState::reclaim() {
+ // Delete allocated media objects
+ Medium::close(fRTCPInstance) /* will send a RTCP BYE */; fRTCPInstance = NULL;
+ Medium::close(fRTPSink); fRTPSink = NULL;
+ Medium::close(fUDPSink); fUDPSink = NULL;
+
+ fMaster.closeStreamSource(fMediaSource); fMediaSource = NULL;
+ // Make sure the master doesn't keep a dangling token to us:
+ if (fMaster.fLastStreamToken == this) fMaster.fLastStreamToken = NULL;
+
+ delete fRTPgs;
+ if (fRTCPgs != fRTPgs) delete fRTCPgs; // avoid a double delete when they're shared
+ fRTPgs = NULL; fRTCPgs = NULL;
+}
diff --git a/liveMedia/OutputFile.cpp b/liveMedia/OutputFile.cpp
new file mode 100644
index 0000000..ecca580
--- /dev/null
+++ b/liveMedia/OutputFile.cpp
@@ -0,0 +1,60 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Common routines for opening/closing named output files
+// Implementation
+
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+#include <io.h>
+#include <fcntl.h>
+#endif
+#ifndef _WIN32_WCE
+#include <sys/stat.h>
+#endif
+#include <string.h>
+
+#include "OutputFile.hh"
+
+// Open "fileName" for binary writing.  The special names "stdout" and "stderr" map to
+// the corresponding standard streams (switched to binary mode on Windows).  Returns
+// NULL on failure, after setting an error message in "env".
+FILE* OpenOutputFile(UsageEnvironment& env, char const* fileName) {
+ FILE* fid;
+
+ // Check for special case 'file names': "stdout" and "stderr"
+ if (strcmp(fileName, "stdout") == 0) {
+ fid = stdout;
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+ _setmode(_fileno(stdout), _O_BINARY); // convert to binary mode
+#endif
+ } else if (strcmp(fileName, "stderr") == 0) {
+ fid = stderr;
+#if (defined(__WIN32__) || defined(_WIN32)) && !defined(_WIN32_WCE)
+ _setmode(_fileno(stderr), _O_BINARY); // convert to binary mode
+#endif
+ } else {
+ fid = fopen(fileName, "wb");
+ }
+
+ if (fid == NULL) {
+ env.setResultMsg("unable to open file \"", fileName, "\"");
+ }
+
+ return fid;
+}
+
+// Close "fid", unless it's NULL or one of the standard streams (which the caller may
+// wish to use again later).
+void CloseOutputFile(FILE* fid) {
+ if (fid == NULL) return;
+ if (fid == stdout || fid == stderr) return;
+
+ fclose(fid);
+}
diff --git a/liveMedia/PassiveServerMediaSubsession.cpp b/liveMedia/PassiveServerMediaSubsession.cpp
new file mode 100644
index 0000000..33c4fb5
--- /dev/null
+++ b/liveMedia/PassiveServerMediaSubsession.cpp
@@ -0,0 +1,228 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that represents an existing
+// 'RTPSink', rather than one that creates new 'RTPSink's on demand.
+// Implementation
+
+#include "PassiveServerMediaSubsession.hh"
+#include <GroupsockHelper.hh>
+
+////////// PassiveServerMediaSubsession //////////
+
+// Factory function: wrap an existing RTP sink (and optional RTCP instance) in a new
+// "PassiveServerMediaSubsession".
+PassiveServerMediaSubsession*
+PassiveServerMediaSubsession::createNew(RTPSink& rtpSink,
+     RTCPInstance* rtcpInstance) {
+ PassiveServerMediaSubsession* newSubsession
+   = new PassiveServerMediaSubsession(rtpSink, rtcpInstance);
+ return newSubsession;
+}
+
+// Constructor.  "fClientRTCPSourceRecords" maps client session ids (one-word keys)
+// to "RTCPSourceRecord"s; the SDP description is generated lazily, in "sdpLines()".
+PassiveServerMediaSubsession
+::PassiveServerMediaSubsession(RTPSink& rtpSink, RTCPInstance* rtcpInstance)
+ : ServerMediaSubsession(rtpSink.envir()),
+ fSDPLines(NULL), fRTPSink(rtpSink), fRTCPInstance(rtcpInstance) {
+ fClientRTCPSourceRecords = HashTable::create(ONE_WORD_HASH_KEYS);
+}
+
+// Records the address and port from which one client sends us RTCP packets, so that
+// a "RR" handler can later be set/unset for that client.
+class RTCPSourceRecord {
+public:
+ RTCPSourceRecord(netAddressBits addr, Port const& port)
+ : addr(addr), port(port) {
+ }
+
+ netAddressBits addr;
+ Port port;
+};
+
+// Destructor: free the cached SDP description, then drain and delete every remaining
+// per-client RTCP source record before deleting the table itself.
+PassiveServerMediaSubsession::~PassiveServerMediaSubsession() {
+ delete[] fSDPLines;
+
+ RTCPSourceRecord* source;
+ while ((source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->RemoveNext())) != NULL) {
+   delete source;
+ }
+
+ delete fClientRTCPSourceRecords;
+}
+
+// RTP and RTCP are considered multiplexed iff they share a single "groupsock".
+Boolean PassiveServerMediaSubsession::rtcpIsMuxed() {
+ if (fRTCPInstance == NULL) return False;
+
+ Groupsock* rtcpGS = fRTCPInstance->RTCPgs();
+ return rtcpGS == &(fRTPSink.groupsockBeingUsed());
+}
+
+// Return (generating and caching on first call) the SDP description of this
+// subsession, built from the properties of the existing (multicast) RTP sink.
+char const*
+PassiveServerMediaSubsession::sdpLines() {
+ if (fSDPLines == NULL ) {
+ // Construct a set of SDP lines that describe this subsession:
+ // Use the components from "rtpSink":
+ Groupsock const& gs = fRTPSink.groupsockBeingUsed();
+ AddressString groupAddressStr(gs.groupAddress());
+ unsigned short portNum = ntohs(gs.port().num());
+ unsigned char ttl = gs.ttl();
+ unsigned char rtpPayloadType = fRTPSink.rtpPayloadType();
+ char const* mediaType = fRTPSink.sdpMediaType();
+ // Fall back on a nominal 50 kbps estimate if there's no RTCP instance to ask:
+ unsigned estBitrate
+ = fRTCPInstance == NULL ? 50 : fRTCPInstance->totSessionBW();
+ char* rtpmapLine = fRTPSink.rtpmapLine();
+ char const* rtcpmuxLine = rtcpIsMuxed() ? "a=rtcp-mux\r\n" : "";
+ char const* rangeLine = rangeSDPLine();
+ char const* auxSDPLine = fRTPSink.auxSDPLine();
+ if (auxSDPLine == NULL) auxSDPLine = "";
+
+ char const* const sdpFmt =
+ "m=%s %d RTP/AVP %d\r\n"
+ "c=IN IP4 %s/%d\r\n"
+ "b=AS:%u\r\n"
+ "%s"
+ "%s"
+ "%s"
+ "%s"
+ "a=control:%s\r\n";
+ // Size the buffer by summing the format string's length with an upper bound on the
+ // length of each substituted value:
+ unsigned sdpFmtSize = strlen(sdpFmt)
+ + strlen(mediaType) + 5 /* max short len */ + 3 /* max char len */
+ + strlen(groupAddressStr.val()) + 3 /* max char len */
+ + 20 /* max int len */
+ + strlen(rtpmapLine)
+ + strlen(rtcpmuxLine)
+ + strlen(rangeLine)
+ + strlen(auxSDPLine)
+ + strlen(trackId());
+ char* sdpLines = new char[sdpFmtSize];
+ sprintf(sdpLines, sdpFmt,
+  mediaType, // m= <media>
+  portNum, // m= <port>
+  rtpPayloadType, // m= <fmt list>
+  groupAddressStr.val(), // c= <connection address>
+  ttl, // c= TTL
+  estBitrate, // b=AS:<bandwidth>
+  rtpmapLine, // a=rtpmap:... (if present)
+  rtcpmuxLine, // a=rtcp-mux:... (if present)
+  rangeLine, // a=range:... (if present)
+  auxSDPLine, // optional extra SDP line
+  trackId()); // a=control:<track-id>
+ delete[] (char*)rangeLine; delete[] rtpmapLine; // these two were dynamically allocated for us
+
+ fSDPLines = strDup(sdpLines);
+ delete[] sdpLines;
+ }
+
+ return fSDPLines;
+}
+
+// Report the (multicast) stream parameters to a new client, optionally retargeting
+// the groupsocks at a client-specified destination address, and record the client's
+// RTCP source (address+port) for later "RR" handling.
+void PassiveServerMediaSubsession
+::getStreamParameters(unsigned clientSessionId,
+        netAddressBits clientAddress,
+        Port const& /*clientRTPPort*/,
+        Port const& clientRTCPPort,
+        int /*tcpSocketNum*/,
+        unsigned char /*rtpChannelId*/,
+        unsigned char /*rtcpChannelId*/,
+        netAddressBits& destinationAddress,
+        u_int8_t& destinationTTL,
+        Boolean& isMulticast,
+        Port& serverRTPPort,
+        Port& serverRTCPPort,
+        void*& streamToken) {
+ isMulticast = True;
+ Groupsock& gs = fRTPSink.groupsockBeingUsed();
+ // A TTL of 255 (the 'unset' sentinel) means: use the groupsock's own TTL:
+ if (destinationTTL == 255) destinationTTL = gs.ttl();
+ if (destinationAddress == 0) { // normal case
+ destinationAddress = gs.groupAddress().s_addr;
+ } else { // use the client-specified destination address instead:
+ struct in_addr destinationAddr; destinationAddr.s_addr = destinationAddress;
+ gs.changeDestinationParameters(destinationAddr, 0, destinationTTL);
+ if (fRTCPInstance != NULL) {
+ Groupsock* rtcpGS = fRTCPInstance->RTCPgs();
+ rtcpGS->changeDestinationParameters(destinationAddr, 0, destinationTTL);
+ }
+ }
+ serverRTPPort = gs.port();
+ if (fRTCPInstance != NULL) {
+ Groupsock* rtcpGS = fRTCPInstance->RTCPgs();
+ serverRTCPPort = rtcpGS->port();
+ }
+ streamToken = NULL; // not used
+
+ // Make a record of this client's source - for RTCP RR handling:
+ RTCPSourceRecord* source = new RTCPSourceRecord(clientAddress, clientRTCPPort);
+ fClientRTCPSourceRecords->Add((char const*)clientSessionId, source);
+}
+
+// 'Start' the (already-playing, multicast) stream for this client: report the current
+// RTP sequence number and timestamp, enlarge the send buffer, send an immediate RTCP
+// "SR", and install the client's RTCP "RR" handler.
+void PassiveServerMediaSubsession::startStream(unsigned clientSessionId,
+            void* /*streamToken*/,
+            TaskFunc* rtcpRRHandler,
+            void* rtcpRRHandlerClientData,
+            unsigned short& rtpSeqNum,
+            unsigned& rtpTimestamp,
+            ServerRequestAlternativeByteHandler* /*serverRequestAlternativeByteHandler*/,
+            void* /*serverRequestAlternativeByteHandlerClientData*/) {
+ rtpSeqNum = fRTPSink.currentSeqNo();
+ rtpTimestamp = fRTPSink.presetNextTimestamp();
+
+ // Try to use a big send buffer for RTP - at least 0.1 second of
+ // specified bandwidth and at least 50 KB
+ unsigned streamBitrate = fRTCPInstance == NULL ? 50 : fRTCPInstance->totSessionBW(); // in kbps
+ unsigned rtpBufSize = streamBitrate * 25 / 2; // 1 kbps * 0.1 s = 12.5 bytes
+ if (rtpBufSize < 50 * 1024) rtpBufSize = 50 * 1024;
+ increaseSendBufferTo(envir(), fRTPSink.groupsockBeingUsed().socketNum(), rtpBufSize);
+
+ if (fRTCPInstance != NULL) {
+ // Hack: Send a RTCP "SR" packet now, so that receivers will (likely) be able to
+ // get RTCP-synchronized presentation times immediately:
+ fRTCPInstance->sendReport();
+
+ // Set up the handler for incoming RTCP "RR" packets from this client:
+ RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->Lookup((char const*)clientSessionId));
+ if (source != NULL) {
+ fRTCPInstance->setSpecificRRHandler(source->addr, source->port,
+     rtcpRRHandler, rtcpRRHandlerClientData);
+ }
+ }
+}
+
+// Report the current NPT of the (continuously playing) multicast stream; because the
+// stream is shared, "streamToken" is ignored.
+float PassiveServerMediaSubsession::getCurrentNPT(void* streamToken) {
+ // Return the elapsed time between our "RTPSink"s creation time, and the current time:
+ struct timeval const& creationTime = fRTPSink.creationTime(); // alias
+
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+
+ return (float)(timeNow.tv_sec - creationTime.tv_sec + (timeNow.tv_usec - creationTime.tv_usec)/1000000.0);
+}
+
+// Expose our (single, shared) RTP sink and RTCP instance; "streamToken" is unused.
+void PassiveServerMediaSubsession
+::getRTPSinkandRTCP(void* streamToken,
+      RTPSink const*& rtpSink, RTCPInstance const*& rtcp) {
+ rtpSink = &fRTPSink;
+ rtcp = fRTCPInstance;
+}
+
+// Tear down this client's participation: remove its RTCP "RR" handler and delete its
+// "RTCPSourceRecord".  (The shared multicast stream itself keeps playing.)
+void PassiveServerMediaSubsession::deleteStream(unsigned clientSessionId, void*& /*streamToken*/) {
+ // Lookup and remove the 'RTCPSourceRecord' for this client.  Also turn off RTCP "RR" handling:
+ RTCPSourceRecord* source = (RTCPSourceRecord*)(fClientRTCPSourceRecords->Lookup((char const*)clientSessionId));
+ if (source != NULL) {
+ if (fRTCPInstance != NULL) {
+ fRTCPInstance->unsetSpecificRRHandler(source->addr, source->port);
+ }
+
+ fClientRTCPSourceRecords->Remove((char const*)clientSessionId);
+ delete source;
+ }
+}
diff --git a/liveMedia/ProxyServerMediaSession.cpp b/liveMedia/ProxyServerMediaSession.cpp
new file mode 100644
index 0000000..c58c998
--- /dev/null
+++ b/liveMedia/ProxyServerMediaSession.cpp
@@ -0,0 +1,943 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A subclass of "ServerMediaSession" that can be used to create a (unicast) RTSP server that acts as a 'proxy' for
+// another (unicast or multicast) RTSP/RTP stream.
+// Implementation
+
+#include "liveMedia.hh"
+#include "RTSPCommon.hh"
+#include "GroupsockHelper.hh" // for "our_random()"
+
+#ifndef MILLION
+#define MILLION 1000000
+#endif
+
+// A "OnDemandServerMediaSubsession" subclass, used to implement a unicast RTSP server that's proxying another RTSP stream:
+
+// A "OnDemandServerMediaSubsession" subclass that re-serves one track of the
+// back-end stream being proxied; it wraps the corresponding 'client-side'
+// "MediaSubsession".
+class ProxyServerMediaSubsession: public OnDemandServerMediaSubsession {
+public:
+ ProxyServerMediaSubsession(MediaSubsession& mediaSubsession,
+       portNumBits initialPortNum, Boolean multiplexRTCPWithRTP);
+ virtual ~ProxyServerMediaSubsession();
+
+ char const* codecName() const { return fCodecName; }
+ char const* url() const { return ((ProxyServerMediaSession*)fParentSession)->url(); }
+
+private: // redefined virtual functions
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+          unsigned& estBitrate);
+ virtual void closeStreamSource(FramedSource *inputSource);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+        unsigned char rtpPayloadTypeIfDynamic,
+        FramedSource* inputSource);
+ virtual Groupsock* createGroupsock(struct in_addr const& addr, Port port);
+ virtual RTCPInstance* createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
+       unsigned char const* cname, RTPSink* sink);
+
+private:
+ static void subsessionByeHandler(void* clientData);
+ void subsessionByeHandler();
+
+ int verbosityLevel() const { return ((ProxyServerMediaSession*)fParentSession)->fVerbosityLevel; }
+
+private:
+ friend class ProxyRTSPClient;
+ MediaSubsession& fClientMediaSubsession; // the 'client' media subsession object that corresponds to this 'server' media subsession
+ char const* fCodecName; // copied from "fClientMediaSubsession" once it's been set up
+ ProxyServerMediaSubsession* fNext; // used when we're part of a queue
+ Boolean fHaveSetupStream;
+};
+
+
+////////// ProxyServerMediaSession implementation //////////
+
+// Print a brief identifying tag for a "ProxyServerMediaSession" to the environment's
+// log output.
+UsageEnvironment& operator<<(UsageEnvironment& env, const ProxyServerMediaSession& psms) { // used for debugging
+ return env << "ProxyServerMediaSession[" << psms.url() << "]";
+}
+
+// The default "createNewProxyRTSPClientFunc": construct a plain "ProxyRTSPClient".
+// (Callers may supply their own factory function to create a subclass instead.)
+ProxyRTSPClient*
+defaultCreateNewProxyRTSPClientFunc(ProxyServerMediaSession& ourServerMediaSession,
+        char const* rtspURL,
+        char const* username, char const* password,
+        portNumBits tunnelOverHTTPPortNum, int verbosityLevel,
+        int socketNumToServer) {
+ return new ProxyRTSPClient(ourServerMediaSession, rtspURL, username, password,
+       tunnelOverHTTPPortNum, verbosityLevel, socketNumToServer);
+}
+
+// Public factory function: create a proxy session for "inputStreamURL", to be served
+// by "ourMediaServer" under the name "streamName".
+ProxyServerMediaSession* ProxyServerMediaSession
+::createNew(UsageEnvironment& env, GenericMediaServer* ourMediaServer,
+     char const* inputStreamURL, char const* streamName,
+     char const* username, char const* password,
+     portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer,
+     MediaTranscodingTable* transcodingTable) {
+ return new ProxyServerMediaSession(env, ourMediaServer, inputStreamURL, streamName, username, password,
+         tunnelOverHTTPPortNum, verbosityLevel, socketNumToServer,
+         transcodingTable);
+}
+
+
+// Constructor: set up our state, then immediately open a RTSP connection to the
+// back-end stream and send an asynchronous "DESCRIBE" (whose response - handled in
+// "continueAfterDESCRIBE()" - will populate our subsessions).
+ProxyServerMediaSession
+::ProxyServerMediaSession(UsageEnvironment& env, GenericMediaServer* ourMediaServer,
+     char const* inputStreamURL, char const* streamName,
+     char const* username, char const* password,
+     portNumBits tunnelOverHTTPPortNum, int verbosityLevel,
+     int socketNumToServer,
+     MediaTranscodingTable* transcodingTable,
+     createNewProxyRTSPClientFunc* ourCreateNewProxyRTSPClientFunc,
+     portNumBits initialPortNum, Boolean multiplexRTCPWithRTP)
+ : ServerMediaSession(env, streamName, NULL, NULL, False, NULL),
+ describeCompletedFlag(0), fOurMediaServer(ourMediaServer), fClientMediaSession(NULL),
+ fVerbosityLevel(verbosityLevel),
+ fPresentationTimeSessionNormalizer(new PresentationTimeSessionNormalizer(envir())),
+ fCreateNewProxyRTSPClientFunc(ourCreateNewProxyRTSPClientFunc),
+ fTranscodingTable(transcodingTable),
+ fInitialPortNum(initialPortNum), fMultiplexRTCPWithRTP(multiplexRTCPWithRTP) {
+ // Open a RTSP connection to the input stream, and send a "DESCRIBE" command.
+ // We'll use the SDP description in the response to set ourselves up.
+ fProxyRTSPClient
+ = (*fCreateNewProxyRTSPClientFunc)(*this, inputStreamURL, username, password,
+        tunnelOverHTTPPortNum,
+        verbosityLevel > 0 ? verbosityLevel-1 : verbosityLevel, // the client is one notch quieter than us
+        socketNumToServer);
+ fProxyRTSPClient->sendDESCRIBE();
+}
+
+// Destructor: tell the back-end server to stop the stream, then release our state.
+ProxyServerMediaSession::~ProxyServerMediaSession() {
+ if (fVerbosityLevel > 0) {
+ envir() << *this << "::~ProxyServerMediaSession()\n";
+ }
+
+ // Begin by sending a "TEARDOWN" command (without checking for a response):
+ if (fProxyRTSPClient != NULL && fClientMediaSession != NULL) {
+ fProxyRTSPClient->sendTeardownCommand(*fClientMediaSession, NULL, fProxyRTSPClient->auth());
+ }
+
+ // Then delete our state:
+ Medium::close(fClientMediaSession);
+ Medium::close(fProxyRTSPClient);
+ Medium::close(fPresentationTimeSessionNormalizer);
+}
+
+// The URL of the back-end stream being proxied (NULL if no RTSP client exists yet).
+char const* ProxyServerMediaSession::url() const {
+ if (fProxyRTSPClient == NULL) return NULL;
+
+ return fProxyRTSPClient->url();
+}
+
+// Create the "Groupsock" used by our subsessions, with TTL 255.
+Groupsock* ProxyServerMediaSession::createGroupsock(struct in_addr const& addr, Port port) {
+ // Default implementation; may be redefined by subclasses:
+ return new Groupsock(envir(), addr, port, 255);
+}
+
+// Create the "RTCPInstance" paired with "sink" (NULL source parameter => we're a
+// server/sender, not a receiver).
+RTCPInstance* ProxyServerMediaSession
+::createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
+      unsigned char const* cname, RTPSink* sink) {
+ // Default implementation; may be redefined by subclasses:
+ return RTCPInstance::createNew(envir(), RTCPgs, totSessionBW, cname, sink, NULL/*we're a server*/);
+}
+
+// Hook that lets subclasses exclude individual back-end tracks from being proxied;
+// by default, every track is allowed.
+Boolean ProxyServerMediaSession::allowProxyingForSubsession(MediaSubsession const& /*mss*/) {
+ // Default implementation
+ return True;
+}
+
+// Called once the back-end "DESCRIBE" has completed: parse the SDP description into a
+// client "MediaSession", and create one "ProxyServerMediaSubsession" per allowed track.
+void ProxyServerMediaSession::continueAfterDESCRIBE(char const* sdpDescription) {
+ describeCompletedFlag = 1;
+
+ // Create a (client) "MediaSession" object from the stream's SDP description ("resultString"), then iterate through its
+ // "MediaSubsession" objects, to set up corresponding "ServerMediaSubsession" objects that we'll use to serve the stream's tracks.
+ do {
+ fClientMediaSession = MediaSession::createNew(envir(), sdpDescription);
+ if (fClientMediaSession == NULL) break;
+
+ MediaSubsessionIterator iter(*fClientMediaSession);
+ for (MediaSubsession* mss = iter.next(); mss != NULL; mss = iter.next()) {
+ if (!allowProxyingForSubsession(*mss)) continue;
+
+ ServerMediaSubsession* smss
+ = new ProxyServerMediaSubsession(*mss, fInitialPortNum, fMultiplexRTCPWithRTP);
+ addSubsession(smss);
+ if (fVerbosityLevel > 0) {
+ envir() << *this << " added new \"ProxyServerMediaSubsession\" for "
+  << mss->protocolName() << "/" << mss->mediumName() << "/" << mss->codecName() << " track\n";
+ }
+ }
+ } while (0);
+}
+
+// Discard all state that was derived from the previous "DESCRIBE" response (client
+// sessions, our subsessions, and the client "MediaSession"), in preparation for a
+// fresh "DESCRIBE".
+void ProxyServerMediaSession::resetDESCRIBEState() {
+ // Delete all of our "ProxyServerMediaSubsession"s; they'll get set up again once we get a response to the new "DESCRIBE".
+ if (fOurMediaServer != NULL) {
+ // First, close any client connections that may have already been set up:
+ fOurMediaServer->closeAllClientSessionsForServerMediaSession(this);
+ }
+ deleteAllSubsessions();
+
+ // Finally, delete the client "MediaSession" object that we had set up after receiving the response to the previous "DESCRIBE":
+ Medium::close(fClientMediaSession); fClientMediaSession = NULL;
+}
+
+///////// RTSP 'response handlers' //////////
+
+static void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ char const* res;
+
+ if (resultCode == 0) {
+ // The "DESCRIBE" command succeeded, so "resultString" should be the stream's SDP description.
+ res = resultString;
+ } else {
+ // The "DESCRIBE" command failed.
+ res = NULL;
+ }
+ ((ProxyRTSPClient*)rtspClient)->continueAfterDESCRIBE(res);
+ delete[] resultString;
+}
+
+static void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ ((ProxyRTSPClient*)rtspClient)->continueAfterSETUP(resultCode);
+ delete[] resultString;
+}
+
+static void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ ((ProxyRTSPClient*)rtspClient)->continueAfterPLAY(resultCode);
+ delete[] resultString;
+}
+
+static void continueAfterOPTIONS(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ Boolean serverSupportsGetParameter = False;
+ if (resultCode == 0) {
+ // Note whether the server told us that it supports the "GET_PARAMETER" command:
+ serverSupportsGetParameter = RTSPOptionIsSupported("GET_PARAMETER", resultString);
+ }
+ ((ProxyRTSPClient*)rtspClient)->continueAfterLivenessCommand(resultCode, serverSupportsGetParameter);
+ delete[] resultString;
+}
+
#ifdef SEND_GET_PARAMETER_IF_SUPPORTED
// RTSP response handler for the periodic "GET_PARAMETER" liveness command.
// Getting here at all means the server supports "GET_PARAMETER" - hence the hard-coded "True".
static void continueAfterGET_PARAMETER(RTSPClient* rtspClient, int resultCode, char* resultString) {
  ((ProxyRTSPClient*)rtspClient)->continueAfterLivenessCommand(resultCode, True);
  delete[] resultString;
}
#endif
+
+
+////////// "ProxyRTSPClient" implementation /////////
+
// Debugging output: prints the client with its control URL, e.g. "ProxyRTSPClient[rtsp://...]".
UsageEnvironment& operator<<(UsageEnvironment& env, const ProxyRTSPClient& proxyRTSPClient) { // used for debugging
  return env << "ProxyRTSPClient[" << proxyRTSPClient.url() << "]";
}
+
+ProxyRTSPClient::ProxyRTSPClient(ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL,
+ char const* username, char const* password,
+ portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer)
+ : RTSPClient(ourServerMediaSession.envir(), rtspURL, verbosityLevel, "ProxyRTSPClient",
+ tunnelOverHTTPPortNum == (portNumBits)(~0) ? 0 : tunnelOverHTTPPortNum, socketNumToServer),
+ fOurServerMediaSession(ourServerMediaSession), fOurURL(strDup(rtspURL)), fStreamRTPOverTCP(tunnelOverHTTPPortNum != 0),
+ fSetupQueueHead(NULL), fSetupQueueTail(NULL), fNumSetupsDone(0), fNextDESCRIBEDelay(1),
+ fServerSupportsGetParameter(False), fLastCommandWasPLAY(False), fDoneDESCRIBE(False),
+ fLivenessCommandTask(NULL), fDESCRIBECommandTask(NULL), fSubsessionTimerTask(NULL), fResetTask(NULL) {
+ if (username != NULL && password != NULL) {
+ fOurAuthenticator = new Authenticator(username, password);
+ } else {
+ fOurAuthenticator = NULL;
+ }
+}
+
void ProxyRTSPClient::reset() {
  // Cancel any still-pending tasks first, so their stale callbacks can't fire after the reset:
  envir().taskScheduler().unscheduleDelayedTask(fLivenessCommandTask);
  envir().taskScheduler().unscheduleDelayedTask(fDESCRIBECommandTask);
  envir().taskScheduler().unscheduleDelayedTask(fSubsessionTimerTask);
  envir().taskScheduler().unscheduleDelayedTask(fResetTask);

  // Return our 'SETUP queue' and per-connection progress flags to their initial (constructor) values:
  fSetupQueueHead = fSetupQueueTail = NULL;
  fNumSetupsDone = 0;
  fNextDESCRIBEDelay = 1;
  fLastCommandWasPLAY = False;
  fDoneDESCRIBE = False;

  // Finally, reset the base-class "RTSPClient" state:
  RTSPClient::reset();
}
+
ProxyRTSPClient::~ProxyRTSPClient() {
  reset(); // also unschedules any still-pending tasks, so they can't fire after we're destroyed

  delete fOurAuthenticator;
  delete[] fOurURL; // allocated with strDup() in our constructor
}
+
+int ProxyRTSPClient::connectToServer(int socketNum, portNumBits remotePortNum) {
+ int res;
+ res = RTSPClient::connectToServer(socketNum, remotePortNum);
+
+ if (res == 0 && fDoneDESCRIBE && fStreamRTPOverTCP) {
+ if (fVerbosityLevel > 0) {
+ envir() << "ProxyRTSPClient::connectToServer calling scheduleReset()\n";
+ }
+ scheduleReset();
+ }
+
+ return res;
+}
+
+void ProxyRTSPClient::continueAfterDESCRIBE(char const* sdpDescription) {
+ if (sdpDescription != NULL) {
+ fOurServerMediaSession.continueAfterDESCRIBE(sdpDescription);
+
+ // Unlike most RTSP streams, there might be a long delay between this "DESCRIBE" command (to the downstream server) and the
+ // subsequent "SETUP"/"PLAY" - which doesn't occur until the first time that a client requests the stream.
+ // To prevent the proxied connection (between us and the downstream server) from timing out, we send periodic 'liveness'
+ // ("OPTIONS" or "GET_PARAMETER") commands. (The usual RTCP liveness mechanism wouldn't work here, because RTCP packets
+ // don't get sent until after the "PLAY" command.)
+ scheduleLivenessCommand();
+ } else {
+ // The "DESCRIBE" command failed, most likely because the server or the stream is not yet running.
+ // Reschedule another "DESCRIBE" command to take place later:
+ scheduleDESCRIBECommand();
+ }
+ fDoneDESCRIBE = True;
+}
+
+void ProxyRTSPClient::continueAfterLivenessCommand(int resultCode, Boolean serverSupportsGetParameter) {
+ if (resultCode != 0) {
+ // The periodic 'liveness' command failed, suggesting that the back-end stream is no longer alive.
+ // We handle this by resetting our connection state with this server. Any current clients will be closed, but
+ // subsequent clients will cause new RTSP "SETUP"s and "PLAY"s to get done, restarting the stream.
+ // Then continue by sending more "DESCRIBE" commands, to try to restore the stream.
+
+ fServerSupportsGetParameter = False; // until we learn otherwise, in response to a future "OPTIONS" command
+
+ if (resultCode < 0) {
+ // The 'liveness' command failed without getting a response from the server (otherwise "resultCode" would have been > 0).
+ // This suggests that the RTSP connection itself has failed. Print this error code, in case it's useful for debugging:
+ if (fVerbosityLevel > 0) {
+ envir() << *this << ": lost connection to server ('errno': " << -resultCode << "). Scheduling reset...\n";
+ }
+ }
+
+ scheduleReset();
+ return;
+ }
+
+ fServerSupportsGetParameter = serverSupportsGetParameter;
+
+ // Schedule the next 'liveness' command (i.e., to tell the back-end server that we're still alive):
+ scheduleLivenessCommand();
+}
+
+#define SUBSESSION_TIMEOUT_SECONDS 5 // how many seconds to wait for the last track's "SETUP" to be done (note below)
+
// Handles the response to a "SETUP" sent to the back-end server.  Responses come back in request
// order, so the subsession at the head of our 'SETUP queue' is the one this response belongs to
// (see the enqueue logic in "ProxyServerMediaSubsession::createNewStreamSource()").
void ProxyRTSPClient::continueAfterSETUP(int resultCode) {
  if (resultCode != 0) {
    // The "SETUP" command failed, so arrange to reset the state. (We don't do this now, because it deletes the
    // "ProxyServerMediaSubsession", and we can't do that during "ProxyServerMediaSubsession::createNewStreamSource()".)
    scheduleReset();
    return;
  }

  if (fVerbosityLevel > 0) {
    envir() << *this << "::continueAfterSETUP(): head codec: " << fSetupQueueHead->codecName()
            << "; numSubsessions " << fSetupQueueHead->fParentSession->numSubsessions() << "\n\tqueue:";
    for (ProxyServerMediaSubsession* p = fSetupQueueHead; p != NULL; p = p->fNext) {
      envir() << "\t" << p->codecName();
    }
    envir() << "\n";
  }
  envir().taskScheduler().unscheduleDelayedTask(fSubsessionTimerTask); // in case it had been set

  // Dequeue the first "ProxyServerMediaSubsession" from our 'SETUP queue'. It will be the one for which this "SETUP" was done:
  ProxyServerMediaSubsession* smss = fSetupQueueHead; // Assert: != NULL
  fSetupQueueHead = fSetupQueueHead->fNext;
  if (fSetupQueueHead == NULL) fSetupQueueTail = NULL; // queue is now empty; keep tail consistent

  if (fSetupQueueHead != NULL) {
    // There are still entries in the queue, for tracks for which we have still to do a "SETUP".
    // "SETUP" the first of these now:
    sendSetupCommand(fSetupQueueHead->fClientMediaSubsession, ::continueAfterSETUP,
                     False, fStreamRTPOverTCP, False, fOurAuthenticator);
    ++fNumSetupsDone;
    fSetupQueueHead->fHaveSetupStream = True;
  } else {
    if (fNumSetupsDone >= smss->fParentSession->numSubsessions()) {
      // We've now finished setting up each of our subsessions (i.e., 'tracks').
      // Continue by sending a "PLAY" command (an 'aggregate' "PLAY" command, on the whole session):
      sendPlayCommand(smss->fClientMediaSubsession.parentSession(), ::continueAfterPLAY, -1.0f, -1.0f, 1.0f, fOurAuthenticator);
          // the "-1.0f" "start" parameter causes the "PLAY" to be sent without a "Range:" header, in case we'd already done
          // a "PLAY" before (as a result of a 'subsession timeout' (note below))
      fLastCommandWasPLAY = True;
    } else {
      // Some of this session's subsessions (i.e., 'tracks') remain to be "SETUP". They might get "SETUP" very soon, but it's
      // also possible - if the remote client chose to play only some of the session's tracks - that they might not.
      // To allow for this possibility, we set a timer. If the timer expires without the remaining subsessions getting "SETUP",
      // then we send a "PLAY" command anyway:
      fSubsessionTimerTask
        = envir().taskScheduler().scheduleDelayedTask(SUBSESSION_TIMEOUT_SECONDS*MILLION, (TaskFunc*)subsessionTimeout, this);
    }
  }
}
+
+void ProxyRTSPClient::continueAfterPLAY(int resultCode) {
+ if (resultCode != 0) {
+ // The "PLAY" command failed, so arrange to reset the state. (We don't do this now, because it deletes the
+ // "ProxyServerMediaSubsession", and we can't do that during "ProxyServerMediaSubsession::createNewStreamSource()".)
+ scheduleReset();
+ return;
+ }
+}
+
+void ProxyRTSPClient::scheduleLivenessCommand() {
+ // Delay a random time before sending another 'liveness' command.
+ unsigned delayMax = sessionTimeoutParameter(); // if the server specified a maximum time between 'liveness' probes, then use that
+ if (delayMax == 0) {
+ delayMax = 60;
+ }
+
+ // Choose a random time from [delayMax/2,delayMax-1) seconds:
+ unsigned const us_1stPart = delayMax*500000;
+ unsigned uSecondsToDelay;
+ if (us_1stPart <= 1000000) {
+ uSecondsToDelay = us_1stPart;
+ } else {
+ unsigned const us_2ndPart = us_1stPart-1000000;
+ uSecondsToDelay = us_1stPart + (us_2ndPart*our_random())%us_2ndPart;
+ }
+ fLivenessCommandTask = envir().taskScheduler().scheduleDelayedTask(uSecondsToDelay, sendLivenessCommand, this);
+}
+
// Task routine: sends one 'liveness' command ("OPTIONS", or optionally "GET_PARAMETER") to the
// back-end server.  Scheduled by "scheduleLivenessCommand()"; the response handler reschedules us.
void ProxyRTSPClient::sendLivenessCommand(void* clientData) {
  ProxyRTSPClient* rtspClient = (ProxyRTSPClient*)clientData;
  rtspClient->fLivenessCommandTask = NULL; // the task is running now, so it's no longer pending

  // Note. By default, we do not send "GET_PARAMETER" as our 'liveness notification' command, even if the server previously
  // indicated (in its response to our earlier "OPTIONS" command) that it supported "GET_PARAMETER". This is because
  // "GET_PARAMETER" crashes some camera servers (even though they claimed to support "GET_PARAMETER").
#ifdef SEND_GET_PARAMETER_IF_SUPPORTED
  MediaSession* sess = rtspClient->fOurServerMediaSession.fClientMediaSession;

  if (rtspClient->fServerSupportsGetParameter && rtspClient->fNumSetupsDone > 0 && sess != NULL) {
    rtspClient->sendGetParameterCommand(*sess, ::continueAfterGET_PARAMETER, "", rtspClient->auth());
  } else {
#endif
    rtspClient->sendOptionsCommand(::continueAfterOPTIONS, rtspClient->auth());
#ifdef SEND_GET_PARAMETER_IF_SUPPORTED
  }
#endif
}
+
// Arranges for "doReset()" to run from the event loop (delay 0) rather than calling it directly;
// an immediate reset would delete objects that may still be in use on the current call stack.
void ProxyRTSPClient::scheduleReset() {
  if (fVerbosityLevel > 0) {
    envir() << "ProxyRTSPClient::scheduleReset\n";
  }
  envir().taskScheduler().rescheduleDelayedTask(fResetTask, 0, doReset, this);
}
+
// Performs the actual reset (scheduled by "scheduleReset()"): tears down our own state and the
// "ProxyServerMediaSession"s, then starts over with a fresh "DESCRIBE".
void ProxyRTSPClient::doReset() {
  fResetTask = NULL; // we're running now, so the task is no longer pending
  if (fVerbosityLevel > 0) {
    envir() << *this << "::doReset\n";
  }

  reset(); // our own state (tasks, 'SETUP queue', flags)
  fOurServerMediaSession.resetDESCRIBEState(); // deletes the proxied subsessions and client "MediaSession"

  setBaseURL(fOurURL); // because we'll be sending an initial "DESCRIBE" all over again
  sendDESCRIBE();
}
+
+void ProxyRTSPClient::doReset(void* clientData) {
+ ProxyRTSPClient* rtspClient = (ProxyRTSPClient*)clientData;
+ rtspClient->doReset();
+}
+
+void ProxyRTSPClient::scheduleDESCRIBECommand() {
+ // Delay 1s, 2s, 4s, 8s ... 256s until sending the next "DESCRIBE". Then, keep delaying a random time from [256..511] seconds:
+ unsigned secondsToDelay;
+ if (fNextDESCRIBEDelay <= 256) {
+ secondsToDelay = fNextDESCRIBEDelay;
+ fNextDESCRIBEDelay *= 2;
+ } else {
+ secondsToDelay = 256 + (our_random()&0xFF); // [256..511] seconds
+ }
+
+ if (fVerbosityLevel > 0) {
+ envir() << *this << ": RTSP \"DESCRIBE\" command failed; trying again in " << secondsToDelay << " seconds\n";
+ }
+ fDESCRIBECommandTask = envir().taskScheduler().scheduleDelayedTask(secondsToDelay*MILLION, sendDESCRIBE, this);
+}
+
+void ProxyRTSPClient::sendDESCRIBE(void* clientData) {
+ ProxyRTSPClient* rtspClient = (ProxyRTSPClient*)clientData;
+ if (rtspClient != NULL) {
+ rtspClient->fDESCRIBECommandTask = NULL;
+ rtspClient->sendDESCRIBE();
+ }
+}
+
// Sends a "DESCRIBE" to the back-end server; the response is handled by the file-scope
// "::continueAfterDESCRIBE" handler above.
void ProxyRTSPClient::sendDESCRIBE() {
  sendDescribeCommand(::continueAfterDESCRIBE, auth());
}
+
// Task trampoline (scheduled in "continueAfterSETUP()"): forwards to the member function.
void ProxyRTSPClient::subsessionTimeout(void* clientData) {
  ((ProxyRTSPClient*)clientData)->handleSubsessionTimeout();
}
+
void ProxyRTSPClient::handleSubsessionTimeout() {
  fSubsessionTimerTask = NULL; // the timer has fired, so the task is no longer pending
  // We still have one or more subsessions ('tracks') left to "SETUP". But we can't wait any longer for them. Send a "PLAY" now:
  MediaSession* sess = fOurServerMediaSession.fClientMediaSession;
  if (sess != NULL) sendPlayCommand(*sess, ::continueAfterPLAY, -1.0f, -1.0f, 1.0f, fOurAuthenticator);
  fLastCommandWasPLAY = True;
}
+
+
+//////// "ProxyServerMediaSubsession" implementation //////////
+
// Wraps one back-end "MediaSubsession" (i.e., one track of the proxied stream) as an
// on-demand subsession that our own server can serve.  "fCodecName" is a heap copy (strDup),
// freed in our destructor; "fNext" links us into the RTSPClient's 'SETUP queue'.
ProxyServerMediaSubsession
::ProxyServerMediaSubsession(MediaSubsession& mediaSubsession,
                             portNumBits initialPortNum, Boolean multiplexRTCPWithRTP)
  : OnDemandServerMediaSubsession(mediaSubsession.parentSession().envir(), True/*reuseFirstSource*/,
                                  initialPortNum, multiplexRTCPWithRTP),
    fClientMediaSubsession(mediaSubsession), fCodecName(strDup(mediaSubsession.codecName())),
    fNext(NULL), fHaveSetupStream(False) {
}
+
// Debugging output: prints the subsession with its URL and codec, e.g. "ProxyServerMediaSubsession[rtsp://...,H264]".
UsageEnvironment& operator<<(UsageEnvironment& env, const ProxyServerMediaSubsession& psmss) { // used for debugging
  return env << "ProxyServerMediaSubsession[" << psmss.url() << "," << psmss.codecName() << "]";
}
+
ProxyServerMediaSubsession::~ProxyServerMediaSubsession() {
  if (verbosityLevel() > 0) {
    envir() << *this << "::~ProxyServerMediaSubsession()\n";
  }

  // "fCodecName" was heap-allocated (by strDup() in our constructor, or replaced by the
  // transcoding table in "createNewStreamSource()"), so free it here:
  delete[] (char*)fCodecName;
}
+
// Creates (or re-uses) the data source that feeds this proxied track.  On first use it
// initiate()s the back-end subsession, inserts any needed filters (transcoder,
// presentation-time normalizer, codec 'framer'), and - when called for a real client
// ("clientSessionId != 0") - drives the back-end "SETUP"/"PLAY" state machine.
FramedSource* ProxyServerMediaSubsession::createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate) {
  ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession;

  if (verbosityLevel() > 0) {
    envir() << *this << "::createNewStreamSource(session id " << clientSessionId << ")\n";
  }

  // If we haven't yet created a data source from our 'media subsession' object, initiate() it to do so:
  if (fClientMediaSubsession.readSource() == NULL) {
    // (Skip these receive-mode hacks only if a transcoder will handle the codec instead:)
    if (sms->fTranscodingTable == NULL || !sms->fTranscodingTable->weWillTranscode("audio", "MPA-ROBUST")) fClientMediaSubsession.receiveRawMP3ADUs(); // hack for proxying MPA-ROBUST streams
    if (sms->fTranscodingTable == NULL || !sms->fTranscodingTable->weWillTranscode("video", "JPEG")) fClientMediaSubsession.receiveRawJPEGFrames(); // hack for proxying JPEG/RTP streams.
    fClientMediaSubsession.initiate();
    if (verbosityLevel() > 0) {
      envir() << "\tInitiated: " << *this << "\n";
    }

    if (fClientMediaSubsession.readSource() != NULL) {
      // First, check whether we have defined a 'transcoder' filter to be used with this codec:
      if (sms->fTranscodingTable != NULL) {
        char* outputCodecName;
        FramedFilter* transcoder
          = sms->fTranscodingTable->lookupTranscoder(fClientMediaSubsession, outputCodecName);
        if (transcoder != NULL) {
          fClientMediaSubsession.addFilter(transcoder);
          // From now on we serve the transcoder's output codec, not the original one:
          delete[] (char*)fCodecName; fCodecName = outputCodecName;
        }
      }

      // Then, add to the front of all data sources a filter that will 'normalize' their frames'
      // presentation times, before the frames get re-transmitted by our server:
      FramedFilter* normalizerFilter = sms->fPresentationTimeSessionNormalizer
        ->createNewPresentationTimeSubsessionNormalizer(fClientMediaSubsession.readSource(),
                                                        fClientMediaSubsession.rtpSource(),
                                                        fCodecName);
      fClientMediaSubsession.addFilter(normalizerFilter);

      // Some data sources require a 'framer' object to be added, before they can be fed into
      // a "RTPSink". Adjust for this now:
      if (strcmp(fCodecName, "H264") == 0) {
        fClientMediaSubsession.addFilter(H264VideoStreamDiscreteFramer
                                         ::createNew(envir(), fClientMediaSubsession.readSource()));
      } else if (strcmp(fCodecName, "H265") == 0) {
        fClientMediaSubsession.addFilter(H265VideoStreamDiscreteFramer
                                         ::createNew(envir(), fClientMediaSubsession.readSource()));
      } else if (strcmp(fCodecName, "MP4V-ES") == 0) {
        fClientMediaSubsession.addFilter(MPEG4VideoStreamDiscreteFramer
                                         ::createNew(envir(), fClientMediaSubsession.readSource(),
                                                     True/* leave PTs unmodified*/));
      } else if (strcmp(fCodecName, "MPV") == 0) {
        fClientMediaSubsession.addFilter(MPEG1or2VideoStreamDiscreteFramer
                                         ::createNew(envir(), fClientMediaSubsession.readSource(),
                                                     False, 5.0, True/* leave PTs unmodified*/));
      } else if (strcmp(fCodecName, "DV") == 0) {
        fClientMediaSubsession.addFilter(DVVideoStreamFramer
                                         ::createNew(envir(), fClientMediaSubsession.readSource(),
                                                     False, True/* leave PTs unmodified*/));
      }
    }

    // Learn when the back-end stream ends (RTCP "BYE"), so we can tear down and re-"DESCRIBE":
    if (fClientMediaSubsession.rtcpInstance() != NULL) {
      fClientMediaSubsession.rtcpInstance()->setByeHandler(subsessionByeHandler, this);
    }
  }

  ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient;
  if (clientSessionId != 0) {
    // We're being called as a result of implementing a RTSP "SETUP".
    if (!fHaveSetupStream) {
      // This is our first "SETUP". Send RTSP "SETUP" and later "PLAY" commands to the proxied server, to start streaming:
      // (Before sending "SETUP", enqueue ourselves on the "RTSPClient"s 'SETUP queue', so we'll be able to get the correct
      //  "ProxyServerMediaSubsession" to handle the response. (Note that responses come back in the same order as requests.))
      Boolean queueWasEmpty = proxyRTSPClient->fSetupQueueHead == NULL;
      if (queueWasEmpty) {
        proxyRTSPClient->fSetupQueueHead = this;
        proxyRTSPClient->fSetupQueueTail = this;
      } else {
        // Add ourself to the "RTSPClient"s 'SETUP queue' (if we're not already on it):
        ProxyServerMediaSubsession* psms;
        for (psms = proxyRTSPClient->fSetupQueueHead; psms != NULL; psms = psms->fNext) {
          if (psms == this) break;
        }
        if (psms == NULL) {
          proxyRTSPClient->fSetupQueueTail->fNext = this;
          proxyRTSPClient->fSetupQueueTail = this;
        }
      }

      // Hack: If there's already a pending "SETUP" request, don't send this track's "SETUP" right away, because
      // the server might not properly handle 'pipelined' requests. Instead, wait until after previous "SETUP" responses come back.
      if (queueWasEmpty) {
        proxyRTSPClient->sendSetupCommand(fClientMediaSubsession, ::continueAfterSETUP,
                                          False, proxyRTSPClient->fStreamRTPOverTCP, False, proxyRTSPClient->auth());
        ++proxyRTSPClient->fNumSetupsDone;
        fHaveSetupStream = True;
      }
    } else {
      // This is a "SETUP" from a new client. We know that there are no other currently active clients (otherwise we wouldn't
      // have been called here), so we know that the substream was previously "PAUSE"d. Send "PLAY" downstream once again,
      // to resume the stream:
      if (!proxyRTSPClient->fLastCommandWasPLAY) { // so that we send only one "PLAY"; not one for each subsession
        proxyRTSPClient->sendPlayCommand(fClientMediaSubsession.parentSession(), ::continueAfterPLAY, -1.0f/*resume from previous point*/,
                                         -1.0f, 1.0f, proxyRTSPClient->auth());
        proxyRTSPClient->fLastCommandWasPLAY = True;
      }
    }
  }

  // Report the subsession's advertised bandwidth as the bitrate estimate (default 50 kbps):
  estBitrate = fClientMediaSubsession.bandwidth();
  if (estBitrate == 0) estBitrate = 50; // kbps, estimate
  return fClientMediaSubsession.readSource();
}
+
// Called when the last front-end client stops using this track's source.
void ProxyServerMediaSubsession::closeStreamSource(FramedSource* inputSource) {
  if (verbosityLevel() > 0) {
    envir() << *this << "::closeStreamSource()\n";
  }
  // Because there's only one input source for this 'subsession' (regardless of how many downstream clients are proxying it),
  // we don't close the input source here. (Instead, we wait until *this* object gets deleted.)
  // However, because (as evidenced by this function having been called) we no longer have any clients accessing the stream,
  // then we "PAUSE" the downstream proxied stream, until a new client arrives:
  if (fHaveSetupStream) {
    ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession;
    ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient;
    if (proxyRTSPClient->fLastCommandWasPLAY) { // so that we send only one "PAUSE"; not one for each subsession
      if (fParentSession->referenceCount() > 1) {
        // There are other client(s) still streaming other subsessions of this stream.
        // Therefore, we don't send a "PAUSE" for the whole stream, but only for the sub-stream:
        proxyRTSPClient->sendPauseCommand(fClientMediaSubsession, NULL, proxyRTSPClient->auth());
      } else {
        // Normal case: There are no other client still streaming (parts of) this stream.
        // Send a "PAUSE" for the whole stream.
        proxyRTSPClient->sendPauseCommand(fClientMediaSubsession.parentSession(), NULL, proxyRTSPClient->auth());
        proxyRTSPClient->fLastCommandWasPLAY = False; // the next client must re-"PLAY" the whole stream
      }
    }
  }
}
+
+RTPSink* ProxyServerMediaSubsession
+::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource) {
+ if (verbosityLevel() > 0) {
+ envir() << *this << "::createNewRTPSink()\n";
+ }
+
+ // Create (and return) the appropriate "RTPSink" object for our codec:
+ // (Note: The configuration string might not be correct if a transcoder is used. FIX!) #####
+ RTPSink* newSink;
+ if (strcmp(fCodecName, "AC3") == 0 || strcmp(fCodecName, "EAC3") == 0) {
+ newSink = AC3AudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.rtpTimestampFrequency());
+#if 0 // This code does not work; do *not* enable it:
+ } else if (strcmp(fCodecName, "AMR") == 0 || strcmp(fCodecName, "AMR-WB") == 0) {
+ Boolean isWideband = strcmp(fCodecName, "AMR-WB") == 0;
+ newSink = AMRAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ isWideband, fClientMediaSubsession.numChannels());
+#endif
+ } else if (strcmp(fCodecName, "DV") == 0) {
+ newSink = DVVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+ } else if (strcmp(fCodecName, "GSM") == 0) {
+ newSink = GSMAudioRTPSink::createNew(envir(), rtpGroupsock);
+ } else if (strcmp(fCodecName, "H263-1998") == 0 || strcmp(fCodecName, "H263-2000") == 0) {
+ newSink = H263plusVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.rtpTimestampFrequency());
+ } else if (strcmp(fCodecName, "H264") == 0) {
+ newSink = H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.fmtp_spropparametersets());
+ } else if (strcmp(fCodecName, "H265") == 0) {
+ newSink = H265VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.fmtp_spropvps(),
+ fClientMediaSubsession.fmtp_spropsps(),
+ fClientMediaSubsession.fmtp_sproppps());
+ } else if (strcmp(fCodecName, "JPEG") == 0) {
+ newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, 26, 90000, "video", "JPEG",
+ 1/*numChannels*/, False/*allowMultipleFramesPerPacket*/, False/*doNormalMBitRule*/);
+ } else if (strcmp(fCodecName, "MP4A-LATM") == 0) {
+ newSink = MPEG4LATMAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.rtpTimestampFrequency(),
+ fClientMediaSubsession.fmtp_config(),
+ fClientMediaSubsession.numChannels());
+ } else if (strcmp(fCodecName, "MP4V-ES") == 0) {
+ newSink = MPEG4ESVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.rtpTimestampFrequency(),
+ fClientMediaSubsession.attrVal_unsigned("profile-level-id"),
+ fClientMediaSubsession.fmtp_config());
+ } else if (strcmp(fCodecName, "MPA") == 0) {
+ newSink = MPEG1or2AudioRTPSink::createNew(envir(), rtpGroupsock);
+ } else if (strcmp(fCodecName, "MPA-ROBUST") == 0) {
+ newSink = MP3ADURTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+ } else if (strcmp(fCodecName, "MPEG4-GENERIC") == 0) {
+ newSink = MPEG4GenericRTPSink::createNew(envir(), rtpGroupsock,
+ rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(),
+ fClientMediaSubsession.mediumName(),
+ fClientMediaSubsession.attrVal_str("mode"),
+ fClientMediaSubsession.fmtp_config(), fClientMediaSubsession.numChannels());
+ } else if (strcmp(fCodecName, "MPV") == 0) {
+ newSink = MPEG1or2VideoRTPSink::createNew(envir(), rtpGroupsock);
+ } else if (strcmp(fCodecName, "OPUS") == 0) {
+ newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ 48000, "audio", "OPUS", 2, False/*only 1 Opus 'packet' in each RTP packet*/);
+ } else if (strcmp(fCodecName, "T140") == 0) {
+ newSink = T140TextRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+ } else if (strcmp(fCodecName, "THEORA") == 0) {
+ newSink = TheoraVideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.fmtp_config());
+ } else if (strcmp(fCodecName, "VORBIS") == 0) {
+ newSink = VorbisAudioRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic,
+ fClientMediaSubsession.rtpTimestampFrequency(), fClientMediaSubsession.numChannels(),
+ fClientMediaSubsession.fmtp_config());
+ } else if (strcmp(fCodecName, "VP8") == 0) {
+ newSink = VP8VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+ } else if (strcmp(fCodecName, "VP9") == 0) {
+ newSink = VP9VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
+ } else if (strcmp(fCodecName, "AMR") == 0 || strcmp(fCodecName, "AMR-WB") == 0) {
+ // Proxying of these codecs is currently *not* supported, because the data received by the "RTPSource" object is not in a
+ // form that can be fed directly into a corresponding "RTPSink" object.
+ if (verbosityLevel() > 0) {
+ envir() << "\treturns NULL (because we currently don't support the proxying of \""
+ << fClientMediaSubsession.mediumName() << "/" << fCodecName << "\" streams)\n";
+ }
+ return NULL;
+ } else if (strcmp(fCodecName, "QCELP") == 0 ||
+ strcmp(fCodecName, "H261") == 0 ||
+ strcmp(fCodecName, "H263-1998") == 0 || strcmp(fCodecName, "H263-2000") == 0 ||
+ strcmp(fCodecName, "X-QT") == 0 || strcmp(fCodecName, "X-QUICKTIME") == 0) {
+ // This codec requires a specialized RTP payload format; however, we don't yet have an appropriate "RTPSink" subclass for it:
+ if (verbosityLevel() > 0) {
+ envir() << "\treturns NULL (because we don't have a \"RTPSink\" subclass for this RTP payload format)\n";
+ }
+ return NULL;
+ } else {
+ // This codec is assumed to have a simple RTP payload format that can be implemented just with a "SimpleRTPSink":
+ Boolean allowMultipleFramesPerPacket = True; // by default
+ Boolean doNormalMBitRule = True; // by default
+ // Some codecs change the above default parameters:
+ if (strcmp(fCodecName, "MP2T") == 0) {
+ doNormalMBitRule = False; // no RTP 'M' bit
+ }
+ newSink = SimpleRTPSink::createNew(envir(), rtpGroupsock,
+ rtpPayloadTypeIfDynamic, fClientMediaSubsession.rtpTimestampFrequency(),
+ fClientMediaSubsession.mediumName(), fCodecName,
+ fClientMediaSubsession.numChannels(), allowMultipleFramesPerPacket, doNormalMBitRule);
+ }
+
+ // Because our relayed frames' presentation times are inaccurate until the input frames have been RTCP-synchronized,
+ // we temporarily disable RTCP "SR" reports for this "RTPSink" object:
+ newSink->enableRTCPReports() = False;
+
+ // Also tell our "PresentationTimeSubsessionNormalizer" object about the "RTPSink", so it can enable RTCP "SR" reports later:
+ PresentationTimeSubsessionNormalizer* ssNormalizer;
+ if (strcmp(fCodecName, "H264") == 0 ||
+ strcmp(fCodecName, "H265") == 0 ||
+ strcmp(fCodecName, "MP4V-ES") == 0 ||
+ strcmp(fCodecName, "MPV") == 0 ||
+ strcmp(fCodecName, "DV") == 0) {
+ // There was a separate 'framer' object in front of the "PresentationTimeSubsessionNormalizer", so go back one object to get it:
+ ssNormalizer = (PresentationTimeSubsessionNormalizer*)(((FramedFilter*)inputSource)->inputSource());
+ } else {
+ ssNormalizer = (PresentationTimeSubsessionNormalizer*)inputSource;
+ }
+ ssNormalizer->setRTPSink(newSink);
+
+ return newSink;
+}
+
+Groupsock* ProxyServerMediaSubsession::createGroupsock(struct in_addr const& addr, Port port) {
+ ProxyServerMediaSession* parentSession = (ProxyServerMediaSession*)fParentSession;
+ return parentSession->createGroupsock(addr, port);
+}
+
+RTCPInstance* ProxyServerMediaSubsession
+::createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
+ unsigned char const* cname, RTPSink* sink) {
+ ProxyServerMediaSession* parentSession = (ProxyServerMediaSession*)fParentSession;
+ return parentSession->createRTCP(RTCPgs, totSessionBW, cname, sink);
+}
+
// RTCP "BYE" callback trampoline (registered in "createNewStreamSource()"): forwards to the member function.
void ProxyServerMediaSubsession::subsessionByeHandler(void* clientData) {
  ((ProxyServerMediaSubsession*)clientData)->subsessionByeHandler();
}
+
// Called when the back-end server sends a RTCP "BYE" for this track, i.e., the proxied stream ended.
void ProxyServerMediaSubsession::subsessionByeHandler() {
  if (verbosityLevel() > 0) {
    envir() << *this << ": received RTCP \"BYE\". (The back-end stream has ended.)\n";
  }

  // This "BYE" signals that our input source has (effectively) closed, so pass this onto the front-end clients:
  fHaveSetupStream = False; // hack: clearing this stops "closeStreamSource()" from sending a "PAUSE" downstream
  if (fClientMediaSubsession.readSource() != NULL) {
    fClientMediaSubsession.readSource()->handleClosure();
  }

  // And then treat this as if we had lost connection to the back-end server,
  // and can reestablish streaming from it only by sending another "DESCRIBE":
  ProxyServerMediaSession* const sms = (ProxyServerMediaSession*)fParentSession;
  ProxyRTSPClient* const proxyRTSPClient = sms->fProxyRTSPClient;
  proxyRTSPClient->scheduleReset();
}
+
+
+////////// PresentationTimeSessionNormalizer and PresentationTimeSubsessionNormalizer implementations //////////
+
+// PresentationTimeSessionNormalizer:
+
// Per-session coordinator for presentation-time normalization; owns a linked list of
// per-subsession normalizers ("fSubsessionNormalizers"), initially empty.
PresentationTimeSessionNormalizer::PresentationTimeSessionNormalizer(UsageEnvironment& env)
  : Medium(env),
    fSubsessionNormalizers(NULL), fMasterSSNormalizer(NULL) {
}
+
PresentationTimeSessionNormalizer::~PresentationTimeSessionNormalizer() {
  // Closing a subsession normalizer unlinks it from our list (its destructor calls
  // "removePresentationTimeSubsessionNormalizer()"), so this loop terminates once the list is empty:
  while (fSubsessionNormalizers != NULL) {
    Medium::close(fSubsessionNormalizers);
  }
}
+
+PresentationTimeSubsessionNormalizer* PresentationTimeSessionNormalizer
+::createNewPresentationTimeSubsessionNormalizer(FramedSource* inputSource, RTPSource* rtpSource,
+ char const* codecName) {
+ fSubsessionNormalizers
+ = new PresentationTimeSubsessionNormalizer(*this, inputSource, rtpSource, codecName, fSubsessionNormalizers);
+ return fSubsessionNormalizers;
+}
+
+void PresentationTimeSessionNormalizer
+::normalizePresentationTime(PresentationTimeSubsessionNormalizer* ssNormalizer,
+ struct timeval& toPT, struct timeval const& fromPT) {
+ Boolean const hasBeenSynced = ssNormalizer->fRTPSource->hasBeenSynchronizedUsingRTCP();
+
+ if (!hasBeenSynced) {
+ // If "fromPT" has not yet been RTCP-synchronized, then it was generated by our own receiving code, and thus
+ // is already aligned with 'wall-clock' time. Just copy it 'as is' to "toPT":
+ toPT = fromPT;
+ } else {
+ if (fMasterSSNormalizer == NULL) {
+ // Make "ssNormalizer" the 'master' subsession - meaning that its presentation time is adjusted to align with 'wall clock'
+ // time, and the presentation times of other subsessions (if any) are adjusted to retain their relative separation with
+ // those of the master:
+ fMasterSSNormalizer = ssNormalizer;
+
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+
+ // Compute: fPTAdjustment = timeNow - fromPT
+ fPTAdjustment.tv_sec = timeNow.tv_sec - fromPT.tv_sec;
+ fPTAdjustment.tv_usec = timeNow.tv_usec - fromPT.tv_usec;
+ // Note: It's OK if one or both of these fields underflows; the result still works out OK later.
+ }
+
+ // Compute a normalized presentation time: toPT = fromPT + fPTAdjustment
+ toPT.tv_sec = fromPT.tv_sec + fPTAdjustment.tv_sec - 1;
+ toPT.tv_usec = fromPT.tv_usec + fPTAdjustment.tv_usec + MILLION;
+ while (toPT.tv_usec > MILLION) { ++toPT.tv_sec; toPT.tv_usec -= MILLION; }
+
+ // Because "ssNormalizer"s relayed presentation times are accurate from now on, enable RTCP "SR" reports for its "RTPSink":
+ RTPSink* const rtpSink = ssNormalizer->fRTPSink;
+ if (rtpSink != NULL) { // sanity check; should always be true
+ rtpSink->enableRTCPReports() = True;
+ }
+ }
+}
+
+void PresentationTimeSessionNormalizer
+::removePresentationTimeSubsessionNormalizer(PresentationTimeSubsessionNormalizer* ssNormalizer) {
+ // Unlink "ssNormalizer" from the linked list (starting with "fSubsessionNormalizers"):
+ if (fSubsessionNormalizers == ssNormalizer) {
+ fSubsessionNormalizers = fSubsessionNormalizers->fNext;
+ } else {
+ PresentationTimeSubsessionNormalizer** ssPtrPtr = &(fSubsessionNormalizers->fNext);
+ while (*ssPtrPtr != ssNormalizer) ssPtrPtr = &((*ssPtrPtr)->fNext);
+ *ssPtrPtr = (*ssPtrPtr)->fNext;
+ }
+}
+
+// PresentationTimeSubsessionNormalizer:
+
// Per-subsession pass-through filter that rewrites presentation times (see "afterGettingFrame()").
// Note: "codecName" is stored as-is (not copied), and "next" links us into the parent's list.
PresentationTimeSubsessionNormalizer
::PresentationTimeSubsessionNormalizer(PresentationTimeSessionNormalizer& parent, FramedSource* inputSource, RTPSource* rtpSource,
                                       char const* codecName, PresentationTimeSubsessionNormalizer* next)
  : FramedFilter(parent.envir(), inputSource),
    fParent(parent), fRTPSource(rtpSource), fRTPSink(NULL), fCodecName(codecName), fNext(next) {
}
+
PresentationTimeSubsessionNormalizer::~PresentationTimeSubsessionNormalizer() {
  // Unlink ourself from our parent's list of subsession normalizers:
  fParent.removePresentationTimeSubsessionNormalizer(this);
}
+
// 'After getting' callback trampoline (passed to "getNextFrame()" in "doGetNextFrame()"):
// forwards to the member function.
void PresentationTimeSubsessionNormalizer::afterGettingFrame(void* clientData, unsigned frameSize,
                                                             unsigned numTruncatedBytes,
                                                             struct timeval presentationTime,
                                                             unsigned durationInMicroseconds) {
  ((PresentationTimeSubsessionNormalizer*)clientData)
    ->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}
+
void PresentationTimeSubsessionNormalizer::afterGettingFrame(unsigned frameSize,
                                                             unsigned numTruncatedBytes,
                                                             struct timeval presentationTime,
                                                             unsigned durationInMicroseconds) {
  // This filter is implemented by passing all frames through unchanged, except that "fPresentationTime" is changed:
  fFrameSize = frameSize;
  fNumTruncatedBytes = numTruncatedBytes;
  fDurationInMicroseconds = durationInMicroseconds;

  // Map the incoming presentation time onto wall-clock-aligned time (see "normalizePresentationTime()"):
  fParent.normalizePresentationTime(this, fPresentationTime, presentationTime);

  // Hack for JPEG/RTP proxying. Because we're proxying JPEG by just copying the raw JPEG/RTP payloads, without interpreting them,
  // we need to also 'copy' the RTP 'M' (marker) bit from the "RTPSource" to the "RTPSink".
  // (The cast is safe for JPEG, because "createNewRTPSink()" creates a "SimpleRTPSink" for JPEG.)
  if (fRTPSource->curPacketMarkerBit() && strcmp(fCodecName, "JPEG") == 0) ((SimpleRTPSink*)fRTPSink)->setMBitOnNextPacket();

  // Complete delivery:
  FramedSource::afterGetting(this);
}
+
void PresentationTimeSubsessionNormalizer::doGetNextFrame() {
  // Simply pass the request through to our input source; "afterGettingFrame()" (above) rewrites
  // the presentation time before delivery, and closure is forwarded to our reader unchanged:
  fInputSource->getNextFrame(fTo, fMaxSize, afterGettingFrame, this, FramedSource::handleClosure, this);
}
diff --git a/liveMedia/QCELPAudioRTPSource.cpp b/liveMedia/QCELPAudioRTPSource.cpp
new file mode 100644
index 0000000..44938d8
--- /dev/null
+++ b/liveMedia/QCELPAudioRTPSource.cpp
@@ -0,0 +1,504 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Qualcomm "PureVoice" (aka. "QCELP") Audio RTP Sources
+// Implementation
+
+#include "QCELPAudioRTPSource.hh"
+#include "MultiFramedRTPSource.hh"
+#include "FramedFilter.hh"
+#include <string.h>
+#include <stdlib.h>
+
+// This source is implemented internally by two separate sources:
+// (i) a RTP source for the raw (interleaved) QCELP frames, and
+// (ii) a deinterleaving filter that reads from this.
+// Define these two new classes here:
+
+// The RTP source for the raw (still-interleaved) QCELP frames.  It parses
+// each packet's 1-byte interleave header, and exposes the interleave
+// parameters (L, N) plus the current intra-packet frame index, for use by
+// the deinterleaving filter below.
+class RawQCELPRTPSource: public MultiFramedRTPSource {
+public:
+  static RawQCELPRTPSource* createNew(UsageEnvironment& env,
+                                      Groupsock* RTPgs,
+                                      unsigned char rtpPayloadFormat,
+                                      unsigned rtpTimestampFrequency);
+
+  unsigned char interleaveL() const { return fInterleaveL; }
+  unsigned char interleaveN() const { return fInterleaveN; }
+  unsigned char& frameIndex() { return fFrameIndex; } // index within pkt
+
+private:
+  RawQCELPRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+                    unsigned char rtpPayloadFormat,
+                    unsigned rtpTimestampFrequency);
+      // called only by createNew()
+
+  virtual ~RawQCELPRTPSource();
+
+private:
+  // redefined virtual functions:
+  virtual Boolean processSpecialHeader(BufferedPacket* packet,
+                                       unsigned& resultSpecialHeaderSize);
+  virtual char const* MIMEtype() const;
+
+  virtual Boolean hasBeenSynchronizedUsingRTCP();
+
+private:
+  unsigned char fInterleaveL, fInterleaveN, fFrameIndex;
+  // Count of consecutive RTCP-synced packets; used by
+  // "hasBeenSynchronizedUsingRTCP()" to delay reporting synchronization:
+  unsigned fNumSuccessiveSyncedPackets;
+};
+
+// A filter that sits on top of a "RawQCELPRTPSource" and delivers its frames
+// in deinterleaved (presentation) order, substituting 'erasure' frames for
+// any that were lost.
+class QCELPDeinterleaver: public FramedFilter {
+public:
+  static QCELPDeinterleaver* createNew(UsageEnvironment& env,
+                                       RawQCELPRTPSource* inputSource);
+
+private:
+  QCELPDeinterleaver(UsageEnvironment& env,
+                     RawQCELPRTPSource* inputSource);
+      // called only by "createNew()"
+
+  virtual ~QCELPDeinterleaver();
+
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+                                unsigned numTruncatedBytes,
+                                struct timeval presentationTime,
+                                unsigned durationInMicroseconds);
+  void afterGettingFrame1(unsigned frameSize, struct timeval presentationTime);
+
+private:
+  // Redefined virtual functions:
+  void doGetNextFrame();
+  virtual void doStopGettingFrames();
+
+private:
+  class QCELPDeinterleavingBuffer* fDeinterleavingBuffer;
+  Boolean fNeedAFrame; // True while our reader is waiting for a frame
+};
+
+
+////////// QCELPAudioRTPSource implementation //////////
+
+FramedSource*
+QCELPAudioRTPSource::createNew(UsageEnvironment& env,
+                               Groupsock* RTPgs,
+                               RTPSource*& resultRTPSource,
+                               unsigned char rtpPayloadFormat,
+                               unsigned rtpTimestampFrequency) {
+  // First, create the source for the raw (interleaved) QCELP/RTP frames:
+  RawQCELPRTPSource* rawSource
+    = RawQCELPRTPSource::createNew(env, RTPgs, rtpPayloadFormat,
+                                   rtpTimestampFrequency);
+  resultRTPSource = rawSource;
+  if (rawSource == NULL) return NULL;
+
+  // Then, wrap it in a deinterleaving filter; the filter is what we return:
+  QCELPDeinterleaver* deinterleaver
+    = QCELPDeinterleaver::createNew(env, rawSource);
+  if (deinterleaver == NULL) {
+    // Filter creation failed; clean up, and hand back NULL for both results:
+    Medium::close(rawSource);
+    resultRTPSource = NULL;
+  }
+
+  return deinterleaver;
+}
+
+
+////////// QCELPBufferedPacket and QCELPBufferedPacketFactory //////////
+
+// A subclass of BufferedPacket, used to separate out QCELP frames.
+
+// A subclass of BufferedPacket, used to separate out QCELP frames.
+// It also bumps the source's per-packet frame index as each frame is
+// extracted, so the deinterleaver knows each frame's position in the packet.
+class QCELPBufferedPacket: public BufferedPacket {
+public:
+  QCELPBufferedPacket(RawQCELPRTPSource& ourSource);
+  virtual ~QCELPBufferedPacket();
+
+private: // redefined virtual functions
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+                                         unsigned dataSize);
+private:
+  RawQCELPRTPSource& fOurSource;
+};
+
+// Factory used by "MultiFramedRTPSource" to create our packet subclass:
+class QCELPBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+///////// RawQCELPRTPSource implementation ////////
+
+RawQCELPRTPSource*
+RawQCELPRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+                             unsigned char rtpPayloadFormat,
+                             unsigned rtpTimestampFrequency) {
+  // Simple factory wrapper around the (private) constructor:
+  RawQCELPRTPSource* result
+    = new RawQCELPRTPSource(env, RTPgs, rtpPayloadFormat,
+                            rtpTimestampFrequency);
+  return result;
+}
+
+RawQCELPRTPSource::RawQCELPRTPSource(UsageEnvironment& env,
+				     Groupsock* RTPgs,
+				     unsigned char rtpPayloadFormat,
+				     unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat,
+			 rtpTimestampFrequency,
+			 new QCELPBufferedPacketFactory),
+  // Interleave state starts zeroed; it is (re)set from each packet's header
+  // in "processSpecialHeader()":
+  fInterleaveL(0), fInterleaveN(0), fFrameIndex(0),
+  fNumSuccessiveSyncedPackets(0) {
+}
+
+RawQCELPRTPSource::~RawQCELPRTPSource() {
+  // No resources of our own to release; the base class does the rest.
+}
+
+// Parse the 1-byte QCELP payload header that precedes the frame data in each
+// RTP packet, recording the interleave parameters; also track how many
+// consecutive packets have been RTCP-synchronized.
+// Returns False (causing the packet to be discarded) if the packet is empty
+// or the interleave parameters are out of range.
+Boolean RawQCELPRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+
+  // First, check whether this packet's RTP timestamp is synchronized:
+  if (RTPSource::hasBeenSynchronizedUsingRTCP()) {
+    ++fNumSuccessiveSyncedPackets;
+  } else {
+    fNumSuccessiveSyncedPackets = 0;
+  }
+
+  // There's a 1-byte header indicating the interleave parameters
+  if (packetSize < 1) return False;
+
+  // Get the interleaving parameters from the 1-byte header,
+  // and check them for validity.  L is in bits 5-3; N is in bits 2-0:
+  unsigned char const firstByte = headerStart[0];
+  unsigned char const interleaveL = (firstByte&0x38)>>3;
+  unsigned char const interleaveN = firstByte&0x07;
+#ifdef DEBUG
+  fprintf(stderr, "packetSize: %d, interleaveL: %d, interleaveN: %d\n", packetSize, interleaveL, interleaveN);
+#endif
+  if (interleaveL > 5 || interleaveN > interleaveL) return False; //invalid
+
+  fInterleaveL = interleaveL;
+  fInterleaveN = interleaveN;
+  fFrameIndex = 0; // initially; incremented as each frame is extracted
+
+  resultSpecialHeaderSize = 1; // just the 1-byte interleave header
+  return True;
+}
+
+// The MIME type corresponding to this RTP payload format:
+char const* RawQCELPRTPSource::MIMEtype() const {
+  return "audio/QCELP";
+}
+
+Boolean RawQCELPRTPSource::hasBeenSynchronizedUsingRTCP() {
+  // Don't report ourselves as being synchronized until we've received
+  // at least a complete interleave cycle (L+1 packets) of synchronized
+  // packets.  This ensures that the receiver is currently getting a frame
+  // from a packet that was synchronized.
+  if (fNumSuccessiveSyncedPackets <= (unsigned)(fInterleaveL+1)) return False;
+
+  // Clamp the counter so it can never wrap around:
+  fNumSuccessiveSyncedPackets = fInterleaveL+2;
+  return True;
+}
+
+
+///// QCELPBufferedPacket and QCELPBufferedPacketFactory implementation
+
+// Remember our source, so "nextEnclosedFrameSize()" can update its
+// per-packet frame index:
+QCELPBufferedPacket::QCELPBufferedPacket(RawQCELPRTPSource& ourSource)
+  : fOurSource(ourSource) {
+}
+
+QCELPBufferedPacket::~QCELPBufferedPacket() {
+  // Nothing extra to clean up beyond the base class.
+}
+
+// QCELP frames are packed back-to-back in the payload; each frame begins
+// with a 1-byte frame-type code whose value determines the frame's total
+// size.  Returns that size (consuming the frame), or 0 if no complete,
+// valid frame remains.
+unsigned QCELPBufferedPacket::
+  nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  // The size of the QCELP frame is determined by the first byte:
+  if (dataSize == 0) return 0; // sanity check
+  unsigned char const firstByte = framePtr[0];
+
+  unsigned frameSize;
+  switch (firstByte) {
+  case 0: { frameSize = 1; break; }
+  case 1: { frameSize = 4; break; }
+  case 2: { frameSize = 8; break; }
+  case 3: { frameSize = 17; break; }
+  case 4: { frameSize = 35; break; }
+  default: { frameSize = 0; break; } // unknown/invalid frame-type byte
+  }
+
+#ifdef DEBUG
+  fprintf(stderr, "QCELPBufferedPacket::nextEnclosedFrameSize(): frameSize: %d, dataSize: %d\n", frameSize, dataSize);
+#endif
+  // Fix: an invalid frame-type byte yields "frameSize" 0, which previously
+  // passed the "dataSize < frameSize" test below and (wrongly) incremented
+  // the source's per-packet frame index even though no frame was consumed:
+  if (frameSize == 0 || dataSize < frameSize) return 0;
+
+  ++fOurSource.frameIndex(); // note that we've consumed another frame
+  return frameSize;
+}
+
+BufferedPacket* QCELPBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+  // "ourSource" is known to be a "RawQCELPRTPSource":
+  RawQCELPRTPSource* qcelpSource = (RawQCELPRTPSource*)ourSource;
+  return new QCELPBufferedPacket(*qcelpSource);
+}
+
+///////// QCELPDeinterleavingBuffer /////////
+// (used to implement QCELPDeinterleaver)
+
+#define QCELP_MAX_FRAME_SIZE 35
+#define QCELP_MAX_INTERLEAVE_L 5
+#define QCELP_MAX_FRAMES_PER_PACKET 10
+#define QCELP_MAX_INTERLEAVE_GROUP_SIZE \
+ ((QCELP_MAX_INTERLEAVE_L+1)*QCELP_MAX_FRAMES_PER_PACKET)
+
+// Holds up to one full interleave group of frames in each of two 'banks'
+// of bins: frames from the packet group currently being received go into
+// the 'incoming' bank, while frames are handed to the reader, in order,
+// from the 'outgoing' bank.  The banks are swapped when a new group begins
+// (see "deliverIncomingFrame()").
+class QCELPDeinterleavingBuffer {
+public:
+  QCELPDeinterleavingBuffer();
+  virtual ~QCELPDeinterleavingBuffer();
+
+  // Store one incoming frame in its deinterleaved bin position:
+  void deliverIncomingFrame(unsigned frameSize,
+			    unsigned char interleaveL,
+			    unsigned char interleaveN,
+			    unsigned char frameIndex,
+			    unsigned short packetSeqNum,
+			    struct timeval presentationTime);
+  // Copy out the next in-order frame (or an 'erasure' for a missing one);
+  // returns False if the outgoing bank is exhausted:
+  Boolean retrieveFrame(unsigned char* to, unsigned maxSize,
+			unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes,
+			struct timeval& resultPresentationTime);
+
+  unsigned char* inputBuffer() { return fInputBuffer; }
+  unsigned inputBufferSize() const { return QCELP_MAX_FRAME_SIZE; }
+
+private:
+  // Per-bin frame record; owns its frame data buffer:
+  class FrameDescriptor {
+  public:
+    FrameDescriptor();
+    virtual ~FrameDescriptor();
+
+    unsigned frameSize;
+    unsigned char* frameData;
+    struct timeval presentationTime;
+  };
+
+  // Use two banks of descriptors - one for incoming, one for outgoing
+  FrameDescriptor fFrames[QCELP_MAX_INTERLEAVE_GROUP_SIZE][2];
+  unsigned char fIncomingBankId; // toggles between 0 and 1
+  unsigned char fIncomingBinMax; // in the incoming bank
+  unsigned char fOutgoingBinMax; // in the outgoing bank
+  unsigned char fNextOutgoingBin;
+  Boolean fHaveSeenPackets;
+  u_int16_t fLastPacketSeqNumForGroup; // seq num that ends the current group
+  unsigned char* fInputBuffer; // where the source writes the next raw frame
+  struct timeval fLastRetrievedPresentationTime; // for erasure extrapolation
+};
+
+
+////////// QCELPDeinterleaver implementation /////////
+
+QCELPDeinterleaver*
+QCELPDeinterleaver::createNew(UsageEnvironment& env,
+                              RawQCELPRTPSource* inputSource) {
+  // Simple factory wrapper around the (private) constructor:
+  QCELPDeinterleaver* deinterleaver = new QCELPDeinterleaver(env, inputSource);
+  return deinterleaver;
+}
+
+QCELPDeinterleaver::QCELPDeinterleaver(UsageEnvironment& env,
+				       RawQCELPRTPSource* inputSource)
+  : FramedFilter(env, inputSource),
+    fNeedAFrame(False) {
+  // Allocate the buffer that does the actual deinterleaving work:
+  fDeinterleavingBuffer = new QCELPDeinterleavingBuffer();
+}
+
+QCELPDeinterleaver::~QCELPDeinterleaver() {
+  // Free the deinterleaving buffer (and, via its destructor, its frames):
+  delete fDeinterleavingBuffer;
+}
+
+static unsigned const uSecsPerFrame = 20000; // 20 ms
+
+void QCELPDeinterleaver::doGetNextFrame() {
+  // First, try getting a frame from the deinterleaving buffer:
+  if (fDeinterleavingBuffer->retrieveFrame(fTo, fMaxSize,
+					   fFrameSize, fNumTruncatedBytes,
+					   fPresentationTime)) {
+    // Success!
+    fNeedAFrame = False;
+
+    // Each QCELP frame represents a fixed 20 ms of audio:
+    fDurationInMicroseconds = uSecsPerFrame;
+
+    // Call our own 'after getting' function.  Because we're not a 'leaf'
+    // source, we can call this directly, without risking
+    // infinite recursion
+    afterGetting(this);
+    return;
+  }
+
+  // No luck, so ask our source for help; "afterGettingFrame()" will retry
+  // the delivery (because "fNeedAFrame" is set) once data arrives:
+  fNeedAFrame = True;
+  if (!fInputSource->isCurrentlyAwaitingData()) {
+    fInputSource->getNextFrame(fDeinterleavingBuffer->inputBuffer(),
+			       fDeinterleavingBuffer->inputBufferSize(),
+			       afterGettingFrame, this,
+			       FramedSource::handleClosure, this);
+  }
+}
+
+void QCELPDeinterleaver::doStopGettingFrames() {
+  // Cancel any pending delivery, and stop our upstream source too:
+  fNeedAFrame = False;
+  fInputSource->stopGettingFrames();
+}
+
+void QCELPDeinterleaver
+::afterGettingFrame(void* clientData, unsigned frameSize,
+		    unsigned /*numTruncatedBytes*/,
+		    struct timeval presentationTime,
+		    unsigned /*durationInMicroseconds*/) {
+  // Static trampoline: "clientData" is really a "QCELPDeinterleaver*":
+  ((QCELPDeinterleaver*)clientData)
+    ->afterGettingFrame1(frameSize, presentationTime);
+}
+
+// Handle a raw (interleaved) frame arriving from our input source:
+void QCELPDeinterleaver
+::afterGettingFrame1(unsigned frameSize, struct timeval presentationTime) {
+  RawQCELPRTPSource* source = (RawQCELPRTPSource*)fInputSource;
+
+  // First, put the frame into our deinterleaving buffer:
+  fDeinterleavingBuffer
+    ->deliverIncomingFrame(frameSize, source->interleaveL(),
+			   source->interleaveN(), source->frameIndex(),
+			   source->curPacketRTPSeqNum(),
+			   presentationTime);
+
+  // Then, try delivering a frame to the client (if he wants one):
+  if (fNeedAFrame) doGetNextFrame();
+}
+
+
+////////// QCELPDeinterleavingBuffer implementation /////////
+
+QCELPDeinterleavingBuffer::QCELPDeinterleavingBuffer()
+  : fIncomingBankId(0), fIncomingBinMax(0),
+    fOutgoingBinMax(0), fNextOutgoingBin(0),
+    fHaveSeenPackets(False) {
+  // Fix: zero-initialize "fLastRetrievedPresentationTime".  It is read in
+  // "retrieveFrame()" (to extrapolate the presentation time of an 'erasure'
+  // frame), and was previously used uninitialized if the very first
+  // retrieved frame happened to be missing:
+  fLastRetrievedPresentationTime.tv_sec = 0;
+  fLastRetrievedPresentationTime.tv_usec = 0;
+
+  // Scratch buffer into which the RTP source writes each raw frame:
+  fInputBuffer = new unsigned char[QCELP_MAX_FRAME_SIZE];
+}
+
+QCELPDeinterleavingBuffer::~QCELPDeinterleavingBuffer() {
+  // Note: the per-bin frame buffers are freed by "FrameDescriptor"'s
+  // destructor, as the "fFrames" member array is destroyed.
+  delete[] fInputBuffer;
+}
+
+// File one incoming frame into its deinterleaved bin position in the
+// 'incoming' bank, swapping banks first if this frame starts a new
+// interleave group.
+void QCELPDeinterleavingBuffer
+::deliverIncomingFrame(unsigned frameSize,
+		       unsigned char interleaveL,
+		       unsigned char interleaveN,
+		       unsigned char frameIndex,
+		       unsigned short packetSeqNum,
+		       struct timeval presentationTime) {
+  // First perform a sanity check on the parameters:
+  // (This is overkill, as the source should have already done this.)
+  if (frameSize > QCELP_MAX_FRAME_SIZE
+      || interleaveL > QCELP_MAX_INTERLEAVE_L || interleaveN > interleaveL
+      || frameIndex == 0 || frameIndex > QCELP_MAX_FRAMES_PER_PACKET) {
+#ifdef DEBUG
+    fprintf(stderr, "QCELPDeinterleavingBuffer::deliverIncomingFrame() param sanity check failed (%d,%d,%d,%d)\n", frameSize, interleaveL, interleaveN, frameIndex);
+#endif
+    return;
+  }
+
+  // The input "presentationTime" was that of the first frame in this
+  // packet.  Update it for the current frame: successive frames in a packet
+  // are (interleaveL+1) frame times apart in presentation order.
+  unsigned uSecIncrement = (frameIndex-1)*(interleaveL+1)*uSecsPerFrame;
+  presentationTime.tv_usec += uSecIncrement;
+  presentationTime.tv_sec += presentationTime.tv_usec/1000000;
+  presentationTime.tv_usec = presentationTime.tv_usec%1000000;
+
+  // Next, check whether this packet is part of a new interleave group
+  if (!fHaveSeenPackets
+      || seqNumLT(fLastPacketSeqNumForGroup, packetSeqNum)) {
+    // We've moved to a new interleave group.  Note the last sequence number
+    // that still belongs to this group:
+    fHaveSeenPackets = True;
+    fLastPacketSeqNumForGroup = packetSeqNum + interleaveL - interleaveN;
+
+    // Switch the incoming and outgoing banks:
+    fIncomingBankId ^= 1;
+    unsigned char tmp = fIncomingBinMax;
+    fIncomingBinMax = fOutgoingBinMax;
+    fOutgoingBinMax = tmp;
+    fNextOutgoingBin = 0;
+  }
+
+  // Now move the incoming frame into the appropriate bin:
+  unsigned const binNumber
+    = interleaveN + (frameIndex-1)*(interleaveL+1);
+  FrameDescriptor& inBin = fFrames[binNumber][fIncomingBankId];
+  unsigned char* curBuffer = inBin.frameData;
+  inBin.frameData = fInputBuffer;
+  inBin.frameSize = frameSize;
+  inBin.presentationTime = presentationTime;
+
+  // Recycle the bin's previous buffer (allocating one if the bin had none)
+  // as the next input buffer, so frames are stored without copying:
+  if (curBuffer == NULL) curBuffer = new unsigned char[QCELP_MAX_FRAME_SIZE];
+  fInputBuffer = curBuffer;
+
+  if (binNumber >= fIncomingBinMax) {
+    fIncomingBinMax = binNumber + 1;
+  }
+}
+
+// Copy the next in-order frame from the 'outgoing' bank into "to".
+// A missing frame is replaced by a 1-byte 'erasure' frame whose
+// presentation time is extrapolated from the previous frame's.
+Boolean QCELPDeinterleavingBuffer
+::retrieveFrame(unsigned char* to, unsigned maxSize,
+		unsigned& resultFrameSize, unsigned& resultNumTruncatedBytes,
+		struct timeval& resultPresentationTime) {
+  if (fNextOutgoingBin >= fOutgoingBinMax) return False; // none left
+
+  FrameDescriptor& outBin = fFrames[fNextOutgoingBin][fIncomingBankId^1];
+  unsigned char* fromPtr;
+  // A byte suffices here: "deliverIncomingFrame()" rejects any frame larger
+  // than QCELP_MAX_FRAME_SIZE (35):
+  unsigned char fromSize = outBin.frameSize;
+  outBin.frameSize = 0; // for the next time this bin is used
+
+  // Check whether this frame is missing; if so, return an 'erasure' frame:
+  unsigned char erasure = 14;
+  if (fromSize == 0) {
+    fromPtr = &erasure;
+    fromSize = 1;
+
+    // Compute this erasure frame's presentation time via extrapolation:
+    resultPresentationTime = fLastRetrievedPresentationTime;
+    resultPresentationTime.tv_usec += uSecsPerFrame;
+    if (resultPresentationTime.tv_usec >= 1000000) {
+      ++resultPresentationTime.tv_sec;
+      resultPresentationTime.tv_usec -= 1000000;
+    }
+  } else {
+    // Normal case - a frame exists:
+    fromPtr = outBin.frameData;
+    resultPresentationTime = outBin.presentationTime;
+  }
+
+  fLastRetrievedPresentationTime = resultPresentationTime;
+
+  // Deliver as much as fits, reporting any truncation to the caller:
+  if (fromSize > maxSize) {
+    resultNumTruncatedBytes = fromSize - maxSize;
+    resultFrameSize = maxSize;
+  } else {
+    resultNumTruncatedBytes = 0;
+    resultFrameSize = fromSize;
+  }
+  memmove(to, fromPtr, resultFrameSize);
+
+  ++fNextOutgoingBin;
+  return True;
+}
+
+// A bin starts out empty (frameSize 0 == 'missing'), with no buffer:
+QCELPDeinterleavingBuffer::FrameDescriptor::FrameDescriptor()
+  : frameSize(0), frameData(NULL) {
+}
+
+// Each descriptor owns its frame buffer:
+QCELPDeinterleavingBuffer::FrameDescriptor::~FrameDescriptor() {
+  delete[] frameData;
+}
diff --git a/liveMedia/QuickTimeFileSink.cpp b/liveMedia/QuickTimeFileSink.cpp
new file mode 100644
index 0000000..62c6583
--- /dev/null
+++ b/liveMedia/QuickTimeFileSink.cpp
@@ -0,0 +1,2339 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A sink that generates a QuickTime file from a composite media session
+// Implementation
+
+#include "QuickTimeFileSink.hh"
+#include "QuickTimeGenericRTPSource.hh"
+#include "GroupsockHelper.hh"
+#include "InputFile.hh"
+#include "OutputFile.hh"
+#include "H263plusVideoRTPSource.hh" // for the special header
+#include "MPEG4GenericRTPSource.hh" //for "samplingFrequencyFromAudioSpecificConfig()"
+#include "MPEG4LATMAudioRTPSource.hh" // for "parseGeneralConfigStr()"
+#include "Base64.hh"
+
+#include <ctype.h>
+
+#define fourChar(x,y,z,w) ( ((x)<<24)|((y)<<16)|((z)<<8)|(w) )
+
+#define H264_IDR_FRAME 0x65 //bit 8 == 0, bits 7-6 (ref) == 3, bits 5-0 (type) == 5
+
+////////// SubsessionIOState, ChunkDescriptor ///////////
+// A structure used to represent the I/O state of each input 'subsession':
+
+// Describes one contiguous chunk of frames written to the output file;
+// chunks are chained into a per-track list used later when writing the
+// file's metadata atoms.
+class ChunkDescriptor {
+public:
+  ChunkDescriptor(int64_t offsetInFile, unsigned size,
+                  unsigned frameSize, unsigned frameDuration,
+                  struct timeval presentationTime);
+
+  ChunkDescriptor* extendChunk(int64_t newOffsetInFile, unsigned newSize,
+                               unsigned newFrameSize,
+                               unsigned newFrameDuration,
+                               struct timeval newPresentationTime);
+      // this may end up allocating a new chunk instead
+public:
+  ChunkDescriptor* fNextChunk;
+  int64_t fOffsetInFile;
+  unsigned fNumFrames;
+  unsigned fFrameSize;
+  unsigned fFrameDuration;
+  struct timeval fPresentationTime; // of the start of the data
+};
+
+// A fixed-capacity accumulation buffer for one subsession's incoming frame
+// data; bytes are appended via "addBytes()" until the sink flushes it with
+// "reset()".
+class SubsessionBuffer {
+public:
+  SubsessionBuffer(unsigned bufferSize)
+    : fBufferSize(bufferSize), fData(new unsigned char[bufferSize]),
+      fBytesInUse(0) {
+  }
+  virtual ~SubsessionBuffer() { delete[] fData; }
+
+  void reset() { fBytesInUse = 0; }
+  void addBytes(unsigned numBytes) { fBytesInUse += numBytes; }
+
+  unsigned char* dataStart() { return fData; }
+  unsigned char* dataEnd() { return fData + fBytesInUse; }
+  unsigned bytesInUse() const { return fBytesInUse; }
+  unsigned bytesAvailable() const { return fBufferSize - fBytesInUse; }
+
+  void setPresentationTime(struct timeval const& presentationTime) {
+    fPresentationTime = presentationTime;
+  }
+  struct timeval const& presentationTime() const {return fPresentationTime;}
+
+private:
+  unsigned fBufferSize; // fixed capacity, in bytes
+  struct timeval fPresentationTime; // set via "setPresentationTime()"
+  unsigned char* fData;
+  unsigned fBytesInUse;
+};
+
+// A node in a per-track linked list of sync ('key') frame numbers:
+class SyncFrame {
+public:
+  SyncFrame(unsigned frameNum);
+
+public:
+  class SyncFrame *nextSyncFrame;
+  unsigned sfFrameNum;
+};
+
+// A 64-bit counter, used below:
+class Count64 {
+public:
+ Count64()
+ : hi(0), lo(0) {
+ }
+
+ void operator+=(unsigned arg);
+
+ u_int32_t hi, lo;
+};
+
+// Per-subsession I/O state: buffers incoming frames, tracks chunk/sample
+// accounting for the QuickTime metadata, and (optionally) mirrors another
+// track as its 'hint' track.
+class SubsessionIOState {
+public:
+  SubsessionIOState(QuickTimeFileSink& sink, MediaSubsession& subsession);
+  virtual ~SubsessionIOState();
+
+  Boolean setQTstate();
+  void setFinalQTstate();
+
+  void afterGettingFrame(unsigned packetDataSize,
+			 struct timeval presentationTime);
+  void onSourceClosure();
+
+  Boolean syncOK(struct timeval presentationTime);
+      // returns true iff data is usable despite a sync check
+
+  static void setHintTrack(SubsessionIOState* hintedTrack,
+			   SubsessionIOState* hintTrack);
+  Boolean isHintTrack() const { return fTrackHintedByUs != NULL; }
+  Boolean hasHintTrack() const { return fHintTrackForUs != NULL; }
+
+  UsageEnvironment& envir() const { return fOurSink.envir(); }
+
+public:
+  static unsigned fCurrentTrackNumber; // used to assign unique track IDs
+  unsigned fTrackID;
+  SubsessionIOState* fHintTrackForUs; SubsessionIOState* fTrackHintedByUs;
+
+  SubsessionBuffer *fBuffer, *fPrevBuffer;
+  QuickTimeFileSink& fOurSink;
+  MediaSubsession& fOurSubsession;
+
+  unsigned short fLastPacketRTPSeqNum;
+  Boolean fOurSourceIsActive;
+
+  Boolean fHaveBeenSynced; // used in synchronizing with other streams
+  struct timeval fSyncTime;
+
+  // QuickTime-specific state (NOTE(review): these appear to be filled in by
+  // "setQTstate()", whose implementation is not shown here — confirm):
+  Boolean fQTEnableTrack;
+  unsigned fQTcomponentSubtype;
+  char const* fQTcomponentName;
+  typedef unsigned (QuickTimeFileSink::*atomCreationFunc)();
+  atomCreationFunc fQTMediaInformationAtomCreator;
+  atomCreationFunc fQTMediaDataAtomCreator;
+  char const* fQTAudioDataType;
+  unsigned short fQTSoundSampleVersion;
+  unsigned fQTTimeScale;
+  unsigned fQTTimeUnitsPerSample;
+  unsigned fQTBytesPerFrame;
+  unsigned fQTSamplesPerFrame;
+  // These next fields are derived from the ones above,
+  // plus the information from each chunk:
+  unsigned fQTTotNumSamples;
+  unsigned fQTDurationM; // in media time units
+  unsigned fQTDurationT; // in track time units
+  int64_t fTKHD_durationPosn;
+      // position of the duration in the output 'tkhd' atom
+  unsigned fQTInitialOffsetDuration;
+      // if there's a pause at the beginning
+
+  ChunkDescriptor *fHeadChunk, *fTailChunk;
+  unsigned fNumChunks;
+  SyncFrame *fHeadSyncFrame, *fTailSyncFrame;
+
+  // Counters to be used in the hint track's 'udta'/'hinf' atom;
+  struct hinf {
+    Count64 trpy;
+    Count64 nump;
+    Count64 tpyl;
+    // Is 'maxr' needed? Computing this would be a PITA. #####
+    Count64 dmed;
+    Count64 dimm;
+    // 'drep' is always 0
+    // 'tmin' and 'tmax' are always 0
+    unsigned pmax;
+    unsigned dmax;
+  } fHINF;
+
+private:
+  void useFrame(SubsessionBuffer& buffer);
+  void useFrameForHinting(unsigned frameSize,
+			  struct timeval presentationTime,
+			  unsigned startSampleNumber);
+
+  // used by the above two routines:
+  unsigned useFrame1(unsigned sourceDataSize,
+		     struct timeval presentationTime,
+		     unsigned frameDuration, int64_t destFileOffset);
+      // returns the number of samples in this data
+
+private:
+  // A structure used for temporarily storing frame state:
+  struct {
+    unsigned frameSize;
+    struct timeval presentationTime;
+    int64_t destFileOffset; // used for non-hint tracks only
+
+    // The remaining fields are used for hint tracks only:
+    unsigned startSampleNumber;
+    unsigned short seqNum;
+    unsigned rtpHeader;
+    unsigned char numSpecialHeaders; // used when our RTP source has special headers
+    unsigned specialHeaderBytesLength; // ditto
+    unsigned char specialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // ditto
+    unsigned packetSizes[256];
+  } fPrevFrameState;
+};
+
+
+////////// QuickTimeFileSink implementation //////////
+
+// Open the output file, create per-subsession I/O state (and optional hint
+// tracks), and write the initial "mdat" atom header whose size is patched
+// in later by "completeOutputFile()".
+QuickTimeFileSink::QuickTimeFileSink(UsageEnvironment& env,
+				     MediaSession& inputSession,
+				     char const* outputFileName,
+				     unsigned bufferSize,
+				     unsigned short movieWidth,
+				     unsigned short movieHeight,
+				     unsigned movieFPS,
+				     Boolean packetLossCompensate,
+				     Boolean syncStreams,
+				     Boolean generateHintTracks,
+				     Boolean generateMP4Format)
+  : Medium(env), fInputSession(inputSession),
+    fBufferSize(bufferSize), fPacketLossCompensate(packetLossCompensate),
+    fSyncStreams(syncStreams), fGenerateMP4Format(generateMP4Format),
+    fAreCurrentlyBeingPlayed(False),
+    fLargestRTPtimestampFrequency(0),
+    fNumSubsessions(0), fNumSyncedSubsessions(0),
+    fHaveCompletedOutputFile(False),
+    fMovieWidth(movieWidth), fMovieHeight(movieHeight),
+    fMovieFPS(movieFPS), fMaxTrackDurationM(0) {
+  fOutFid = OpenOutputFile(env, outputFileName);
+  if (fOutFid == NULL) return; // createNew() checks this and fails
+
+  fNewestSyncTime.tv_sec = fNewestSyncTime.tv_usec = 0;
+  // Start "fFirstDataTime" at the maximum value, so the first comparison in
+  // "completeOutputFile()" always replaces it:
+  fFirstDataTime.tv_sec = fFirstDataTime.tv_usec = (unsigned)(~0);
+
+  // Set up I/O state for each input subsession:
+  MediaSubsessionIterator iter(fInputSession);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    // Ignore subsessions without a data source:
+    FramedSource* subsessionSource = subsession->readSource();
+    if (subsessionSource == NULL) continue;
+
+    // If "subsession's" SDP description specified screen dimension
+    // or frame rate parameters, then use these.  (Note that this must
+    // be done before the call to "setQTState()" below.)
+    if (subsession->videoWidth() != 0) {
+      fMovieWidth = subsession->videoWidth();
+    }
+    if (subsession->videoHeight() != 0) {
+      fMovieHeight = subsession->videoHeight();
+    }
+    if (subsession->videoFPS() != 0) {
+      fMovieFPS = subsession->videoFPS();
+    }
+
+    SubsessionIOState* ioState
+      = new SubsessionIOState(*this, *subsession);
+    if (ioState == NULL || !ioState->setQTstate()) {
+      // We're not able to output a QuickTime track for this subsession
+      delete ioState; ioState = NULL;
+      continue;
+    }
+    subsession->miscPtr = (void*)ioState;
+
+    if (generateHintTracks) {
+      // Also create a hint track for this track:
+      SubsessionIOState* hintTrack
+	= new SubsessionIOState(*this, *subsession);
+      SubsessionIOState::setHintTrack(ioState, hintTrack);
+      if (!hintTrack->setQTstate()) {
+	delete hintTrack;
+	SubsessionIOState::setHintTrack(ioState, NULL);
+      }
+    }
+
+    // Also set a 'BYE' handler for this subsession's RTCP instance:
+    if (subsession->rtcpInstance() != NULL) {
+      subsession->rtcpInstance()->setByeHandler(onRTCPBye, ioState);
+    }
+
+    unsigned rtpTimestampFrequency = subsession->rtpTimestampFrequency();
+    if (rtpTimestampFrequency > fLargestRTPtimestampFrequency) {
+      fLargestRTPtimestampFrequency = rtpTimestampFrequency;
+    }
+
+    ++fNumSubsessions;
+  }
+
+  // Use the current time as the file's creation and modification
+  // time.  Use Apple's time format: seconds (UTC) since January 1, 1904
+  // NOTE(review): the subtraction of 0x83da4f80 appears to rely on 32-bit
+  // wraparound to ADD the 1904->1970 epoch offset (2082844800 seconds ==
+  // -0x83da4f80 mod 2^32) — confirm fAppleCreationTime is 32 bits wide.
+
+  gettimeofday(&fStartTime, NULL);
+  fAppleCreationTime = fStartTime.tv_sec - 0x83da4f80;
+
+  // Begin by writing a "mdat" atom at the start of the file.
+  // (Later, when we've finished copying data to the file, we'll come
+  // back and fill in its size.)
+  fMDATposition = TellFile64(fOutFid);
+  addAtomHeader64("mdat");
+  // add 64Bit offset
+  fMDATposition += 8;
+}
+
+QuickTimeFileSink::~QuickTimeFileSink() {
+  // Finish writing the file's metadata (a no-op if already done):
+  completeOutputFile();
+
+  // Then, stop streaming and delete each active "SubsessionIOState":
+  MediaSubsessionIterator iter(fInputSession);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    if (subsession->readSource() != NULL) subsession->readSource()->stopGettingFrames();
+
+    SubsessionIOState* ioState
+      = (SubsessionIOState*)(subsession->miscPtr);
+    if (ioState == NULL) continue;
+
+    delete ioState->fHintTrackForUs; // if any
+    delete ioState;
+  }
+
+  // Finally, close our output file:
+  CloseOutputFile(fOutFid);
+}
+
+QuickTimeFileSink*
+QuickTimeFileSink::createNew(UsageEnvironment& env,
+                             MediaSession& inputSession,
+                             char const* outputFileName,
+                             unsigned bufferSize,
+                             unsigned short movieWidth,
+                             unsigned short movieHeight,
+                             unsigned movieFPS,
+                             Boolean packetLossCompensate,
+                             Boolean syncStreams,
+                             Boolean generateHintTracks,
+                             Boolean generateMP4Format) {
+  QuickTimeFileSink* sink
+    = new QuickTimeFileSink(env, inputSession, outputFileName, bufferSize,
+                            movieWidth, movieHeight, movieFPS,
+                            packetLossCompensate, syncStreams,
+                            generateHintTracks, generateMP4Format);
+  // Creation fails if the output file could not be opened:
+  if (sink != NULL && sink->fOutFid != NULL) return sink;
+
+  Medium::close(sink);
+  return NULL;
+}
+
+// Hook for subclasses to observe each recorded frame.
+void QuickTimeFileSink
+::noteRecordedFrame(MediaSubsession& /*inputSubsession*/,
+		    unsigned /*packetDataSize*/, struct timeval const& /*presentationTime*/) {
+  // Default implementation: Do nothing
+}
+
+Boolean QuickTimeFileSink::startPlaying(afterPlayingFunc* afterFunc,
+                                        void* afterClientData) {
+  // Refuse to be started a second time:
+  if (fAreCurrentlyBeingPlayed) {
+    envir().setResultMsg("This sink has already been played");
+    return False;
+  }
+
+  // Record the 'after playing' callback, then kick off the first round of
+  // frame reads:
+  fAfterFunc = afterFunc;
+  fAfterClientData = afterClientData;
+  fAreCurrentlyBeingPlayed = True;
+
+  return continuePlaying();
+}
+
+// Request the next frame from every subsession source that isn't already
+// awaiting data; fails if no subsession is active.
+Boolean QuickTimeFileSink::continuePlaying() {
+  // Run through each of our input session's 'subsessions',
+  // asking for a frame from each one:
+  Boolean haveActiveSubsessions = False;
+  MediaSubsessionIterator iter(fInputSession);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    FramedSource* subsessionSource = subsession->readSource();
+    if (subsessionSource == NULL) continue;
+
+    if (subsessionSource->isCurrentlyAwaitingData()) continue;
+
+    SubsessionIOState* ioState
+      = (SubsessionIOState*)(subsession->miscPtr);
+    if (ioState == NULL) continue;
+
+    haveActiveSubsessions = True;
+    // Append the new frame after any data already in the buffer:
+    unsigned char* toPtr = ioState->fBuffer->dataEnd();
+    unsigned toSize = ioState->fBuffer->bytesAvailable();
+    subsessionSource->getNextFrame(toPtr, toSize,
+				   afterGettingFrame, ioState,
+				   onSourceClosure, ioState);
+  }
+  if (!haveActiveSubsessions) {
+    envir().setResultMsg("No subsessions are currently active");
+    return False;
+  }
+
+  return True;
+}
+
+// Static trampoline invoked when a subsession delivers a frame; drops
+// frames that fail the inter-stream sync check, warns about truncation,
+// then hands the frame to the subsession's I/O state.
+void QuickTimeFileSink
+::afterGettingFrame(void* clientData, unsigned packetDataSize,
+		    unsigned numTruncatedBytes,
+		    struct timeval presentationTime,
+		    unsigned /*durationInMicroseconds*/) {
+  SubsessionIOState* ioState = (SubsessionIOState*)clientData;
+  if (!ioState->syncOK(presentationTime)) {
+    // Ignore this data:
+    ioState->fOurSink.continuePlaying();
+    return;
+  }
+  if (numTruncatedBytes > 0) {
+    ioState->envir() << "QuickTimeFileSink::afterGettingFrame(): The input frame data was too large for our buffer.  "
+	             << numTruncatedBytes
+	             << " bytes of trailing data was dropped!  Correct this by increasing the \"bufferSize\" parameter in the \"createNew()\" call.\n";
+  }
+  ioState->afterGettingFrame(packetDataSize, presentationTime);
+}
+
+void QuickTimeFileSink::onSourceClosure(void* clientData) {
+  // Static trampoline: forward to the per-subsession closure handler:
+  ((SubsessionIOState*)clientData)->onSourceClosure();
+}
+
+// Called after any subsession source closes; finishes the file (and runs
+// the client's 'after' callback) only once ALL sources have closed.
+void QuickTimeFileSink::onSourceClosure1() {
+  // Check whether *all* of the subsession sources have closed.
+  // If not, do nothing for now:
+  MediaSubsessionIterator iter(fInputSession);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    SubsessionIOState* ioState
+      = (SubsessionIOState*)(subsession->miscPtr);
+    if (ioState == NULL) continue;
+
+    if (ioState->fOurSourceIsActive) return; // this source hasn't closed
+  }
+
+  completeOutputFile();
+
+  // Call our specified 'after' function:
+  if (fAfterFunc != NULL) {
+    (*fAfterFunc)(fAfterClientData);
+  }
+}
+
+// RTCP 'BYE' handler for a subsession: log it, then treat it as a source
+// closure.
+void QuickTimeFileSink::onRTCPBye(void* clientData) {
+  SubsessionIOState* ioState = (SubsessionIOState*)clientData;
+
+  struct timeval timeNow;
+  gettimeofday(&timeNow, NULL);
+  unsigned secsDiff
+    = timeNow.tv_sec - ioState->fOurSink.fStartTime.tv_sec;
+
+  MediaSubsession& subsession = ioState->fOurSubsession;
+  ioState->envir() << "Received RTCP \"BYE\" on \""
+		   << subsession.mediumName()
+		   << "/" << subsession.codecName()
+		   << "\" subsession (after "
+		   << secsDiff << " seconds)\n";
+
+  // Handle the reception of a RTCP "BYE" as if the source had closed:
+  ioState->onSourceClosure();
+}
+
+// Returns True iff tv1 >= tv2.  (The casts to "unsigned" preserve the
+// original comparison semantics, which treat the fields as unsigned.)
+static Boolean timevalGE(struct timeval const& tv1,
+			 struct timeval const& tv2) {
+  unsigned const secs1 = (unsigned)tv1.tv_sec;
+  unsigned const secs2 = (unsigned)tv2.tv_sec;
+  if (secs1 != secs2) return secs1 > secs2;
+  return (unsigned)tv1.tv_usec >= (unsigned)tv2.tv_usec;
+}
+
+// Finish the output file: patch the "mdat" atom's size, finalize per-track
+// QuickTime state, and append the metadata ("ftyp"/"moov") atoms.
+// Idempotent - does nothing on a second call.
+void QuickTimeFileSink::completeOutputFile() {
+  if (fHaveCompletedOutputFile || fOutFid == NULL) return;
+
+  // Begin by filling in the initial "mdat" atom with the current
+  // file size:
+  int64_t curFileSize = TellFile64(fOutFid);
+  setWord64(fMDATposition, (u_int64_t)curFileSize);
+
+  // Then, note the time of the first received data, taken as the earliest
+  // head-chunk presentation time across all subsessions:
+  MediaSubsessionIterator iter(fInputSession);
+  MediaSubsession* subsession;
+  while ((subsession = iter.next()) != NULL) {
+    SubsessionIOState* ioState
+      = (SubsessionIOState*)(subsession->miscPtr);
+    if (ioState == NULL) continue;
+
+    ChunkDescriptor* const headChunk = ioState->fHeadChunk;
+    if (headChunk != NULL
+	&& timevalGE(fFirstDataTime, headChunk->fPresentationTime)) {
+      fFirstDataTime = headChunk->fPresentationTime;
+    }
+  }
+
+  // Then, update the QuickTime-specific state for each active track:
+  iter.reset();
+  while ((subsession = iter.next()) != NULL) {
+    SubsessionIOState* ioState
+      = (SubsessionIOState*)(subsession->miscPtr);
+    if (ioState == NULL) continue;
+
+    ioState->setFinalQTstate();
+    // Do the same for a hint track (if any):
+    if (ioState->hasHintTrack()) {
+      ioState->fHintTrackForUs->setFinalQTstate();
+    }
+  }
+
+  if (fGenerateMP4Format) {
+    // Begin with a "ftyp" atom:
+    addAtom_ftyp();
+  }
+
+  // Then, add a "moov" atom for the file metadata:
+  addAtom_moov();
+
+  // We're done:
+  fHaveCompletedOutputFile = True;
+}
+
+
+////////// SubsessionIOState, ChunkDescriptor implementation ///////////
+
// Monotonically-increasing counter used to hand out a unique, 1-based
// track ID to each SubsessionIOState (see the constructor).
unsigned SubsessionIOState::fCurrentTrackNumber = 0;
+
// Per-subsession recording state. Allocates the frame-accumulation buffer
// (and, when packet-loss compensation is enabled, a second buffer holding
// the previous frame) and assigns this track a file-unique track ID.
SubsessionIOState::SubsessionIOState(QuickTimeFileSink& sink,
				     MediaSubsession& subsession)
  : fHintTrackForUs(NULL), fTrackHintedByUs(NULL),
    fOurSink(sink), fOurSubsession(subsession),
    fLastPacketRTPSeqNum(0), fHaveBeenSynced(False), fQTTotNumSamples(0),
    fHeadChunk(NULL), fTailChunk(NULL), fNumChunks(0),
    fHeadSyncFrame(NULL), fTailSyncFrame(NULL) {
  fTrackID = ++fCurrentTrackNumber;

  fBuffer = new SubsessionBuffer(fOurSink.fBufferSize);
  // "fPrevBuffer" is used only to re-insert the previous frame when a
  // RTP sequence-number gap is detected (see afterGettingFrame()):
  fPrevBuffer = sink.fPacketLossCompensate
    ? new SubsessionBuffer(fOurSink.fBufferSize) : NULL;

  FramedSource* subsessionSource = subsession.readSource();
  fOurSourceIsActive = subsessionSource != NULL;

  // A zero presentation time marks 'no previous frame yet':
  fPrevFrameState.presentationTime.tv_sec = 0;
  fPrevFrameState.presentationTime.tv_usec = 0;
  fPrevFrameState.seqNum = 0;
}
+
+SubsessionIOState::~SubsessionIOState() {
+ delete fBuffer; delete fPrevBuffer;
+
+ // Delete the list of chunk descriptors:
+ ChunkDescriptor* chunk = fHeadChunk;
+ while (chunk != NULL) {
+ ChunkDescriptor* next = chunk->fNextChunk;
+ delete chunk;
+ chunk = next;
+ }
+
+ // Delete the list of sync frames:
+ SyncFrame* syncFrame = fHeadSyncFrame;
+ while (syncFrame != NULL) {
+ SyncFrame* next = syncFrame->nextSyncFrame;
+ delete syncFrame;
+ syncFrame = next;
+ }
+}
+
// Chooses the QuickTime representation (component subtype, media-data atom
// creator, time scale, bytes/samples per frame) for this subsession, based
// on its medium ("audio"/"video") and codec name. Returns True iff the
// medium is one we can represent; unknown *codecs* still return True, but
// get a dummy "????" media-data atom and are disabled in the movie.
Boolean SubsessionIOState::setQTstate() {
  char const* noCodecWarning1 = "Warning: We don't implement a QuickTime ";
  char const* noCodecWarning2 = " Media Data Type for the \"";
  char const* noCodecWarning3 = "\" track, so we'll insert a dummy \"????\" Media Data Atom instead. A separate, codec-specific editing pass will be needed before this track can be played.\n";

  // The do/while(0) lets the unknown-medium case "break" out to the
  // failure message at the bottom:
  do {
    fQTEnableTrack = True; // enable this track in the movie by default
    fQTTimeScale = fOurSubsession.rtpTimestampFrequency(); // by default
    fQTTimeUnitsPerSample = 1; // by default
    fQTBytesPerFrame = 0;
    // by default - indicates that the whole packet data is a frame
    fQTSamplesPerFrame = 1; // by default

    // Make sure our subsession's medium is one that we know how to
    // represent in a QuickTime file:
    if (isHintTrack()) {
      // Hint tracks are treated specially
      fQTEnableTrack = False; // hint tracks are marked as inactive
      fQTcomponentSubtype = fourChar('h','i','n','t');
      fQTcomponentName = "hint media handler";
      fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_gmhd;
      fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_rtp;
    } else if (strcmp(fOurSubsession.mediumName(), "audio") == 0) {
      fQTcomponentSubtype = fourChar('s','o','u','n');
      fQTcomponentName = "Apple Sound Media Handler";
      fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_smhd;
      fQTMediaDataAtomCreator
	= &QuickTimeFileSink::addAtom_soundMediaGeneral; // by default
      fQTSoundSampleVersion = 0; // by default

      // Make sure that our subsession's codec is one that we can handle:
      if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
	  strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
      } else if (strcmp(fOurSubsession.codecName(), "PCMU") == 0) {
	fQTAudioDataType = "ulaw";
	fQTBytesPerFrame = 1;
      } else if (strcmp(fOurSubsession.codecName(), "GSM") == 0) {
	fQTAudioDataType = "agsm";
	fQTBytesPerFrame = 33;
	fQTSamplesPerFrame = 160;
      } else if (strcmp(fOurSubsession.codecName(), "PCMA") == 0) {
	fQTAudioDataType = "alaw";
	fQTBytesPerFrame = 1;
      } else if (strcmp(fOurSubsession.codecName(), "QCELP") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_Qclp;
	fQTSamplesPerFrame = 160;
      } else if (strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0 ||
		 strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4a;
	fQTTimeUnitsPerSample = 1024; // QT considers each frame to be a 'sample'
	// The time scale (frequency) comes from the 'config' information.
	// It might be different from the RTP timestamp frequency (e.g., aacPlus).
	unsigned frequencyFromConfig
	  = samplingFrequencyFromAudioSpecificConfig(fOurSubsession.fmtp_config());
	if (frequencyFromConfig != 0) fQTTimeScale = frequencyFromConfig;
      } else {
	envir() << noCodecWarning1 << "Audio" << noCodecWarning2
		<< fOurSubsession.codecName() << noCodecWarning3;
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
	fQTEnableTrack = False; // disable this track in the movie
      }
    } else if (strcmp(fOurSubsession.mediumName(), "video") == 0) {
      fQTcomponentSubtype = fourChar('v','i','d','e');
      fQTcomponentName = "Apple Video Media Handler";
      fQTMediaInformationAtomCreator = &QuickTimeFileSink::addAtom_vmhd;

      // Make sure that our subsession's codec is one that we can handle:
      if (strcmp(fOurSubsession.codecName(), "X-QT") == 0 ||
	  strcmp(fOurSubsession.codecName(), "X-QUICKTIME") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_genericMedia;
      } else if (strcmp(fOurSubsession.codecName(), "H263-1998") == 0 ||
		 strcmp(fOurSubsession.codecName(), "H263-2000") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_h263;
	fQTTimeScale = 600;
	fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
      } else if (strcmp(fOurSubsession.codecName(), "H264") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_avc1;
	fQTTimeScale = 600;
	fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
      } else if (strcmp(fOurSubsession.codecName(), "MP4V-ES") == 0) {
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_mp4v;
	fQTTimeScale = 600;
	fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
      } else {
	envir() << noCodecWarning1 << "Video" << noCodecWarning2
		<< fOurSubsession.codecName() << noCodecWarning3;
	fQTMediaDataAtomCreator = &QuickTimeFileSink::addAtom_dummy;
	fQTEnableTrack = False; // disable this track in the movie
      }
    } else {
      // Unknown medium: fall through to the failure message below.
      envir() << "Warning: We don't implement a QuickTime Media Handler for media type \""
	      << fOurSubsession.mediumName() << "\"";
      break;
    }

#ifdef QT_SUPPORT_PARTIALLY_ONLY
    envir() << "Warning: We don't have sufficient codec-specific information (e.g., sample sizes) to fully generate the \""
	    << fOurSubsession.mediumName() << "/" << fOurSubsession.codecName()
	    << "\" track, so we'll disable this track in the movie. A separate, codec-specific editing pass will be needed before this track can be played\n";
    fQTEnableTrack = False; // disable this track in the movie
#endif

    return True;
  } while (0);

  // Reached only via the "break" above (unknown medium); this completes
  // the warning message that was begun there:
  envir() << ", so a track for the \"" << fOurSubsession.mediumName()
	  << "/" << fOurSubsession.codecName()
	  << "\" subsession will not be included in the output QuickTime file\n";
  return False;
}
+
+void SubsessionIOState::setFinalQTstate() {
+ // Compute derived parameters, by running through the list of chunks:
+ fQTDurationT = 0;
+
+ ChunkDescriptor* chunk = fHeadChunk;
+ while (chunk != NULL) {
+ unsigned const numFrames = chunk->fNumFrames;
+ unsigned const dur = numFrames*chunk->fFrameDuration;
+ fQTDurationT += dur;
+
+ chunk = chunk->fNextChunk;
+ }
+
+ // Convert this duration from track to movie time scale:
+ double scaleFactor = fOurSink.movieTimeScale()/(double)fQTTimeScale;
+ fQTDurationM = (unsigned)(fQTDurationT*scaleFactor);
+
+ if (fQTDurationM > fOurSink.fMaxTrackDurationM) {
+ fOurSink.fMaxTrackDurationM = fQTDurationM;
+ }
+}
+
// Processes one newly-received frame: optionally compensates for lost RTP
// packets by replaying the previous frame, records the frame via useFrame(),
// and then asks the sink to continue playing.
void SubsessionIOState::afterGettingFrame(unsigned packetDataSize,
					  struct timeval presentationTime) {
  // Begin by checking whether there was a gap in the RTP stream.
  // If so, try to compensate for this (if desired):
  if (fOurSubsession.rtpSource() != NULL) { // we have a RTP stream
    unsigned short rtpSeqNum
      = fOurSubsession.rtpSource()->curPacketRTPSeqNum();
    if (fOurSink.fPacketLossCompensate && fPrevBuffer->bytesInUse() > 0) {
      // 16-bit subtraction in a (short) handles sequence-number wraparound:
      short seqNumGap = rtpSeqNum - fLastPacketRTPSeqNum;
      for (short i = 1; i < seqNumGap; ++i) {
	// Insert a copy of the previous frame, to compensate for the loss:
	useFrame(*fPrevBuffer);
      }
    }
    fLastPacketRTPSeqNum = rtpSeqNum;
  }

  // Now, continue working with the frame that we just got
  fOurSink.noteRecordedFrame(fOurSubsession, packetDataSize, presentationTime);

  // The buffer's presentation time is that of its *first* data:
  if (fBuffer->bytesInUse() == 0) {
    fBuffer->setPresentationTime(presentationTime);
  }
  fBuffer->addBytes(packetDataSize);

  // If our RTP source is a "QuickTimeGenericRTPSource", then
  // use its 'qtState' to set some parameters that we need:
  if (fOurSubsession.rtpSource() != NULL // we have a RTP stream
      && fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_genericMedia) {
    QuickTimeGenericRTPSource* rtpSource
      = (QuickTimeGenericRTPSource*)fOurSubsession.rtpSource();
    QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
    fQTTimeScale = qtState.timescale;
    if (qtState.width != 0) {
      fOurSink.fMovieWidth = qtState.width;
    }
    if (qtState.height != 0) {
      fOurSink.fMovieHeight = qtState.height;
    }

    // Also, if the media type in the "sdAtom" is one that we recognize
    // to have a special parameters, then fix this here:
    if (qtState.sdAtomSize >= 8) {
      char const* atom = qtState.sdAtom;
      // Bytes 4-7 of a sample-description atom hold its 4-char media type:
      unsigned mediaType = fourChar(atom[4],atom[5],atom[6],atom[7]);
      switch (mediaType) {
      case fourChar('a','g','s','m'): {
	fQTBytesPerFrame = 33;
	fQTSamplesPerFrame = 160;
	break;
      }
      case fourChar('Q','c','l','p'): {
	fQTBytesPerFrame = 35;
	fQTSamplesPerFrame = 160;
	break;
      }
      case fourChar('H','c','l','p'): {
	fQTBytesPerFrame = 17;
	fQTSamplesPerFrame = 160;
	break;
      }
      case fourChar('h','2','6','3'): {
	fQTTimeUnitsPerSample = fQTTimeScale/fOurSink.fMovieFPS;
	break;
      }
      }
    }
  } else if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_Qclp) {
    // For QCELP data, make a note of the frame size (even though it's the
    // same as the packet data size), because it varies depending on the
    // 'rate' of the stream, and this size gets used later when setting up
    // the 'Qclp' QuickTime atom:
    fQTBytesPerFrame = packetDataSize;
  }

  useFrame(*fBuffer);
  if (fOurSink.fPacketLossCompensate) {
    // Save this frame, in case we need it for recovery:
    SubsessionBuffer* tmp = fPrevBuffer; // assert: != NULL
    fPrevBuffer = fBuffer;
    fBuffer = tmp;
  }
  fBuffer->reset(); // for the next input

  // Now, try getting more frames:
  fOurSink.continuePlaying();
}
+
// Records one buffered frame: updates the chunk list (via useFrame1()),
// writes the frame data to the output file, and - for synced video - defers
// the bookkeeping by one frame so that successive presentation times can be
// used as frame durations. Also feeds the hint track, if one exists.
void SubsessionIOState::useFrame(SubsessionBuffer& buffer) {
  unsigned char* const frameSource = buffer.dataStart();
  unsigned const frameSize = buffer.bytesInUse();
  struct timeval const& presentationTime = buffer.presentationTime();
  int64_t const destFileOffset = TellFile64(fOurSink.fOutFid);
  unsigned sampleNumberOfFrameStart = fQTTotNumSamples + 1;
  // H.264 frames get a 4-byte length prefix in the file (see below):
  Boolean avcHack = fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1;

  // If we're not syncing streams, or this subsession is not video, then
  // just give this frame a fixed duration:
  if (!fOurSink.fSyncStreams
      || fQTcomponentSubtype != fourChar('v','i','d','e')) {
    unsigned const frameDuration = fQTTimeUnitsPerSample*fQTSamplesPerFrame;
    unsigned frameSizeToUse = frameSize;
    if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix

    fQTTotNumSamples += useFrame1(frameSizeToUse, presentationTime, frameDuration, destFileOffset);
  } else {
    // For synced video streams, we use the difference between successive
    // frames' presentation times as the 'frame duration'. So, record
    // information about the *previous* frame:
    struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
    if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
      // There has been a previous frame.
      double duration = (presentationTime.tv_sec - ppt.tv_sec)
	+ (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
      if (duration < 0.0) duration = 0.0;
      unsigned frameDuration
	= (unsigned)((2*duration*fQTTimeScale+1)/2); // round
      unsigned frameSizeToUse = fPrevFrameState.frameSize;
      if (avcHack) frameSizeToUse += 4; // H.264/AVC gets the frame size prefix

      unsigned numSamples
	= useFrame1(frameSizeToUse, ppt, frameDuration, fPrevFrameState.destFileOffset);
      fQTTotNumSamples += numSamples;
      sampleNumberOfFrameStart = fQTTotNumSamples + 1;
    }

    // Record H.264 IDR frames as 'sync frames' (for the 'stss' atom later):
    if (avcHack && (*frameSource == H264_IDR_FRAME)) {
      SyncFrame* newSyncFrame = new SyncFrame(fQTTotNumSamples + 1);
      if (fTailSyncFrame == NULL) {
	fHeadSyncFrame = newSyncFrame;
      } else {
	fTailSyncFrame->nextSyncFrame = newSyncFrame;
      }
      fTailSyncFrame = newSyncFrame;
    }

    // Remember the current frame for next time:
    fPrevFrameState.frameSize = frameSize;
    fPrevFrameState.presentationTime = presentationTime;
    fPrevFrameState.destFileOffset = destFileOffset;
  }

  if (avcHack) fOurSink.addWord(frameSize);

  // Write the data into the file:
  fwrite(frameSource, 1, frameSize, fOurSink.fOutFid);

  // If we have a hint track, then write to it also (only if we have a RTP stream):
  if (hasHintTrack() && fOurSubsession.rtpSource() != NULL) {
    // Because presentation times are used for RTP packet timestamps,
    // we don't starting writing to the hint track until we've been synced:
    if (!fHaveBeenSynced) {
      fHaveBeenSynced = fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP();
    }
    if (fHaveBeenSynced) {
      fHintTrackForUs->useFrameForHinting(frameSize, presentationTime,
					  sampleNumberOfFrameStart);
    }
  }
}
+
+void SubsessionIOState::useFrameForHinting(unsigned frameSize,
+ struct timeval presentationTime,
+ unsigned startSampleNumber) {
+ // At this point, we have a single, combined frame - not individual packets.
+ // For the hint track, we need to split the frame back up into separate packets.
+ // However, for some RTP sources, then we also need to reuse the special
+ // header bytes that were at the start of each of the RTP packets.
+ Boolean hack263 = strcmp(fOurSubsession.codecName(), "H263-1998") == 0;
+ Boolean hackm4a_generic = strcmp(fOurSubsession.mediumName(), "audio") == 0
+ && strcmp(fOurSubsession.codecName(), "MPEG4-GENERIC") == 0;
+ Boolean hackm4a_latm = strcmp(fOurSubsession.mediumName(), "audio") == 0
+ && strcmp(fOurSubsession.codecName(), "MP4A-LATM") == 0;
+ Boolean hackm4a = hackm4a_generic || hackm4a_latm;
+ Boolean haveSpecialHeaders = (hack263 || hackm4a_generic);
+
+ // If there has been a previous frame, then output a 'hint sample' for it.
+ // (We use the current frame's presentation time to compute the previous
+ // hint sample's duration.)
+ RTPSource* const rs = fOurSubsession.rtpSource(); // abbrev (ASSERT: != NULL)
+ struct timeval const& ppt = fPrevFrameState.presentationTime; //abbrev
+ if (ppt.tv_sec != 0 || ppt.tv_usec != 0) {
+ double duration = (presentationTime.tv_sec - ppt.tv_sec)
+ + (presentationTime.tv_usec - ppt.tv_usec)/1000000.0;
+ if (duration < 0.0) duration = 0.0;
+ unsigned msDuration = (unsigned)(duration*1000); // milliseconds
+ if (msDuration > fHINF.dmax) fHINF.dmax = msDuration;
+ unsigned hintSampleDuration
+ = (unsigned)((2*duration*fQTTimeScale+1)/2); // round
+ if (hackm4a) {
+ // Because multiple AAC frames can appear in a RTP packet, the presentation
+ // times of the second and subsequent frames will not be accurate.
+ // So, use the known "hintSampleDuration" instead:
+ hintSampleDuration = fTrackHintedByUs->fQTTimeUnitsPerSample;
+
+ // Also, if the 'time scale' was different from the RTP timestamp frequency,
+ // (as can happen with aacPlus), then we need to scale "hintSampleDuration"
+ // accordingly:
+ if (fTrackHintedByUs->fQTTimeScale != fOurSubsession.rtpTimestampFrequency()) {
+ unsigned const scalingFactor
+ = fOurSubsession.rtpTimestampFrequency()/fTrackHintedByUs->fQTTimeScale ;
+ hintSampleDuration *= scalingFactor;
+ }
+ }
+
+ int64_t const hintSampleDestFileOffset = TellFile64(fOurSink.fOutFid);
+
+ unsigned const maxPacketSize = 1450;
+ unsigned short numPTEntries
+ = (fPrevFrameState.frameSize + (maxPacketSize-1))/maxPacketSize; // normal case
+ unsigned char* immediateDataPtr = NULL;
+ unsigned immediateDataBytesRemaining = 0;
+ if (haveSpecialHeaders) { // special case
+ numPTEntries = fPrevFrameState.numSpecialHeaders;
+ immediateDataPtr = fPrevFrameState.specialHeaderBytes;
+ immediateDataBytesRemaining
+ = fPrevFrameState.specialHeaderBytesLength;
+ }
+ unsigned hintSampleSize
+ = fOurSink.addHalfWord(numPTEntries);// Entry count
+ hintSampleSize += fOurSink.addHalfWord(0x0000); // Reserved
+
+ unsigned offsetWithinSample = 0;
+ for (unsigned i = 0; i < numPTEntries; ++i) {
+ // Output a Packet Table entry (representing a single RTP packet):
+ unsigned short numDTEntries = 1;
+ unsigned short seqNum = fPrevFrameState.seqNum++;
+ // Note: This assumes that the input stream had no packets lost #####
+ unsigned rtpHeader = fPrevFrameState.rtpHeader;
+ if (i+1 < numPTEntries) {
+ // This is not the last RTP packet, so clear the marker bit:
+ rtpHeader &=~ (1<<23);
+ }
+ unsigned dataFrameSize = (i+1 < numPTEntries)
+ ? maxPacketSize : fPrevFrameState.frameSize - i*maxPacketSize; // normal case
+ unsigned sampleNumber = fPrevFrameState.startSampleNumber;
+
+ unsigned char immediateDataLen = 0;
+ if (haveSpecialHeaders) { // special case
+ ++numDTEntries; // to include a Data Table entry for the special hdr
+ if (immediateDataBytesRemaining > 0) {
+ if (hack263) {
+ immediateDataLen = *immediateDataPtr++;
+ --immediateDataBytesRemaining;
+ if (immediateDataLen > immediateDataBytesRemaining) {
+ // shouldn't happen (length byte was bad)
+ immediateDataLen = immediateDataBytesRemaining;
+ }
+ } else {
+ immediateDataLen = fPrevFrameState.specialHeaderBytesLength;
+ }
+ }
+ dataFrameSize = fPrevFrameState.packetSizes[i] - immediateDataLen;
+
+ if (hack263) {
+ Boolean PbitSet
+ = immediateDataLen >= 1 && (immediateDataPtr[0]&0x4) != 0;
+ if (PbitSet) {
+ offsetWithinSample += 2; // to omit the two leading 0 bytes
+ }
+ }
+ }
+
+ // Output the Packet Table:
+ hintSampleSize += fOurSink.addWord(0); // Relative transmission time
+ hintSampleSize += fOurSink.addWord(rtpHeader|seqNum);
+ // RTP header info + RTP sequence number
+ hintSampleSize += fOurSink.addHalfWord(0x0000); // Flags
+ hintSampleSize += fOurSink.addHalfWord(numDTEntries); // Entry count
+ unsigned totalPacketSize = 0;
+
+ // Output the Data Table:
+ if (haveSpecialHeaders) {
+ // use the "Immediate Data" format (1):
+ hintSampleSize += fOurSink.addByte(1); // Source
+ unsigned char len = immediateDataLen > 14 ? 14 : immediateDataLen;
+ hintSampleSize += fOurSink.addByte(len); // Length
+ totalPacketSize += len; fHINF.dimm += len;
+ unsigned char j;
+ for (j = 0; j < len; ++j) {
+ hintSampleSize += fOurSink.addByte(immediateDataPtr[j]); // Data
+ }
+ for (j = len; j < 14; ++j) {
+ hintSampleSize += fOurSink.addByte(0); // Data (padding)
+ }
+
+ immediateDataPtr += immediateDataLen;
+ immediateDataBytesRemaining -= immediateDataLen;
+ }
+ // use the "Sample Data" format (2):
+ hintSampleSize += fOurSink.addByte(2); // Source
+ hintSampleSize += fOurSink.addByte(0); // Track ref index
+ hintSampleSize += fOurSink.addHalfWord(dataFrameSize); // Length
+ totalPacketSize += dataFrameSize; fHINF.dmed += dataFrameSize;
+ hintSampleSize += fOurSink.addWord(sampleNumber); // Sample number
+ hintSampleSize += fOurSink.addWord(offsetWithinSample); // Offset
+ // Get "bytes|samples per compression block" from the hinted track:
+ unsigned short const bytesPerCompressionBlock
+ = fTrackHintedByUs->fQTBytesPerFrame;
+ unsigned short const samplesPerCompressionBlock
+ = fTrackHintedByUs->fQTSamplesPerFrame;
+ hintSampleSize += fOurSink.addHalfWord(bytesPerCompressionBlock);
+ hintSampleSize += fOurSink.addHalfWord(samplesPerCompressionBlock);
+
+ offsetWithinSample += dataFrameSize;// for the next iteration (if any)
+
+ // Tally statistics for this packet:
+ fHINF.nump += 1;
+ fHINF.tpyl += totalPacketSize;
+ totalPacketSize += 12; // add in the size of the RTP header
+ fHINF.trpy += totalPacketSize;
+ if (totalPacketSize > fHINF.pmax) fHINF.pmax = totalPacketSize;
+ }
+
+ // Make note of this completed hint sample frame:
+ fQTTotNumSamples += useFrame1(hintSampleSize, ppt, hintSampleDuration,
+ hintSampleDestFileOffset);
+ }
+
+ // Remember this frame for next time:
+ fPrevFrameState.frameSize = frameSize;
+ fPrevFrameState.presentationTime = presentationTime;
+ fPrevFrameState.startSampleNumber = startSampleNumber;
+ fPrevFrameState.rtpHeader
+ = rs->curPacketMarkerBit()<<23
+ | (rs->rtpPayloadFormat()&0x7F)<<16;
+ if (hack263) {
+ H263plusVideoRTPSource* rs_263 = (H263plusVideoRTPSource*)rs;
+ fPrevFrameState.numSpecialHeaders = rs_263->fNumSpecialHeaders;
+ fPrevFrameState.specialHeaderBytesLength = rs_263->fSpecialHeaderBytesLength;
+ unsigned i;
+ for (i = 0; i < rs_263->fSpecialHeaderBytesLength; ++i) {
+ fPrevFrameState.specialHeaderBytes[i] = rs_263->fSpecialHeaderBytes[i];
+ }
+ for (i = 0; i < rs_263->fNumSpecialHeaders; ++i) {
+ fPrevFrameState.packetSizes[i] = rs_263->fPacketSizes[i];
+ }
+ } else if (hackm4a_generic) {
+ // Synthesize a special header, so that this frame can be in its own RTP packet.
+ unsigned const sizeLength = fOurSubsession.attrVal_unsigned("sizelength");
+ unsigned const indexLength = fOurSubsession.attrVal_unsigned("indexlength");
+ if (sizeLength + indexLength != 16) {
+ envir() << "Warning: unexpected 'sizeLength' " << sizeLength
+ << " and 'indexLength' " << indexLength
+ << "seen when creating hint track\n";
+ }
+ fPrevFrameState.numSpecialHeaders = 1;
+ fPrevFrameState.specialHeaderBytesLength = 4;
+ fPrevFrameState.specialHeaderBytes[0] = 0; // AU_headers_length (high byte)
+ fPrevFrameState.specialHeaderBytes[1] = 16; // AU_headers_length (low byte)
+ fPrevFrameState.specialHeaderBytes[2] = ((frameSize<<indexLength)&0xFF00)>>8;
+ fPrevFrameState.specialHeaderBytes[3] = (frameSize<<indexLength);
+ fPrevFrameState.packetSizes[0]
+ = fPrevFrameState.specialHeaderBytesLength + frameSize;
+ }
+}
+
+unsigned SubsessionIOState::useFrame1(unsigned sourceDataSize,
+ struct timeval presentationTime,
+ unsigned frameDuration,
+ int64_t destFileOffset) {
+ // Figure out the actual frame size for this data:
+ unsigned frameSize = fQTBytesPerFrame;
+ if (frameSize == 0) {
+ // The entire packet data is assumed to be a frame:
+ frameSize = sourceDataSize;
+ }
+ unsigned const numFrames = sourceDataSize/frameSize;
+ unsigned const numSamples = numFrames*fQTSamplesPerFrame;
+
+ // Record the information about which 'chunk' this data belongs to:
+ ChunkDescriptor* newTailChunk;
+ if (fTailChunk == NULL) {
+ newTailChunk = fHeadChunk
+ = new ChunkDescriptor(destFileOffset, sourceDataSize,
+ frameSize, frameDuration, presentationTime);
+ } else {
+ newTailChunk = fTailChunk->extendChunk(destFileOffset, sourceDataSize,
+ frameSize, frameDuration,
+ presentationTime);
+ }
+ if (newTailChunk != fTailChunk) {
+ // This data created a new chunk, rather than extending the old one
+ ++fNumChunks;
+ fTailChunk = newTailChunk;
+ }
+
+ return numSamples;
+}
+
// Marks this subsession's source as closed, then lets the sink check
// whether *all* sources have now closed (and, if so, finish the file).
void SubsessionIOState::onSourceClosure() {
  fOurSourceIsActive = False;
  fOurSink.onSourceClosure1();
}
+
// Decides whether a frame with the given presentation time may be recorded
// when stream synchronization is enabled: data is accepted only once all
// subsessions have been RTCP-synced, and only if it is no older than the
// newest sync time. Always returns True when syncing is off or there is no
// RTP source.
Boolean SubsessionIOState::syncOK(struct timeval presentationTime) {
  QuickTimeFileSink& s = fOurSink; // abbreviation
  if (!s.fSyncStreams || fOurSubsession.rtpSource() == NULL) return True; // we don't care

  if (s.fNumSyncedSubsessions < s.fNumSubsessions) {
    // Not all subsessions have yet been synced.  Check whether ours was
    // one of the unsynced ones, and, if so, whether it is now synced:
    if (!fHaveBeenSynced) {
      // We weren't synchronized before
      if (fOurSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
	// H264 ?
	if (fQTMediaDataAtomCreator == &QuickTimeFileSink::addAtom_avc1) {
	  // special case: audio + H264 video: wait until audio is in sync
	  if ((s.fNumSubsessions == 2) && (s.fNumSyncedSubsessions < (s.fNumSubsessions - 1))) return False;

	  // if audio is in sync, wait for the next IDR frame to start
	  unsigned char* const frameSource = fBuffer->dataStart();
	  if (*frameSource != H264_IDR_FRAME) return False;
	}
	// But now we are
	fHaveBeenSynced = True;
	fSyncTime = presentationTime;
	++s.fNumSyncedSubsessions;

	// Track the most recent sync time across all subsessions:
	if (timevalGE(fSyncTime, s.fNewestSyncTime)) {
	  s.fNewestSyncTime = fSyncTime;
	}
      }
    }
  }

  // Check again whether all subsessions have been synced:
  if (s.fNumSyncedSubsessions < s.fNumSubsessions) return False;

  // Allow this data if it is more recent than the newest sync time:
  return timevalGE(presentationTime, s.fNewestSyncTime);
}
+
+void SubsessionIOState::setHintTrack(SubsessionIOState* hintedTrack,
+ SubsessionIOState* hintTrack) {
+ if (hintedTrack != NULL) hintedTrack->fHintTrackForUs = hintTrack;
+ if (hintTrack != NULL) hintTrack->fTrackHintedByUs = hintedTrack;
+}
+
+SyncFrame::SyncFrame(unsigned frameNum)
+ : nextSyncFrame(NULL), sfFrameNum(frameNum) {
+}
+
+void Count64::operator+=(unsigned arg) {
+ unsigned newLo = lo + arg;
+ if (newLo < lo) { // lo has overflowed
+ ++hi;
+ }
+ lo = newLo;
+}
+
// Describes one contiguous run of equally-sized, equal-duration frames in
// the output file.
// NOTE: "frameSize" is used as a divisor here, so callers must pass a
// non-zero value (useFrame1() guarantees this).
ChunkDescriptor
::ChunkDescriptor(int64_t offsetInFile, unsigned size,
		  unsigned frameSize, unsigned frameDuration,
		  struct timeval presentationTime)
  : fNextChunk(NULL), fOffsetInFile(offsetInFile),
    fNumFrames(size/frameSize),
    fFrameSize(frameSize), fFrameDuration(frameDuration),
    fPresentationTime(presentationTime) {
}
+
+ChunkDescriptor* ChunkDescriptor
+::extendChunk(int64_t newOffsetInFile, unsigned newSize,
+ unsigned newFrameSize, unsigned newFrameDuration,
+ struct timeval newPresentationTime) {
+ // First, check whether the new space is just at the end of this
+ // existing chunk:
+ if (newOffsetInFile == fOffsetInFile + fNumFrames*fFrameSize) {
+ // We can extend this existing chunk, provided that the frame size
+ // and frame duration have not changed:
+ if (newFrameSize == fFrameSize && newFrameDuration == fFrameDuration) {
+ fNumFrames += newSize/fFrameSize;
+ return this;
+ }
+ }
+
+ // We'll allocate a new ChunkDescriptor, and link it to the end of us:
+ ChunkDescriptor* newDescriptor
+ = new ChunkDescriptor(newOffsetInFile, newSize,
+ newFrameSize, newFrameDuration,
+ newPresentationTime);
+
+ fNextChunk = newDescriptor;
+
+ return newDescriptor;
+}
+
+
+////////// QuickTime-specific implementation //////////
+
+unsigned QuickTimeFileSink::addWord64(u_int64_t word) {
+ addByte((unsigned char)(word>>56)); addByte((unsigned char)(word>>48));
+ addByte((unsigned char)(word>>40)); addByte((unsigned char)(word>>32));
+ addByte((unsigned char)(word>>24)); addByte((unsigned char)(word>>16));
+ addByte((unsigned char)(word>>8)); addByte((unsigned char)(word));
+
+ return 8;
+}
+
+unsigned QuickTimeFileSink::addWord(unsigned word) {
+ addByte(word>>24); addByte(word>>16);
+ addByte(word>>8); addByte(word);
+
+ return 4;
+}
+
+unsigned QuickTimeFileSink::addHalfWord(unsigned short halfWord) {
+ addByte((unsigned char)(halfWord>>8)); addByte((unsigned char)halfWord);
+
+ return 2;
+}
+
+unsigned QuickTimeFileSink::addZeroWords(unsigned numWords) {
+ for (unsigned i = 0; i < numWords; ++i) {
+ addWord(0);
+ }
+
+ return numWords*4;
+}
+
+unsigned QuickTimeFileSink::add4ByteString(char const* str) {
+ addByte(str[0]); addByte(str[1]); addByte(str[2]); addByte(str[3]);
+
+ return 4;
+}
+
+unsigned QuickTimeFileSink::addArbitraryString(char const* str,
+ Boolean oneByteLength) {
+ unsigned size = 0;
+ if (oneByteLength) {
+ // Begin with a byte containing the string length:
+ unsigned strLength = strlen(str);
+ if (strLength >= 256) {
+ envir() << "QuickTimeFileSink::addArbitraryString(\""
+ << str << "\") saw string longer than we know how to handle ("
+ << strLength << "\n";
+ }
+ size += addByte((unsigned char)strLength);
+ }
+
+ while (*str != '\0') {
+ size += addByte(*str++);
+ }
+
+ return size;
+}
+
+unsigned QuickTimeFileSink::addAtomHeader(char const* atomName) {
+ // Output a placeholder for the 4-byte size:
+ addWord(0);
+
+ // Output the 4-byte atom name:
+ add4ByteString(atomName);
+
+ return 8;
+}
+
+unsigned QuickTimeFileSink::addAtomHeader64(char const* atomName) {
+ // Output 64Bit size marker
+ addWord(1);
+
+ // Output the 4-byte atom name:
+ add4ByteString(atomName);
+
+ addWord64(0);
+
+ return 16;
+}
+
+void QuickTimeFileSink::setWord(int64_t filePosn, unsigned size) {
+ do {
+ if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
+ addWord(size);
+ if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
+
+ return;
+ } while (0);
+
+ // One of the SeekFile64()s failed, probable because we're not a seekable file
+ envir() << "QuickTimeFileSink::setWord(): SeekFile64 failed (err "
+ << envir().getErrno() << ")\n";
+}
+
+void QuickTimeFileSink::setWord64(int64_t filePosn, u_int64_t size) {
+ do {
+ if (SeekFile64(fOutFid, filePosn, SEEK_SET) < 0) break;
+ addWord64(size);
+ if (SeekFile64(fOutFid, 0, SEEK_END) < 0) break; // go back to where we were
+
+ return;
+ } while (0);
+
+ // One of the SeekFile64()s failed, probable because we're not a seekable file
+ envir() << "QuickTimeFileSink::setWord64(): SeekFile64 failed (err "
+ << envir().getErrno() << ")\n";
+}
+
+// Methods for writing particular atoms. Note the following macros:
+
// addAtom(name): opens the definition of member function "addAtom_<name>()".
// It records the atom's starting file position and writes its 8-byte header;
// the code that follows the macro then appends the atom's payload,
// accumulating the byte count in "size".
#define addAtom(name) \
  unsigned QuickTimeFileSink::addAtom_##name() { \
    int64_t initFilePosn = TellFile64(fOutFid); \
    unsigned size = addAtomHeader("" #name "")

// addAtomEnd: closes a definition opened by addAtom(): back-patches the
// atom's 4-byte size field at "initFilePosn", then returns the total size.
#define addAtomEnd \
  setWord(initFilePosn, size); \
  return size; \
}
+
// "ftyp" atom: declares the file's brand and compatibility (MP4 output).
addAtom(ftyp);
  size += add4ByteString("mp42"); // major brand
  size += addWord(0x00000000); // minor version
  size += add4ByteString("mp42"); // compatible brand
  size += add4ByteString("isom"); // compatible brand
addAtomEnd;
+
// "moov" atom: the movie metadata container - the movie header ("mvhd"),
// an "iods" atom (MP4 output only), and one "trak" atom per recorded track
// (plus each track's hint track, when hinting is enabled).
addAtom(moov);
  size += addAtom_mvhd();

  if (fGenerateMP4Format) {
    size += addAtom_iods();
  }

  // Add a 'trak' atom for each subsession:
  // (For some unknown reason, QuickTime Player (5.0 at least)
  // doesn't display the movie correctly unless the audio track
  // (if present) appears before the video track.  So ensure this here.)
  MediaSubsessionIterator iter(fInputSession);
  MediaSubsession* subsession;
  // First pass: audio tracks only:
  while ((subsession = iter.next()) != NULL) {
    fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
    if (fCurrentIOState == NULL) continue;
    if (strcmp(subsession->mediumName(), "audio") != 0) continue;

    size += addAtom_trak();

    if (fCurrentIOState->hasHintTrack()) {
      // This track has a hint track; output it also:
      fCurrentIOState = fCurrentIOState->fHintTrackForUs;
      size += addAtom_trak();
    }
  }
  // Second pass: all remaining (non-audio, e.g. video) tracks:
  iter.reset();
  while ((subsession = iter.next()) != NULL) {
    fCurrentIOState = (SubsessionIOState*)(subsession->miscPtr);
    if (fCurrentIOState == NULL) continue;
    if (strcmp(subsession->mediumName(), "audio") == 0) continue;

    size += addAtom_trak();

    if (fCurrentIOState->hasHintTrack()) {
      // This track has a hint track; output it also:
      fCurrentIOState = fCurrentIOState->fHintTrackForUs;
      size += addAtom_trak();
    }
  }
addAtomEnd;
+
+addAtom(mvhd);
+ size += addWord(0x00000000); // Version + Flags
+ size += addWord(fAppleCreationTime); // Creation time
+ size += addWord(fAppleCreationTime); // Modification time
+
+ // For the "Time scale" field, use the largest RTP timestamp frequency
+ // that we saw in any of the subsessions.
+ size += addWord(movieTimeScale()); // Time scale
+
+ unsigned const duration = fMaxTrackDurationM;
+ fMVHD_durationPosn = TellFile64(fOutFid);
+ size += addWord(duration); // Duration
+
+ size += addWord(0x00010000); // Preferred rate
+ size += addWord(0x01000000); // Preferred volume + Reserved[0]
+ size += addZeroWords(2); // Reserved[1-2]
+ size += addWord(0x00010000); // matrix top left corner
+ size += addZeroWords(3); // matrix
+ size += addWord(0x00010000); // matrix center
+ size += addZeroWords(3); // matrix
+ size += addWord(0x40000000); // matrix bottom right corner
+ size += addZeroWords(6); // various time fields
+ size += addWord(SubsessionIOState::fCurrentTrackNumber+1);// Next track ID
+addAtomEnd;
+
+addAtom(iods);
+ size += addWord(0x00000000); // Version + Flags
+ size += addWord(0x10808080);
+ size += addWord(0x07004FFF);
+ size += addWord(0xFF0FFFFF);
+addAtomEnd;
+
+addAtom(trak);
+ size += addAtom_tkhd();
+
+ // If we're synchronizing the media streams (or are a hint track),
+ // add an edit list that helps do this:
+ if (fCurrentIOState->fHeadChunk != NULL
+ && (fSyncStreams || fCurrentIOState->isHintTrack())) {
+ size += addAtom_edts();
+ }
+
+ // If we're generating a hint track, add a 'tref' atom:
+ if (fCurrentIOState->isHintTrack()) size += addAtom_tref();
+
+ size += addAtom_mdia();
+
+ // If we're generating a hint track, add a 'udta' atom:
+ if (fCurrentIOState->isHintTrack()) size += addAtom_udta();
+addAtomEnd;
+
+addAtom(tkhd);
+ if (fCurrentIOState->fQTEnableTrack) {
+ size += addWord(0x0000000F); // Version + Flags
+ } else {
+ // Disable this track in the movie:
+ size += addWord(0x00000000); // Version + Flags
+ }
+ size += addWord(fAppleCreationTime); // Creation time
+ size += addWord(fAppleCreationTime); // Modification time
+ size += addWord(fCurrentIOState->fTrackID); // Track ID
+ size += addWord(0x00000000); // Reserved
+
+ unsigned const duration = fCurrentIOState->fQTDurationM; // movie units
+ fCurrentIOState->fTKHD_durationPosn = TellFile64(fOutFid);
+ size += addWord(duration); // Duration
+ size += addZeroWords(3); // Reserved+Layer+Alternate grp
+ size += addWord(0x01000000); // Volume + Reserved
+ size += addWord(0x00010000); // matrix top left corner
+ size += addZeroWords(3); // matrix
+ size += addWord(0x00010000); // matrix center
+ size += addZeroWords(3); // matrix
+ size += addWord(0x40000000); // matrix bottom right corner
+ if (strcmp(fCurrentIOState->fOurSubsession.mediumName(), "video") == 0) {
+ size += addWord(fMovieWidth<<16); // Track width
+ size += addWord(fMovieHeight<<16); // Track height
+ } else {
+ size += addZeroWords(2); // not video: leave width and height fields zero
+ }
+addAtomEnd;
+
+addAtom(edts);
+ size += addAtom_elst();
+addAtomEnd;
+
+// Helper macros used (only) within addAtom_elst() below.  Each appends one
+// edit-list entry - (track duration, media time, media rate) - and updates
+// the local bookkeeping variables "totalDurationOfEdits" and "numEdits".
+// The "(2*(duration)*movieTimeScale()+1)/2" expression rounds (rather than
+// truncates) the floating-point duration to the nearest integer.
+// "addEmptyEdit" uses a media time of ~0 (i.e., -1), which marks an
+// 'empty' edit (no media is presented during it).
+#define addEdit1(duration,trackPosition) do { \
+ unsigned trackDuration \
+ = (unsigned) ((2*(duration)*movieTimeScale()+1)/2); \
+ /* in movie time units */ \
+ size += addWord(trackDuration); /* Track duration */ \
+ totalDurationOfEdits += trackDuration; \
+ size += addWord(trackPosition); /* Media time */ \
+ size += addWord(0x00010000); /* Media rate (1x) */ \
+ ++numEdits; \
+} while (0)
+#define addEdit(duration) addEdit1((duration),editTrackPosition)
+#define addEmptyEdit(duration) addEdit1((duration),(~0))
+
+addAtom(elst);
+ size += addWord(0x00000000); // Version + Flags
+
+ // Add a dummy "Number of entries" field
+ // (and remember its position). We'll fill this field in later:
+ int64_t numEntriesPosition = TellFile64(fOutFid);
+ size += addWord(0); // dummy for "Number of entries"
+ unsigned numEdits = 0;
+ unsigned totalDurationOfEdits = 0; // in movie time units
+
+ // Run through our chunks, looking at their presentation times.
+ // From these, figure out the edits that need to be made to keep
+ // the track media data in sync with the presentation times.
+
+ double const syncThreshold = 0.1; // 100 ms
+ // don't allow the track to get out of sync by more than this
+
+ struct timeval editStartTime = fFirstDataTime;
+ unsigned editTrackPosition = 0;
+ unsigned currentTrackPosition = 0;
+ double trackDurationOfEdit = 0.0;
+ unsigned chunkDuration = 0;
+
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ struct timeval const& chunkStartTime = chunk->fPresentationTime;
+ double movieDurationOfEdit
+ = (chunkStartTime.tv_sec - editStartTime.tv_sec)
+ + (chunkStartTime.tv_usec - editStartTime.tv_usec)/1000000.0;
+ trackDurationOfEdit = (currentTrackPosition-editTrackPosition)
+ / (double)(fCurrentIOState->fQTTimeScale);
+
+ double outOfSync = movieDurationOfEdit - trackDurationOfEdit;
+
+ if (outOfSync > syncThreshold) {
+ // The track's data is too short, so end this edit, add a new
+ // 'empty' edit after it, and start a new edit
+ // (at the current track posn.):
+ if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
+ addEmptyEdit(outOfSync);
+
+ editStartTime = chunkStartTime;
+ editTrackPosition = currentTrackPosition;
+ } else if (outOfSync < -syncThreshold) {
+ // The track's data is too long, so end this edit, and start
+ // a new edit (pointing at the current track posn.):
+ if (movieDurationOfEdit > 0.0) addEdit(movieDurationOfEdit);
+
+ editStartTime = chunkStartTime;
+ editTrackPosition = currentTrackPosition;
+ }
+
+ // Note the duration of this chunk:
+ unsigned numChannels = fCurrentIOState->fOurSubsession.numChannels();
+ chunkDuration = chunk->fNumFrames*chunk->fFrameDuration/numChannels;
+ currentTrackPosition += chunkDuration;
+
+ chunk = chunk->fNextChunk;
+ }
+
+ // Write out the final edit
+ trackDurationOfEdit
+ += (double)chunkDuration/fCurrentIOState->fQTTimeScale;
+ if (trackDurationOfEdit > 0.0) addEdit(trackDurationOfEdit);
+
+ // Now go back and fill in the "Number of entries" field:
+ setWord(numEntriesPosition, numEdits);
+
+ // Also, if the sum of all of the edit durations exceeds the
+ // track duration that we already computed (from sample durations),
+ // then reset the track duration to this new value:
+ if (totalDurationOfEdits > fCurrentIOState->fQTDurationM) {
+ fCurrentIOState->fQTDurationM = totalDurationOfEdits;
+ setWord(fCurrentIOState->fTKHD_durationPosn, totalDurationOfEdits);
+
+ // Also, check whether the overall movie duration needs to change:
+ if (totalDurationOfEdits > fMaxTrackDurationM) {
+ fMaxTrackDurationM = totalDurationOfEdits;
+ setWord(fMVHD_durationPosn, totalDurationOfEdits);
+ }
+
+ // Also, convert to track time scale:
+ double scaleFactor
+ = fCurrentIOState->fQTTimeScale/(double)movieTimeScale();
+ fCurrentIOState->fQTDurationT
+ = (unsigned)(totalDurationOfEdits*scaleFactor);
+ }
+addAtomEnd;
+
+addAtom(tref);
+ size += addAtom_hint();
+addAtomEnd;
+
+addAtom(hint);
+ SubsessionIOState* hintedTrack = fCurrentIOState->fTrackHintedByUs;
+ // Assert: hintedTrack != NULL
+ size += addWord(hintedTrack->fTrackID);
+addAtomEnd;
+
+addAtom(mdia);
+ size += addAtom_mdhd();
+ size += addAtom_hdlr();
+ size += addAtom_minf();
+addAtomEnd;
+
+addAtom(mdhd);
+ size += addWord(0x00000000); // Version + Flags
+ size += addWord(fAppleCreationTime); // Creation time
+ size += addWord(fAppleCreationTime); // Modification time
+
+ unsigned const timeScale = fCurrentIOState->fQTTimeScale;
+ size += addWord(timeScale); // Time scale
+
+ unsigned const duration = fCurrentIOState->fQTDurationT; // track units
+ size += addWord(duration); // Duration
+
+ size += addWord(0x00000000); // Language+Quality
+addAtomEnd;
+
+addAtom(hdlr);
+ size += addWord(0x00000000); // Version + Flags
+ size += add4ByteString("mhlr"); // Component type
+ size += addWord(fCurrentIOState->fQTcomponentSubtype);
+ // Component subtype
+ size += add4ByteString("appl"); // Component manufacturer
+ size += addWord(0x00000000); // Component flags
+ size += addWord(0x00000000); // Component flags mask
+ size += addArbitraryString(fCurrentIOState->fQTcomponentName);
+ // Component name
+addAtomEnd;
+
+addAtom(minf);
+ SubsessionIOState::atomCreationFunc mediaInformationAtomCreator
+ = fCurrentIOState->fQTMediaInformationAtomCreator;
+ size += (this->*mediaInformationAtomCreator)();
+ size += addAtom_hdlr2();
+ size += addAtom_dinf();
+ size += addAtom_stbl();
+addAtomEnd;
+
+addAtom(smhd);
+ size += addZeroWords(2); // Version+Flags+Balance+Reserved
+addAtomEnd;
+
+// 'vmhd' (video media information header) atom:
+addAtom(vmhd);
+ size += addWord(0x00000001); // Version + Flags
+ size += addWord(0x00408000); // Graphics mode + Opcolor[red]
+ size += addWord(0x80008000); // Opcolor[green] + Opcolor[blue]
+addAtomEnd;
+
+addAtom(gmhd);
+ size += addAtom_gmin();
+addAtomEnd;
+
+addAtom(gmin);
+ size += addWord(0x00000000); // Version + Flags
+ // The following fields probably aren't used for hint tracks, so just
+ // use values that I've seen in other files:
+ size += addWord(0x00408000); // Graphics mode + Opcolor (1st 2 bytes)
+ size += addWord(0x80008000); // Opcolor (last 4 bytes)
+ size += addWord(0x00000000); // Balance + Reserved
+addAtomEnd;
+
+unsigned QuickTimeFileSink::addAtom_hdlr2() {
+ int64_t initFilePosn = TellFile64(fOutFid);
+ unsigned size = addAtomHeader("hdlr");
+ size += addWord(0x00000000); // Version + Flags
+ size += add4ByteString("dhlr"); // Component type
+ size += add4ByteString("alis"); // Component subtype
+ size += add4ByteString("appl"); // Component manufacturer
+ size += addZeroWords(2); // Component flags+Component flags mask
+ size += addArbitraryString("Apple Alias Data Handler"); // Component name
+addAtomEnd;
+
+addAtom(dinf);
+ size += addAtom_dref();
+addAtomEnd;
+
+addAtom(dref);
+ size += addWord(0x00000000); // Version + Flags
+ size += addWord(0x00000001); // Number of entries
+ size += addAtom_alis();
+addAtomEnd;
+
+addAtom(alis);
+ size += addWord(0x00000001); // Version + Flags
+addAtomEnd;
+
+addAtom(stbl);
+ size += addAtom_stsd();
+ size += addAtom_stts();
+ if (fCurrentIOState->fQTcomponentSubtype == fourChar('v','i','d','e')) {
+ size += addAtom_stss(); // only for video streams
+ }
+ size += addAtom_stsc();
+ size += addAtom_stsz();
+ size += addAtom_co64();
+addAtomEnd;
+
+addAtom(stsd);
+ size += addWord(0x00000000); // Version+Flags
+ size += addWord(0x00000001); // Number of entries
+ SubsessionIOState::atomCreationFunc mediaDataAtomCreator
+ = fCurrentIOState->fQTMediaDataAtomCreator;
+ size += (this->*mediaDataAtomCreator)();
+addAtomEnd;
+
+unsigned QuickTimeFileSink::addAtom_genericMedia() {
+ int64_t initFilePosn = TellFile64(fOutFid);
+
+ // Our source is assumed to be a "QuickTimeGenericRTPSource"
+ // Use its "sdAtom" state for our contents:
+ QuickTimeGenericRTPSource* rtpSource = (QuickTimeGenericRTPSource*)
+ fCurrentIOState->fOurSubsession.rtpSource();
+ unsigned size = 0;
+ if (rtpSource != NULL) {
+ QuickTimeGenericRTPSource::QTState& qtState = rtpSource->qtState;
+ char const* from = qtState.sdAtom;
+ size = qtState.sdAtomSize;
+ for (unsigned i = 0; i < size; ++i) addByte(from[i]);
+ }
+addAtomEnd;
+
+// Writes the sound-sample-description fields common to all of our audio
+// data atoms.  Callers set "fQTAudioDataType" and "fQTSoundSampleVersion"
+// beforehand; this writes an atom header for that data type, followed by
+// the general and sound sample-description fields.
+unsigned QuickTimeFileSink::addAtom_soundMediaGeneral() {
+ int64_t initFilePosn = TellFile64(fOutFid);
+ unsigned size = addAtomHeader(fCurrentIOState->fQTAudioDataType);
+
+// General sample description fields:
+ size += addWord(0x00000000); // Reserved
+ size += addWord(0x00000001); // Reserved+Data reference index
+// Sound sample description fields:
+ unsigned short const version = fCurrentIOState->fQTSoundSampleVersion;
+ size += addWord(version<<16); // Version+Revision level
+ size += addWord(0x00000000); // Vendor
+ unsigned short numChannels
+ = (unsigned short)(fCurrentIOState->fOurSubsession.numChannels());
+ size += addHalfWord(numChannels); // Number of channels
+ size += addHalfWord(0x0010); // Sample size
+ // size += addWord(0x00000000); // Compression ID+Packet size
+ size += addWord(0xfffe0000); // Compression ID+Packet size #####
+
+ // NOTE(review): the sample rate is a 16.16 fixed-point value, so this
+ // assumes "fQTTimeScale" fits in 16 bits - TODO confirm for high rates.
+ unsigned const sampleRateFixedPoint = fCurrentIOState->fQTTimeScale << 16;
+ size += addWord(sampleRateFixedPoint); // Sample rate
+addAtomEnd;
+
+unsigned QuickTimeFileSink::addAtom_Qclp() {
+ // The beginning of this atom looks just like a general Sound Media atom,
+ // except with a version field of 1:
+ int64_t initFilePosn = TellFile64(fOutFid);
+ fCurrentIOState->fQTAudioDataType = "Qclp";
+ fCurrentIOState->fQTSoundSampleVersion = 1;
+ unsigned size = addAtom_soundMediaGeneral();
+
+ // Next, add the four fields that are particular to version 1:
+ // (Later, parameterize these #####)
+ size += addWord(0x000000a0); // samples per packet
+ size += addWord(0x00000000); // ???
+ size += addWord(0x00000000); // ???
+ size += addWord(0x00000002); // bytes per sample (uncompressed)
+
+ // Other special fields are in a 'wave' atom that follows:
+ size += addAtom_wave();
+addAtomEnd;
+
+addAtom(wave);
+ size += addAtom_frma();
+ if (strcmp(fCurrentIOState->fQTAudioDataType, "Qclp") == 0) {
+ size += addWord(0x00000014); // ???
+ size += add4ByteString("Qclp"); // ???
+ if (fCurrentIOState->fQTBytesPerFrame == 35) {
+ size += addAtom_Fclp(); // full-rate QCELP
+ } else {
+ size += addAtom_Hclp(); // half-rate QCELP
+ } // what about other QCELP 'rates'??? #####
+ size += addWord(0x00000008); // ???
+ size += addWord(0x00000000); // ???
+ size += addWord(0x00000000); // ???
+ size += addWord(0x00000008); // ???
+ } else if (strcmp(fCurrentIOState->fQTAudioDataType, "mp4a") == 0) {
+ size += addWord(0x0000000c); // ???
+ size += add4ByteString("mp4a"); // ???
+ size += addWord(0x00000000); // ???
+ size += addAtom_esds(); // ESDescriptor
+ size += addWord(0x00000008); // ???
+ size += addWord(0x00000000); // ???
+ }
+addAtomEnd;
+
+addAtom(frma);
+ size += add4ByteString(fCurrentIOState->fQTAudioDataType); // ???
+addAtomEnd;
+
+addAtom(Fclp);
+ size += addWord(0x00000000); // ???
+addAtomEnd;
+
+addAtom(Hclp);
+ size += addWord(0x00000000); // ???
+addAtomEnd;
+
+unsigned QuickTimeFileSink::addAtom_mp4a() {
+ unsigned size = 0;
+ // The beginning of this atom looks just like a general Sound Media atom,
+ // except with a version field of 1:
+ int64_t initFilePosn = TellFile64(fOutFid);
+ fCurrentIOState->fQTAudioDataType = "mp4a";
+
+ if (fGenerateMP4Format) {
+ fCurrentIOState->fQTSoundSampleVersion = 0;
+ size = addAtom_soundMediaGeneral();
+ size += addAtom_esds();
+ } else {
+ fCurrentIOState->fQTSoundSampleVersion = 1;
+ size = addAtom_soundMediaGeneral();
+
+ // Next, add the four fields that are particular to version 1:
+ // (Later, parameterize these #####)
+ size += addWord(fCurrentIOState->fQTTimeUnitsPerSample);
+ size += addWord(0x00000001); // ???
+ size += addWord(0x00000001); // ???
+ size += addWord(0x00000002); // bytes per sample (uncompressed)
+
+ // Other special fields are in a 'wave' atom that follows:
+ size += addAtom_wave();
+ }
+addAtomEnd;
+
+addAtom(esds);
+ //#####
+ MediaSubsession& subsession = fCurrentIOState->fOurSubsession;
+ if (strcmp(subsession.mediumName(), "audio") == 0) {
+ // MPEG-4 audio
+ size += addWord(0x00000000); // ???
+ size += addWord(0x03808080); // ???
+ size += addWord(0x2a000000); // ???
+ size += addWord(0x04808080); // ???
+ size += addWord(0x1c401500); // ???
+ size += addWord(0x18000000); // ???
+ size += addWord(0x6d600000); // ???
+ size += addWord(0x6d600580); // ???
+ size += addByte(0x80); size += addByte(0x80); // ???
+ } else if (strcmp(subsession.mediumName(), "video") == 0) {
+ // MPEG-4 video
+ size += addWord(0x00000000); // ???
+ size += addWord(0x03330000); // ???
+ size += addWord(0x1f042b20); // ???
+ size += addWord(0x1104fd46); // ???
+ size += addWord(0x000d4e10); // ???
+ size += addWord(0x000d4e10); // ???
+ size += addByte(0x05); // ???
+ }
+
+ // Add the source's 'config' information:
+ unsigned configSize;
+ unsigned char* config
+ = parseGeneralConfigStr(subsession.fmtp_config(), configSize);
+ size += addByte(configSize);
+ for (unsigned i = 0; i < configSize; ++i) {
+ size += addByte(config[i]);
+ }
+ delete[] config;
+
+ if (strcmp(subsession.mediumName(), "audio") == 0) {
+ // MPEG-4 audio
+ size += addWord(0x06808080); // ???
+ size += addHalfWord(0x0102); // ???
+ } else {
+ // MPEG-4 video
+ size += addHalfWord(0x0601); // ???
+ size += addByte(0x02); // ???
+ }
+ //#####
+addAtomEnd;
+
+addAtom(srcq);
+ //#####
+ size += addWord(0x00000040); // ???
+ //#####
+addAtomEnd;
+
+addAtom(h263);
+// General sample description fields:
+ size += addWord(0x00000000); // Reserved
+ size += addWord(0x00000001); // Reserved+Data reference index
+// Video sample description fields:
+ size += addWord(0x00020001); // Version+Revision level
+ size += add4ByteString("appl"); // Vendor
+ size += addWord(0x00000000); // Temporal quality
+ size += addWord(0x000002fc); // Spatial quality
+ unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
+ size += addWord(widthAndHeight); // Width+height
+ size += addWord(0x00480000); // Horizontal resolution
+ size += addWord(0x00480000); // Vertical resolution
+ size += addWord(0x00000000); // Data size
+ size += addWord(0x00010548); // Frame count+Compressor name (start)
+ // "H.263"
+ size += addWord(0x2e323633); // Compressor name (continued)
+ size += addZeroWords(6); // Compressor name (continued - zero)
+ size += addWord(0x00000018); // Compressor name (final)+Depth
+ size += addHalfWord(0xffff); // Color table id
+addAtomEnd;
+
+addAtom(avc1);
+// General sample description fields:
+ size += addWord(0x00000000); // Reserved
+ size += addWord(0x00000001); // Reserved+Data reference index
+// Video sample description fields:
+ size += addWord(0x00000000); // Version+Revision level
+ size += add4ByteString("appl"); // Vendor
+ size += addWord(0x00000000); // Temporal quality
+ size += addWord(0x00000000); // Spatial quality
+ unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
+ size += addWord(widthAndHeight); // Width+height
+ size += addWord(0x00480000); // Horizontal resolution
+ size += addWord(0x00480000); // Vertical resolution
+ size += addWord(0x00000000); // Data size
+ size += addWord(0x00010548); // Frame count+Compressor name (start)
+ // "H.264"
+ size += addWord(0x2e323634); // Compressor name (continued)
+ size += addZeroWords(6); // Compressor name (continued - zero)
+ size += addWord(0x00000018); // Compressor name (final)+Depth
+ size += addHalfWord(0xffff); // Color table id
+ size += addAtom_avcC();
+addAtomEnd;
+
+// 'avcC' atom: the H.264 decoder configuration record, built from the
+// subsession's SDP "sprop-parameter-sets" attribute (Base64-encoded SPS
+// and PPS NAL units, separated by a comma).  Returns 0 on failure.
+addAtom(avcC);
+// Begin by Base-64 decoding the "sprop" parameter sets strings:
+ char* psets = strDup(fCurrentIOState->fOurSubsession.fmtp_spropparametersets());
+ if (psets == NULL) return 0;
+
+ size_t comma_pos = strcspn(psets, ",");
+ psets[comma_pos] = '\0'; // split: SPS before the comma, PPS after it
+ char const* sps_b64 = psets;
+ char const* pps_b64 = &psets[comma_pos+1];
+ unsigned sps_count;
+ unsigned char* sps_data = base64Decode(sps_b64, sps_count, false);
+ unsigned pps_count;
+ unsigned char* pps_data = base64Decode(pps_b64, pps_count, false);
+
+// Sanity-check the decoded data before using it.  We read sps_data[1..3]
+// below, so we need a SPS of at least 4 bytes, and non-NULL buffers
+// (base64Decode() can fail on a malformed attribute):
+ if (sps_data == NULL || pps_data == NULL || sps_count < 4) {
+ delete[] pps_data; delete[] sps_data;
+ delete[] psets;
+ return 0;
+ }
+
+// Then add the decoded data:
+ size += addByte(0x01); // configuration version
+ size += addByte(sps_data[1]); // profile
+ size += addByte(sps_data[2]); // profile compat
+ size += addByte(sps_data[3]); // level
+ size += addByte(0xff); // 6 reserved bits (all-ones) | lengthSizeMinusOne = 3 => 4-byte NALU length fields
+ size += addByte(0xe0 | (sps_count > 0 ? 1 : 0) ); // 3 reserved bits | number of SPSs
+ if (sps_count > 0) {
+ size += addHalfWord(sps_count); // SPS length in bytes
+ for (unsigned i = 0; i < sps_count; i++) {
+ size += addByte(sps_data[i]);
+ }
+ }
+ size += addByte(pps_count > 0 ? 1 : 0); // number of PPSs
+ if (pps_count > 0) {
+ size += addHalfWord(pps_count); // PPS length in bytes
+ for (unsigned i = 0; i < pps_count; i++) {
+ size += addByte(pps_data[i]);
+ }
+ }
+
+// Finally, delete the data that we allocated:
+ delete[] pps_data; delete[] sps_data;
+ delete[] psets;
+addAtomEnd;
+
+addAtom(mp4v);
+// General sample description fields:
+ size += addWord(0x00000000); // Reserved
+ size += addWord(0x00000001); // Reserved+Data reference index
+// Video sample description fields:
+ size += addWord(0x00020001); // Version+Revision level
+ size += add4ByteString("appl"); // Vendor
+ size += addWord(0x00000200); // Temporal quality
+ size += addWord(0x00000400); // Spatial quality
+ unsigned const widthAndHeight = (fMovieWidth<<16)|fMovieHeight;
+ size += addWord(widthAndHeight); // Width+height
+ size += addWord(0x00480000); // Horizontal resolution
+ size += addWord(0x00480000); // Vertical resolution
+ size += addWord(0x00000000); // Data size
+ size += addWord(0x00010c4d); // Frame count+Compressor name (start)
+ // "MPEG-4 Video"
+ size += addWord(0x5045472d); // Compressor name (continued)
+ size += addWord(0x34205669); // Compressor name (continued)
+ size += addWord(0x64656f00); // Compressor name (continued)
+ size += addZeroWords(4); // Compressor name (continued - zero)
+ size += addWord(0x00000018); // Compressor name (final)+Depth
+ size += addHalfWord(0xffff); // Color table id
+ size += addAtom_esds(); // ESDescriptor
+ size += addWord(0x00000000); // ???
+addAtomEnd;
+
+unsigned QuickTimeFileSink::addAtom_rtp() {
+ int64_t initFilePosn = TellFile64(fOutFid);
+ unsigned size = addAtomHeader("rtp ");
+
+ size += addWord(0x00000000); // Reserved (1st 4 bytes)
+ size += addWord(0x00000001); // Reserved (last 2 bytes) + Data ref index
+ size += addWord(0x00010001); // Hint track version + Last compat htv
+ size += addWord(1450); // Max packet size
+
+ size += addAtom_tims();
+addAtomEnd;
+
+addAtom(tims);
+ size += addWord(fCurrentIOState->fOurSubsession.rtpTimestampFrequency());
+addAtomEnd;
+
+addAtom(stts); // Time-to-Sample
+ size += addWord(0x00000000); // Version+flags
+
+ // First, add a dummy "Number of entries" field
+ // (and remember its position). We'll fill this field in later:
+ int64_t numEntriesPosition = TellFile64(fOutFid);
+ size += addWord(0); // dummy for "Number of entries"
+
+ // Then, run through the chunk descriptors, and enter the entries
+ // in this (compressed) Time-to-Sample table:
+ unsigned numEntries = 0, numSamplesSoFar = 0;
+ unsigned prevSampleDuration = 0;
+ unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ unsigned const sampleDuration = chunk->fFrameDuration/samplesPerFrame;
+ if (sampleDuration != prevSampleDuration) {
+ // This chunk will start a new table entry,
+ // so write out the old one (if any):
+ if (chunk != fCurrentIOState->fHeadChunk) {
+ ++numEntries;
+ size += addWord(numSamplesSoFar); // Sample count
+ size += addWord(prevSampleDuration); // Sample duration
+ numSamplesSoFar = 0;
+ }
+ }
+
+ unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
+ numSamplesSoFar += numSamples;
+ prevSampleDuration = sampleDuration;
+ chunk = chunk->fNextChunk;
+ }
+
+ // Then, write out the last entry:
+ ++numEntries;
+ size += addWord(numSamplesSoFar); // Sample count
+ size += addWord(prevSampleDuration); // Sample duration
+
+ // Now go back and fill in the "Number of entries" field:
+ setWord(numEntriesPosition, numEntries);
+addAtomEnd;
+
+// 'stss' (Sync-Sample) atom: lists the (1-based) sample numbers of the
+// samples that are key ('sync') frames.  If the source recorded explicit
+// sync frames, use those; otherwise guess one sync sample every 12.
+addAtom(stss); // Sync-Sample
+ size += addWord(0x00000000); // Version+flags
+
+ // First, add a dummy "Number of entries" field
+ // (and remember its position). We'll fill this field in later:
+ int64_t numEntriesPosition = TellFile64(fOutFid);
+ size += addWord(0); // dummy for "Number of entries"
+
+ unsigned numEntries = 0, numSamplesSoFar = 0;
+ if (fCurrentIOState->fHeadSyncFrame != NULL) {
+ SyncFrame* currentSyncFrame = fCurrentIOState->fHeadSyncFrame;
+
+ // First, count the number of frames (to use as a sanity check; see below):
+ unsigned totNumFrames = 0;
+ for (ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk; chunk != NULL; chunk = chunk->fNextChunk) totNumFrames += chunk->fNumFrames;
+
+ while (currentSyncFrame != NULL) {
+ if (currentSyncFrame->sfFrameNum >= totNumFrames) break; // sanity check
+
+ ++numEntries;
+ size += addWord(currentSyncFrame->sfFrameNum);
+ currentSyncFrame = currentSyncFrame->nextSyncFrame;
+ }
+ } else {
+ // First, run through the chunk descriptors, counting up the total number of samples:
+ unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ unsigned const numSamples = chunk->fNumFrames*samplesPerFrame;
+ numSamplesSoFar += numSamples;
+ chunk = chunk->fNextChunk;
+ }
+
+ // Then, write out the sample numbers that we deem correspond to 'sync samples':
+ unsigned i;
+ unsigned lastSyncSampleNum = 0; // last sample number written out below
+ for (i = 0; i < numSamplesSoFar; i += 12) {
+ // For an explanation of the constant "12", see http://lists.live555.com/pipermail/live-devel/2009-July/010969.html
+ // (Perhaps we should really try to keep track of which 'samples' ('frames' for video) really are 'key frames'?)
+ size += addWord(i+1);
+ lastSyncSampleNum = i+1;
+ ++numEntries;
+ }
+
+ // Then, write out the last sample as a final sync sample - unless the
+ // loop above already wrote it, or there were no samples at all.
+ // (The previous test, "i != (numSamplesSoFar - 1)", was always true here
+ // - after the loop, "i" >= "numSamplesSoFar" - so it could emit a
+ // duplicate final entry, and emitted a bogus entry "0" for an empty track.)
+ if (numSamplesSoFar > 0 && lastSyncSampleNum != numSamplesSoFar) {
+ size += addWord(numSamplesSoFar);
+ ++numEntries;
+ }
+ }
+
+ // Now go back and fill in the "Number of entries" field:
+ setWord(numEntriesPosition, numEntries);
+addAtomEnd;
+
+addAtom(stsc); // Sample-to-Chunk
+ size += addWord(0x00000000); // Version+flags
+
+ // First, add a dummy "Number of entries" field
+ // (and remember its position). We'll fill this field in later:
+ int64_t numEntriesPosition = TellFile64(fOutFid);
+ size += addWord(0); // dummy for "Number of entries"
+
+ // Then, run through the chunk descriptors, and enter the entries
+ // in this (compressed) Sample-to-Chunk table:
+ unsigned numEntries = 0, chunkNumber = 0;
+ unsigned prevSamplesPerChunk = ~0;
+ unsigned const samplesPerFrame = fCurrentIOState->fQTSamplesPerFrame;
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ ++chunkNumber;
+ unsigned const samplesPerChunk = chunk->fNumFrames*samplesPerFrame;
+ if (samplesPerChunk != prevSamplesPerChunk) {
+ // This chunk will be a new table entry:
+ ++numEntries;
+ size += addWord(chunkNumber); // Chunk number
+ size += addWord(samplesPerChunk); // Samples per chunk
+ size += addWord(0x00000001); // Sample description ID
+
+ prevSamplesPerChunk = samplesPerChunk;
+ }
+ chunk = chunk->fNextChunk;
+ }
+
+ // Now go back and fill in the "Number of entries" field:
+ setWord(numEntriesPosition, numEntries);
+addAtomEnd;
+
+addAtom(stsz); // Sample Size
+ size += addWord(0x00000000); // Version+flags
+
+ // Begin by checking whether our chunks all have the same
+ // 'bytes-per-sample'. This determines whether this atom's table
+ // has just a single entry, or multiple entries.
+ Boolean haveSingleEntryTable = True;
+ double firstBPS = 0.0;
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ double bps
+ = (double)(chunk->fFrameSize)/(fCurrentIOState->fQTSamplesPerFrame);
+ if (bps < 1.0) {
+ // I don't think a multiple-entry table would make sense in
+ // this case, so assume a single entry table ??? #####
+ break;
+ }
+
+ if (firstBPS == 0.0) {
+ firstBPS = bps;
+ } else if (bps != firstBPS) {
+ haveSingleEntryTable = False;
+ break;
+ }
+
+ chunk = chunk->fNextChunk;
+ }
+
+ unsigned sampleSize;
+ if (haveSingleEntryTable) {
+ if (fCurrentIOState->isHintTrack()
+ && fCurrentIOState->fHeadChunk != NULL) {
+ sampleSize = fCurrentIOState->fHeadChunk->fFrameSize
+ / fCurrentIOState->fQTSamplesPerFrame;
+ } else {
+ // The following doesn't seem right, but seems to do the right thing:
+ sampleSize = fCurrentIOState->fQTTimeUnitsPerSample; //???
+ }
+ } else {
+ sampleSize = 0; // indicates a multiple-entry table
+ }
+ size += addWord(sampleSize); // Sample size
+ unsigned const totNumSamples = fCurrentIOState->fQTTotNumSamples;
+ size += addWord(totNumSamples); // Number of entries
+
+ if (!haveSingleEntryTable) {
+ // Multiple-entry table:
+ // Run through the chunk descriptors, entering the sample sizes:
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ unsigned numSamples
+ = chunk->fNumFrames*(fCurrentIOState->fQTSamplesPerFrame);
+ unsigned sampleSize
+ = chunk->fFrameSize/(fCurrentIOState->fQTSamplesPerFrame);
+ for (unsigned i = 0; i < numSamples; ++i) {
+ size += addWord(sampleSize);
+ }
+
+ chunk = chunk->fNextChunk;
+ }
+ }
+addAtomEnd;
+
+addAtom(co64); // Chunk Offset
+ size += addWord(0x00000000); // Version+flags
+ size += addWord(fCurrentIOState->fNumChunks); // Number of entries
+
+ // Run through the chunk descriptors, entering the file offsets:
+ ChunkDescriptor* chunk = fCurrentIOState->fHeadChunk;
+ while (chunk != NULL) {
+ size += addWord64(chunk->fOffsetInFile);
+
+ chunk = chunk->fNextChunk;
+ }
+addAtomEnd;
+
+addAtom(udta);
+ size += addAtom_name();
+ size += addAtom_hnti();
+ size += addAtom_hinf();
+addAtomEnd;
+
+addAtom(name);
+ char description[100];
+ sprintf(description, "Hinted %s track",
+ fCurrentIOState->fOurSubsession.mediumName());
+ size += addArbitraryString(description, False); // name of object
+addAtomEnd;
+
+addAtom(hnti);
+ size += addAtom_sdp();
+addAtomEnd;
+
+// 'sdp ' atom (within 'hnti'): stores the subsession's SDP description,
+// with any "a=control:trackID=<n>" value rewritten to this track's actual
+// track id (or with such a line appended, if none was present).
+unsigned QuickTimeFileSink::addAtom_sdp() {
+ int64_t initFilePosn = TellFile64(fOutFid);
+ unsigned size = addAtomHeader("sdp ");
+
+ // Add this subsession's SDP lines:
+ char const* sdpLines = fCurrentIOState->fOurSubsession.savedSDPLines();
+ // NOTE(review): assumes "savedSDPLines()" returns non-NULL here - the
+ // strlen() below would crash otherwise; confirm against the callers.
+ // We need to change any "a=control:trackID=" values to be this
+ // track's actual track id:
+ char* newSDPLines = new char[strlen(sdpLines)+100/*overkill*/];
+ char const* searchStr = "a=control:trackid=";
+ Boolean foundSearchString = False;
+ char const *p1, *p2, *p3;
+ for (p1 = sdpLines; *p1 != '\0'; ++p1) {
+ // Case-insensitively match "searchStr" against the text starting at "p1":
+ for (p2 = p1,p3 = searchStr; tolower(*p2) == *p3; ++p2,++p3) {}
+ if (*p3 == '\0') {
+ // We found the end of the search string, at p2.
+ int beforeTrackNumPosn = p2-sdpLines;
+ // Look for the subsequent track number, and skip over it:
+ int trackNumLength;
+ if (sscanf(p2, " %*d%n", &trackNumLength) < 0) break;
+ int afterTrackNumPosn = beforeTrackNumPosn + trackNumLength;
+
+ // Replace the old track number with the correct one:
+ int i;
+ for (i = 0; i < beforeTrackNumPosn; ++i) newSDPLines[i] = sdpLines[i];
+ sprintf(&newSDPLines[i], "%d", fCurrentIOState->fTrackID);
+ i = afterTrackNumPosn;
+ int j = i + strlen(&newSDPLines[i]);
+ while (1) {
+ // Copy everything after the old track number, verbatim:
+ if ((newSDPLines[j] = sdpLines[i]) == '\0') break;
+ ++i; ++j;
+ }
+
+ foundSearchString = True;
+ break;
+ }
+ }
+
+ if (!foundSearchString) {
+ // Because we didn't find a "a=control:trackID=<trackId>" line,
+ // add one of our own:
+ sprintf(newSDPLines, "%s%s%d\r\n",
+ sdpLines, searchStr, fCurrentIOState->fTrackID);
+ }
+
+ size += addArbitraryString(newSDPLines, False);
+ delete[] newSDPLines;
+addAtomEnd;
+
+addAtom(hinf);
+ size += addAtom_totl();
+ size += addAtom_npck();
+ size += addAtom_tpay();
+ size += addAtom_trpy();
+ size += addAtom_nump();
+ size += addAtom_tpyl();
+ // Is 'maxr' required? #####
+ size += addAtom_dmed();
+ size += addAtom_dimm();
+ size += addAtom_drep();
+ size += addAtom_tmin();
+ size += addAtom_tmax();
+ size += addAtom_pmax();
+ size += addAtom_dmax();
+ size += addAtom_payt();
+addAtomEnd;
+
+addAtom(totl);
+ size += addWord(fCurrentIOState->fHINF.trpy.lo);
+addAtomEnd;
+
+addAtom(npck);
+ size += addWord(fCurrentIOState->fHINF.nump.lo);
+addAtomEnd;
+
+addAtom(tpay);
+ size += addWord(fCurrentIOState->fHINF.tpyl.lo);
+addAtomEnd;
+
+addAtom(trpy);
+ size += addWord(fCurrentIOState->fHINF.trpy.hi);
+ size += addWord(fCurrentIOState->fHINF.trpy.lo);
+addAtomEnd;
+
+addAtom(nump);
+ size += addWord(fCurrentIOState->fHINF.nump.hi);
+ size += addWord(fCurrentIOState->fHINF.nump.lo);
+addAtomEnd;
+
+addAtom(tpyl);
+ size += addWord(fCurrentIOState->fHINF.tpyl.hi);
+ size += addWord(fCurrentIOState->fHINF.tpyl.lo);
+addAtomEnd;
+
+addAtom(dmed);
+ size += addWord(fCurrentIOState->fHINF.dmed.hi);
+ size += addWord(fCurrentIOState->fHINF.dmed.lo);
+addAtomEnd;
+
+addAtom(dimm);
+ size += addWord(fCurrentIOState->fHINF.dimm.hi);
+ size += addWord(fCurrentIOState->fHINF.dimm.lo);
+addAtomEnd;
+
+addAtom(drep);
+ size += addWord(0);
+ size += addWord(0);
+addAtomEnd;
+
+addAtom(tmin);
+ size += addWord(0);
+addAtomEnd;
+
+addAtom(tmax);
+ size += addWord(0);
+addAtomEnd;
+
+addAtom(pmax);
+ size += addWord(fCurrentIOState->fHINF.pmax);
+addAtomEnd;
+
+addAtom(dmax);
+ size += addWord(fCurrentIOState->fHINF.dmax);
+addAtomEnd;
+
+addAtom(payt);
+ MediaSubsession& ourSubsession = fCurrentIOState->fOurSubsession;
+ RTPSource* rtpSource = ourSubsession.rtpSource();
+ if (rtpSource != NULL) {
+ size += addWord(rtpSource->rtpPayloadFormat());
+
+ // Also, add a 'rtpmap' string: <mime-subtype>/<rtp-frequency>
+ unsigned rtpmapStringLength = strlen(ourSubsession.codecName()) + 20;
+ char* rtpmapString = new char[rtpmapStringLength];
+ sprintf(rtpmapString, "%s/%d",
+ ourSubsession.codecName(), rtpSource->timestampFrequency());
+ size += addArbitraryString(rtpmapString);
+ delete[] rtpmapString;
+ }
+addAtomEnd;
+
+// A dummy atom (with name "????"):
+unsigned QuickTimeFileSink::addAtom_dummy() {
+ int64_t initFilePosn = TellFile64(fOutFid);
+ unsigned size = addAtomHeader("????");
+addAtomEnd;
diff --git a/liveMedia/QuickTimeGenericRTPSource.cpp b/liveMedia/QuickTimeGenericRTPSource.cpp
new file mode 100644
index 0000000..e99236b
--- /dev/null
+++ b/liveMedia/QuickTimeGenericRTPSource.cpp
@@ -0,0 +1,274 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP Sources containing generic QuickTime stream data, as defined in
+// <http://developer.apple.com/quicktime/icefloe/dispatch026.html>
+// Implementation
+
+#include "QuickTimeGenericRTPSource.hh"
+
+///// QTGenericBufferedPacket and QTGenericBufferedPacketFactory /////
+
+// A subclass of BufferedPacket, used to separate out
+// individual frames (when PCK == 2)
+
+// A "BufferedPacket" subclass that knows how to split a packet into multiple
+// enclosed frames when the source's "PCK" mode is 2 (several samples per
+// packet, each preceded by its own small header -- see
+// "nextEnclosedFrameSize()" below).
+class QTGenericBufferedPacket: public BufferedPacket {
+public:
+  QTGenericBufferedPacket(QuickTimeGenericRTPSource& ourSource);
+  virtual ~QTGenericBufferedPacket();
+
+private: // redefined virtual functions
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+					 unsigned dataSize);
+private:
+  QuickTimeGenericRTPSource& fOurSource; // consulted for "qtState.PCK"
+};
+
+// Factory that creates "QTGenericBufferedPacket"s for the RTP source:
+class QTGenericBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+////////// QuickTimeGenericRTPSource //////////
+
+// Public factory function; simply forwards to the (protected) constructor.
+QuickTimeGenericRTPSource*
+QuickTimeGenericRTPSource::createNew(UsageEnvironment& env,
+				     Groupsock* RTPgs,
+				     unsigned char rtpPayloadFormat,
+				     unsigned rtpTimestampFrequency,
+				     char const* mimeTypeString) {
+  return new QuickTimeGenericRTPSource(env, RTPgs, rtpPayloadFormat,
+				       rtpTimestampFrequency,
+				       mimeTypeString);
+}
+
+// Constructor: installs our packet factory (so packets can be subdivided when
+// PCK == 2), copies the MIME type string, and zero-initializes "qtState",
+// which is later filled in by "processSpecialHeader()".
+QuickTimeGenericRTPSource
+::QuickTimeGenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+			    unsigned char rtpPayloadFormat,
+			    unsigned rtpTimestampFrequency,
+			    char const* mimeTypeString)
+  : MultiFramedRTPSource(env, RTPgs,
+			 rtpPayloadFormat, rtpTimestampFrequency,
+			 new QTGenericBufferedPacketFactory),
+  fMIMEtypeString(strDup(mimeTypeString)) {
+  qtState.PCK = 0;
+  qtState.timescale = 0;
+  qtState.sdAtom = NULL;
+  qtState.sdAtomSize = qtState.width = qtState.height = 0;
+}
+
+// Destructor: releases the heap copies made/owned by this object
+// (the 'sd' atom buffer and the strDup'd MIME type string).
+QuickTimeGenericRTPSource::~QuickTimeGenericRTPSource() {
+  delete[] qtState.sdAtom;
+  delete[] (char*)fMIMEtypeString;
+}
+
+// Parse the per-packet "QuickTime Header" that precedes the payload (see the
+// Apple "dispatch026" payload spec referenced at the top of this file).
+// On success, sets "resultSpecialHeaderSize" to the number of bytes to skip
+// before the payload, and updates "qtState" (PCK, timescale, 'sd' atom,
+// width, height) as a side effect.  Returns False if the packet is truncated,
+// malformed, or has an unknown version.
+Boolean QuickTimeGenericRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+		       unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+
+  // The "QuickTime Header" must be at least 4 bytes in size:
+  // Extract the known fields from the first 4 bytes:
+  // Byte 0: VER (4 bits) | PCK (2 bits) | S | Q ; byte 1 (top bit): L
+  unsigned expectedHeaderSize = 4;
+  if (packetSize < expectedHeaderSize) return False;
+
+  unsigned char VER = (headerStart[0]&0xF0)>>4;
+  if (VER > 1) return False; // unknown header version
+  qtState.PCK = (headerStart[0]&0x0C)>>2;
+#ifdef DEBUG
+  Boolean S = (headerStart[0]&0x02) != 0;
+#endif
+  Boolean Q = (headerStart[0]&0x01) != 0; // a "Payload Description" follows
+
+  Boolean L = (headerStart[1]&0x80) != 0; // "Sample-Specific info" follows
+
+#ifdef DEBUG
+  Boolean D = (headerStart[2]&0x80) != 0;
+  unsigned short payloadId = ((headerStart[2]&0x7F)<<8)|headerStart[3];
+#endif
+  headerStart += 4;
+
+#ifdef DEBUG
+  fprintf(stderr, "PCK: %d, S: %d, Q: %d, L: %d, D: %d, payloadId: %d\n", qtState.PCK, S, Q, L, D, payloadId);
+#endif
+
+  if (Q) { // A "QuickTime Payload Description" follows
+    expectedHeaderSize += 4;
+    if (packetSize < expectedHeaderSize) return False;
+
+#ifdef DEBUG
+    Boolean K = (headerStart[0]&0x80) != 0;
+    Boolean F = (headerStart[0]&0x40) != 0;
+    Boolean A = (headerStart[0]&0x20) != 0;
+    Boolean Z = (headerStart[0]&0x10) != 0;
+#endif
+    unsigned payloadDescriptionLength = (headerStart[2]<<8)|headerStart[3];
+    headerStart += 4;
+
+#ifdef DEBUG
+    fprintf(stderr, "\tK: %d, F: %d, A: %d, Z: %d, payloadDescriptionLength: %d\n", K, F, A, Z, payloadDescriptionLength);
+#endif
+    // Make sure "payloadDescriptionLength" is valid
+    // (it includes the 4 bytes already counted, plus the 8-byte
+    // mediaType+timescale fields read below, hence the minimum of 12):
+    if (payloadDescriptionLength < 12) return False;
+    expectedHeaderSize += (payloadDescriptionLength - 4);
+    unsigned nonPaddedSize = expectedHeaderSize;
+    expectedHeaderSize += 3;
+    expectedHeaderSize -= expectedHeaderSize%4; // adds padding (round up to a 4-byte boundary)
+    if (packetSize < expectedHeaderSize) return False;
+    unsigned char padding = expectedHeaderSize - nonPaddedSize;
+
+#ifdef DEBUG
+    unsigned mediaType = (headerStart[0]<<24)|(headerStart[1]<<16)
+      |(headerStart[2]<<8)|headerStart[3];
+#endif
+    qtState.timescale = (headerStart[4]<<24)|(headerStart[5]<<16)
+      |(headerStart[6]<<8)|headerStart[7];
+    headerStart += 8;
+
+    payloadDescriptionLength -= 12;
+#ifdef DEBUG
+    fprintf(stderr, "\tmediaType: '%c%c%c%c', timescale: %d, %d bytes of TLVs left\n", mediaType>>24, (mediaType&0xFF0000)>>16, (mediaType&0xFF00)>>8, mediaType&0xFF, qtState.timescale, payloadDescriptionLength);
+#endif
+
+    // Walk the TLV list; each entry is a 2-byte length + 2-byte type,
+    // followed by "tlvLength" bytes of value:
+    while (payloadDescriptionLength > 3) {
+      unsigned short tlvLength = (headerStart[0]<<8)|headerStart[1];
+      unsigned short tlvType = (headerStart[2]<<8)|headerStart[3];
+      payloadDescriptionLength -= 4;
+      if (tlvLength > payloadDescriptionLength) return False; // bad TLV
+      headerStart += 4;
+#ifdef DEBUG
+      fprintf(stderr, "\t\tTLV '%c%c', length %d, leaving %d remaining bytes\n", tlvType>>8, tlvType&0xFF, tlvLength, payloadDescriptionLength - tlvLength);
+      for (int i = 0; i < tlvLength; ++i) fprintf(stderr, "%02x:", headerStart[i]); fprintf(stderr, "\n");
+#endif
+
+      // Check for 'TLV's that we can use for our 'qtState'
+      switch (tlvType) {
+        case ('s'<<8|'d'): { // session description atom
+	  // Sanity check: the first 4 bytes of this must equal "tlvLength":
+	  unsigned atomLength = (headerStart[0]<<24)|(headerStart[1]<<16)
+	    |(headerStart[2]<<8)|(headerStart[3]);
+	  if (atomLength != (unsigned)tlvLength) break;
+
+	  // Keep our own heap copy of the atom (replacing any previous one):
+	  delete[] qtState.sdAtom; qtState.sdAtom = new char[tlvLength];
+	  memmove(qtState.sdAtom, headerStart, tlvLength);
+	  qtState.sdAtomSize = tlvLength;
+	  break;
+	}
+        case ('t'<<8|'w'): { // track width
+	  qtState.width = (headerStart[0]<<8)|headerStart[1];
+	  break;
+	}
+        case ('t'<<8|'h'): { // track height
+	  qtState.height = (headerStart[0]<<8)|headerStart[1];
+	  break;
+	}
+      }
+
+      payloadDescriptionLength -= tlvLength;
+      headerStart += tlvLength;
+    }
+    if (payloadDescriptionLength > 0) return False; // malformed TLV data
+    headerStart += padding;
+  }
+
+  if (L) { // Sample-Specific info follows
+    expectedHeaderSize += 4;
+    if (packetSize < expectedHeaderSize) return False;
+
+    unsigned ssInfoLength = (headerStart[2]<<8)|headerStart[3];
+    headerStart += 4;
+
+#ifdef DEBUG
+    fprintf(stderr, "\tssInfoLength: %d\n", ssInfoLength);
+#endif
+    // Make sure "ssInfoLength" is valid (it includes the 4 bytes just read):
+    if (ssInfoLength < 4) return False;
+    expectedHeaderSize += (ssInfoLength - 4);
+    unsigned nonPaddedSize = expectedHeaderSize;
+    expectedHeaderSize += 3;
+    expectedHeaderSize -= expectedHeaderSize%4; // adds padding
+    if (packetSize < expectedHeaderSize) return False;
+    unsigned char padding = expectedHeaderSize - nonPaddedSize;
+
+    // Skip over (but sanity-check) the sample-specific TLVs; none of them
+    // are currently used for "qtState":
+    ssInfoLength -= 4;
+    while (ssInfoLength > 3) {
+      unsigned short tlvLength = (headerStart[0]<<8)|headerStart[1];
+#ifdef DEBUG
+      unsigned short tlvType = (headerStart[2]<<8)|headerStart[3];
+#endif
+      ssInfoLength -= 4;
+      if (tlvLength > ssInfoLength) return False; // bad TLV
+#ifdef DEBUG
+      fprintf(stderr, "\t\tTLV '%c%c', length %d, leaving %d remaining bytes\n", tlvType>>8, tlvType&0xFF, tlvLength, ssInfoLength - tlvLength);
+      for (int i = 0; i < tlvLength; ++i) fprintf(stderr, "%02x:", headerStart[4+i]); fprintf(stderr, "\n");
+#endif
+      ssInfoLength -= tlvLength;
+      headerStart += 4 + tlvLength;
+    }
+    if (ssInfoLength > 0) return False; // malformed TLV data
+    headerStart += padding;
+  }
+
+  // Frame boundaries: a packet begins a frame iff the previous packet ended
+  // one, and the RTP marker bit signals the end of the current frame:
+  fCurrentPacketBeginsFrame = fCurrentPacketCompletesFrame;
+      // whether the *previous* packet ended a frame
+  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
+
+  resultSpecialHeaderSize = expectedHeaderSize;
+#ifdef DEBUG
+  fprintf(stderr, "Result special header size: %d\n", resultSpecialHeaderSize);
+#endif
+  return True;
+}
+
+// Report the MIME type given at construction time, falling back to the
+// base class's default when none was supplied.
+char const* QuickTimeGenericRTPSource::MIMEtype() const {
+  if (fMIMEtypeString == NULL) return MultiFramedRTPSource::MIMEtype();
+
+  return fMIMEtypeString;
+}
+
+
+////////// QTGenericBufferedPacket and QTGenericBufferedPacketFactory impl
+
+QTGenericBufferedPacket
+::QTGenericBufferedPacket(QuickTimeGenericRTPSource& ourSource)
+  : fOurSource(ourSource) {
+}
+
+QTGenericBufferedPacket::~QTGenericBufferedPacket() {
+}
+
+// Determine the size of the next frame within the packet.  In all modes
+// except PCK == 2 the whole packet is a single frame.  When PCK == 2,
+// each sample is preceded by an 8-byte header whose bytes 2-3 hold the
+// sample length; we skip that header and clamp the length to the data
+// actually present.
+unsigned QTGenericBufferedPacket::
+  nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  // We use the entire packet for a frame, unless "PCK" == 2
+  if (fOurSource.qtState.PCK != 2) return dataSize;
+
+  if (dataSize < 8) return 0; // sanity check
+
+  unsigned short sampleLength = (framePtr[2]<<8)|framePtr[3];
+  // later, extract and use the "timestamp" field #####
+  framePtr += 8;
+  dataSize -= 8;
+
+  return sampleLength < dataSize ? sampleLength : dataSize;
+}
+
+// Factory hook: the source passed in is known to be a
+// "QuickTimeGenericRTPSource", because that is the only class that
+// installs this factory.
+BufferedPacket* QTGenericBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+  return new QTGenericBufferedPacket((QuickTimeGenericRTPSource&)(*ourSource));
+}
diff --git a/liveMedia/RTCP.cpp b/liveMedia/RTCP.cpp
new file mode 100644
index 0000000..816588f
--- /dev/null
+++ b/liveMedia/RTCP.cpp
@@ -0,0 +1,1376 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTCP
+// Implementation
+
+#include "RTCP.hh"
+#include "GroupsockHelper.hh"
+#include "rtcp_from_spec.h"
+#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
+#define snprintf _snprintf
+#endif
+
+////////// RTCPMemberDatabase //////////
+
+// Tracks the SSRCs of known RTCP session members, so that the group size
+// estimate (used for report-interval computation) is accurate and stale
+// members can be aged out.  The member count starts at 1 for ourself, who
+// is never entered in the hash table.  Keys are the 32-bit SSRC values
+// squeezed into the table's one-word "char*" key slot; values are the
+// "curTimeCount" at which the member was last heard from.
+class RTCPMemberDatabase {
+public:
+  RTCPMemberDatabase(RTCPInstance& ourRTCPInstance)
+    : fOurRTCPInstance(ourRTCPInstance), fNumMembers(1 /*ourself*/),
+      fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
+  }
+
+  virtual ~RTCPMemberDatabase() {
+    delete fTable;
+  }
+
+  Boolean isMember(u_int32_t ssrc) const {
+    return fTable->Lookup((char*)(long)ssrc) != NULL;
+  }
+
+  // Record (or refresh) a member; returns True iff the SSRC was new.
+  Boolean noteMembership(u_int32_t ssrc, unsigned curTimeCount) {
+    Boolean isNew = !isMember(ssrc);
+
+    if (isNew) {
+      ++fNumMembers;
+    }
+
+    // Record the current time, so we can age stale members
+    fTable->Add((char*)(long)ssrc, (void*)(long)curTimeCount);
+
+    return isNew;
+  }
+
+  // Remove a member; returns True iff the SSRC was present.
+  Boolean remove(u_int32_t ssrc) {
+    Boolean wasPresent = fTable->Remove((char*)(long)ssrc);
+    if (wasPresent) {
+      --fNumMembers;
+    }
+    return wasPresent;
+  }
+
+  unsigned numMembers() const {
+    return fNumMembers;
+  }
+
+  // Remove all members whose recorded time count is below "threshold":
+  void reapOldMembers(unsigned threshold);
+
+private:
+  RTCPInstance& fOurRTCPInstance; // used to notify removal of stale SSRCs
+  unsigned fNumMembers;
+  HashTable* fTable;
+};
+
+// Remove every member whose last-heard time count is older than "threshold".
+// Because the hash-table iterator is not safe against removal during
+// iteration, each pass scans the whole table, remembers the *last* stale
+// SSRC seen, removes it via the RTCPInstance (which calls back into
+// "remove()"), and repeats until a full pass finds no stale member.
+void RTCPMemberDatabase::reapOldMembers(unsigned threshold) {
+  Boolean foundOldMember;
+  u_int32_t oldSSRC = 0;
+
+  do {
+    foundOldMember = False;
+
+    HashTable::Iterator* iter
+      = HashTable::Iterator::create(*fTable);
+    uintptr_t timeCount;
+    char const* key;
+    while ((timeCount = (uintptr_t)(iter->next(key))) != 0) {
+#ifdef DEBUG
+      // NOTE(review): "%ld" does not match "uintptr_t" on all platforms;
+      // harmless here because it is debug-only, but a cast would be cleaner.
+      fprintf(stderr, "reap: checking SSRC 0x%lx: %ld (threshold %d)\n", (unsigned long)key, timeCount, threshold);
+#endif
+      if (timeCount < (uintptr_t)threshold) { // this SSRC is old
+        uintptr_t ssrc = (uintptr_t)key;
+        oldSSRC = (u_int32_t)ssrc;
+	foundOldMember = True;
+      }
+    }
+    delete iter;
+
+    if (foundOldMember) {
+#ifdef DEBUG
+        fprintf(stderr, "reap: removing SSRC 0x%x\n", oldSSRC);
+#endif
+	fOurRTCPInstance.removeSSRC(oldSSRC, True);
+    }
+  } while (foundOldMember);
+}
+
+
+////////// RTCPInstance //////////
+
+// Current wall-clock time as a double (seconds, with microsecond resolution):
+static double dTimeNow() {
+  struct timeval timeNow;
+  gettimeofday(&timeNow, NULL);
+  return (double) (timeNow.tv_sec + timeNow.tv_usec/1000000.0);
+}
+
+static unsigned const maxRTCPPacketSize = 1438;
+	// bytes (1500, minus some allowance for IP, UDP, UMTP headers; SRTCP trailers)
+static unsigned const preferredRTCPPacketSize = 1000; // bytes
+
+// Constructor.  Sets up the member database, the input/output packet
+// buffers, incoming-report handling (either multiplexed with RTP on the
+// same Groupsock, or via its own network read handler), and schedules the
+// first outgoing report.  "sink"/"source" may each be NULL; "crypto" is
+// non-NULL when SRTCP is in use.
+RTCPInstance::RTCPInstance(UsageEnvironment& env, Groupsock* RTCPgs,
+			   unsigned totSessionBW,
+			   unsigned char const* cname,
+			   RTPSink* sink, RTPSource* source,
+			   Boolean isSSMTransmitter,
+			   SRTPCryptographicContext* crypto)
+  : Medium(env), fRTCPInterface(this, RTCPgs), fTotSessionBW(totSessionBW),
+    fSink(sink), fSource(source), fIsSSMTransmitter(isSSMTransmitter), fCrypto(crypto),
+    fCNAME(RTCP_SDES_CNAME, cname), fOutgoingReportCount(1),
+    fAveRTCPSize(0), fIsInitial(1), fPrevNumMembers(0),
+    fLastSentSize(0), fLastReceivedSize(0), fLastReceivedSSRC(0),
+    fTypeOfEvent(EVENT_UNKNOWN), fTypeOfPacket(PACKET_UNKNOWN_TYPE),
+    fHaveJustSentPacket(False), fLastPacketSentSize(0),
+    fByeHandlerTask(NULL), fByeWithReasonHandlerTask(NULL), fByeHandlerClientData(NULL),
+    fSRHandlerTask(NULL), fSRHandlerClientData(NULL),
+    fRRHandlerTask(NULL), fRRHandlerClientData(NULL),
+    fSpecificRRHandlerTable(NULL),
+    fAppHandlerTask(NULL), fAppHandlerClientData(NULL) {
+#ifdef DEBUG
+  fprintf(stderr, "RTCPInstance[%p]::RTCPInstance()\n", this);
+#endif
+  if (fTotSessionBW == 0) { // not allowed!
+    env << "RTCPInstance::RTCPInstance error: totSessionBW parameter should not be zero!\n";
+    fTotSessionBW = 1; // fall back to a nonzero value so interval math works
+  }
+
+  if (isSSMTransmitter) RTCPgs->multicastSendOnly(); // don't receive multicast
+
+  double timeNow = dTimeNow();
+  fPrevReportTime = fNextReportTime = timeNow;
+
+  fKnownMembers = new RTCPMemberDatabase(*this);
+  fInBuf = new unsigned char[maxRTCPPacketSize];
+  // NOTE(review): on standard toolchains "new" throws rather than returning
+  // NULL, so these checks look like legacy defensive code -- confirm whether
+  // a custom non-throwing allocator is in use before relying on them.
+  if (fKnownMembers == NULL || fInBuf == NULL) return;
+  fNumBytesAlreadyRead = 0;
+
+  fOutBuf = new OutPacketBuffer(preferredRTCPPacketSize, maxRTCPPacketSize, 1500);
+  if (fOutBuf == NULL) return;
+
+  if (fSource != NULL && fSource->RTPgs() == RTCPgs) {
+    // We're receiving RTCP reports that are multiplexed with RTP, so ask the RTP source
+    // to give them to us:
+    fSource->registerForMultiplexedRTCPPackets(this);
+  } else {
+    // Arrange to handle incoming reports from the network:
+    TaskScheduler::BackgroundHandlerProc* handler
+      = (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler;
+    fRTCPInterface.startNetworkReading(handler);
+  }
+
+  // Send our first report.
+  fTypeOfEvent = EVENT_REPORT;
+  onExpire(this);
+}
+
+// A (task, clientData) pair stored in "fSpecificRRHandlerTable", keyed by
+// the (address, port) an "RR" is expected to arrive from:
+struct RRHandlerRecord {
+  TaskFunc* rrHandlerTask;
+  void* rrHandlerClientData;
+};
+
+RTCPInstance::~RTCPInstance() {
+#ifdef DEBUG
+  fprintf(stderr, "RTCPInstance[%p]::~RTCPInstance()\n", this);
+#endif
+  // Begin by sending a BYE.  We have to do this immediately, without
+  // 'reconsideration', because "this" is going away.
+  fTypeOfEvent = EVENT_BYE; // not used, but...
+  sendBYE();
+
+  if (fSource != NULL && fSource->RTPgs() == fRTCPInterface.gs()) {
+    // We were receiving RTCP reports that were multiplexed with RTP, so tell the RTP source
+    // to stop giving them to us:
+    fSource->deregisterForMultiplexedRTCPPackets();
+    fRTCPInterface.forgetOurGroupsock();
+      // so that the "fRTCPInterface" destructor doesn't turn off background read handling
+  }
+
+  // Free all of the 'specific RR handler' records (the table does not own them):
+  if (fSpecificRRHandlerTable != NULL) {
+    AddressPortLookupTable::Iterator iter(*fSpecificRRHandlerTable);
+    RRHandlerRecord* rrHandler;
+    while ((rrHandler = (RRHandlerRecord*)iter.next()) != NULL) {
+      delete rrHandler;
+    }
+    delete fSpecificRRHandlerTable;
+  }
+
+  delete fKnownMembers;
+  delete fOutBuf;
+  delete[] fInBuf;
+}
+
+// Invoke any handlers registered for an arriving "RR" subpacket: first the
+// handler (if any) registered specifically for the packet's origin, then
+// the general RR handler.
+void RTCPInstance::noteArrivingRR(struct sockaddr_in const& fromAddressAndPort,
+				  int tcpSocketNum, unsigned char tcpStreamChannelId) {
+  // If a 'RR handler' was set, call it now:
+
+  // Specific RR handler:
+  if (fSpecificRRHandlerTable != NULL) {
+    netAddressBits fromAddr;
+    portNumBits fromPortNum;
+    if (tcpSocketNum < 0) {
+      // Normal case: We read the RTCP packet over UDP
+      fromAddr = fromAddressAndPort.sin_addr.s_addr;
+      fromPortNum = ntohs(fromAddressAndPort.sin_port);
+    } else {
+      // Special case: We read the RTCP packet over TCP (interleaved)
+      // Hack: Use the TCP socket and channel id to look up the handler
+      // (the table key is reused as (socket, channelId) instead of (addr, port))
+      fromAddr = tcpSocketNum;
+      fromPortNum = tcpStreamChannelId;
+    }
+    Port fromPort(fromPortNum);
+    RRHandlerRecord* rrHandler
+      = (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddr, (~0), fromPort));
+    if (rrHandler != NULL) {
+      if (rrHandler->rrHandlerTask != NULL) {
+	(*(rrHandler->rrHandlerTask))(rrHandler->rrHandlerClientData);
+      }
+    }
+  }
+
+  // General RR handler:
+  if (fRRHandlerTask != NULL) (*fRRHandlerTask)(fRRHandlerClientData);
+}
+
+// Public factory function; simply forwards to the (protected) constructor.
+RTCPInstance* RTCPInstance::createNew(UsageEnvironment& env, Groupsock* RTCPgs,
+				      unsigned totSessionBW,
+				      unsigned char const* cname,
+				      RTPSink* sink, RTPSource* source,
+				      Boolean isSSMTransmitter,
+				      SRTPCryptographicContext* crypt) {
+  return new RTCPInstance(env, RTCPgs, totSessionBW, cname, sink, source,
+			  isSSMTransmitter, crypt);
+}
+
+// Look up a previously-created RTCPInstance by its Medium name.
+// Returns False (with "resultInstance" set to NULL) if no Medium with that
+// name exists, or if it is not an RTCPInstance.
+Boolean RTCPInstance::lookupByName(UsageEnvironment& env,
+				   char const* instanceName,
+				   RTCPInstance*& resultInstance) {
+  resultInstance = NULL; // unless we succeed
+
+  Medium* medium;
+  if (!Medium::lookupByName(env, instanceName, medium)) return False;
+
+  if (!medium->isRTCPInstance()) {
+    env.setResultMsg(instanceName, " is not a RTCP instance");
+    return False;
+  }
+
+  resultInstance = (RTCPInstance*)medium;
+  return True;
+}
+
+// RTTI replacement used by "lookupByName()" above:
+Boolean RTCPInstance::isRTCPInstance() const {
+  return True;
+}
+
+// Current estimate of the number of session members (including ourself):
+unsigned RTCPInstance::numMembers() const {
+  if (fKnownMembers == NULL) return 0;
+
+  return fKnownMembers->numMembers();
+}
+
+// Register a handler for incoming "BYE" packets.  Note that the plain and
+// 'with reason' variants are mutually exclusive: setting one clears the
+// other.  If "handleActiveParticipantsOnly" is True, the handler fires only
+// for senders we already know from our reception/transmission stats.
+void RTCPInstance::setByeHandler(TaskFunc* handlerTask, void* clientData,
+				 Boolean handleActiveParticipantsOnly) {
+  fByeHandlerTask = handlerTask;
+  fByeWithReasonHandlerTask = NULL;
+  fByeHandlerClientData = clientData;
+  fByeHandleActiveParticipantsOnly = handleActiveParticipantsOnly;
+}
+
+// As "setByeHandler()", but the handler also receives the BYE 'reason' string
+// (which the handler takes ownership of):
+void RTCPInstance::setByeWithReasonHandler(ByeWithReasonHandlerFunc* handlerTask, void* clientData,
+					   Boolean handleActiveParticipantsOnly) {
+  fByeHandlerTask = NULL;
+  fByeWithReasonHandlerTask = handlerTask;
+  fByeHandlerClientData = clientData;
+  fByeHandleActiveParticipantsOnly = handleActiveParticipantsOnly;
+}
+
+// Register a handler called whenever a "SR" subpacket arrives:
+void RTCPInstance::setSRHandler(TaskFunc* handlerTask, void* clientData) {
+  fSRHandlerTask = handlerTask;
+  fSRHandlerClientData = clientData;
+}
+
+// Register a handler called whenever a "RR" subpacket arrives (from anyone):
+void RTCPInstance::setRRHandler(TaskFunc* handlerTask, void* clientData) {
+  fRRHandlerTask = handlerTask;
+  fRRHandlerClientData = clientData;
+}
+
+// Register a handler for "RR"s arriving from one specific (address, port).
+// Passing NULL for both "handlerTask" and "clientData" unregisters instead.
+// The lookup table is created lazily; a replaced record is deleted here
+// because the table does not own its values.
+void RTCPInstance
+::setSpecificRRHandler(netAddressBits fromAddress, Port fromPort,
+		       TaskFunc* handlerTask, void* clientData) {
+  if (handlerTask == NULL && clientData == NULL) {
+    unsetSpecificRRHandler(fromAddress, fromPort);
+    return;
+  }
+
+  RRHandlerRecord* rrHandler = new RRHandlerRecord;
+  rrHandler->rrHandlerTask = handlerTask;
+  rrHandler->rrHandlerClientData = clientData;
+  if (fSpecificRRHandlerTable == NULL) {
+    fSpecificRRHandlerTable = new AddressPortLookupTable;
+  }
+  RRHandlerRecord* existingRecord = (RRHandlerRecord*)fSpecificRRHandlerTable->Add(fromAddress, (~0), fromPort, rrHandler);
+  delete existingRecord; // if any
+
+}
+
+// Remove (and free) the handler record for the given (address, port), if any:
+void RTCPInstance
+::unsetSpecificRRHandler(netAddressBits fromAddress, Port fromPort) {
+  if (fSpecificRRHandlerTable == NULL) return;
+
+  RRHandlerRecord* rrHandler
+    = (RRHandlerRecord*)(fSpecificRRHandlerTable->Lookup(fromAddress, (~0), fromPort));
+  if (rrHandler != NULL) {
+    fSpecificRRHandlerTable->Remove(fromAddress, (~0), fromPort);
+    delete rrHandler;
+  }
+}
+
+// Register a handler called whenever an "APP" subpacket arrives:
+void RTCPInstance::setAppHandler(RTCPAppHandlerFunc* handlerTask, void* clientData) {
+  fAppHandlerTask = handlerTask;
+  fAppHandlerClientData = clientData;
+}
+
+// Build and send a RTCP "APP" packet (RFC 3550, section 6.7):
+//   header word (V=2 | subtype | PT=APP | length), SSRC, 4-char "name",
+//   then optional application-dependent data, zero-padded to a word boundary.
+// Only the low 5 bits of "subtype" are used; "name" should be exactly four
+// ASCII characters (shorter names are zero-padded; longer ones truncated).
+void RTCPInstance::sendAppPacket(u_int8_t subtype, char const* name,
+				 u_int8_t* appDependentData, unsigned appDependentDataSize) {
+  // Set up the first 4 bytes: V,PT,subtype,PT,length:
+  u_int32_t rtcpHdr = 0x80000000; // version 2, no padding
+  rtcpHdr |= (subtype&0x1F)<<24;
+  rtcpHdr |= (RTCP_PT_APP<<16);
+  unsigned length = 2 + (appDependentDataSize+3)/4; // in 32-bit words, minus one
+  rtcpHdr |= (length&0xFFFF);
+  fOutBuf->enqueueWord(rtcpHdr);
+
+  // Set up the next 4 bytes: SSRC:
+  fOutBuf->enqueueWord(fSource != NULL ? fSource->SSRC() : fSink != NULL ? fSink->SSRC() : 0);
+
+  // Set up the next 4 bytes: name:
+  // RFC 3550 defines "name" as exactly four ASCII characters, with no NUL
+  // terminator on the wire.  (Note: using snprintf(nameBytes, 4, ...) here
+  // would silently drop the 4th character, because snprintf always reserves
+  // one byte for the terminator -- so we copy up to 4 bytes explicitly.)
+  char nameBytes[4];
+  nameBytes[0] = nameBytes[1] = nameBytes[2] = nameBytes[3] = '\0'; // by default
+  if (name != NULL) {
+    for (unsigned i = 0; i < 4 && name[i] != '\0'; ++i) nameBytes[i] = name[i];
+  }
+  fOutBuf->enqueue((u_int8_t*)nameBytes, 4);
+
+  // Set up the remaining bytes (if any): application-dependent data (+ padding):
+  if (appDependentData != NULL && appDependentDataSize > 0) {
+    fOutBuf->enqueue(appDependentData, appDependentDataSize);
+
+    unsigned modulo = appDependentDataSize%4;
+    unsigned paddingSize = modulo == 0 ? 0 : 4-modulo;
+    u_int8_t const paddingByte = 0x00;
+    for (unsigned i = 0; i < paddingSize; ++i) fOutBuf->enqueue(&paddingByte, 1);
+  }
+
+  // Finally, send the packet:
+  sendBuiltPacket();
+}
+
+// Replace the current transport with RTCP-over-TCP on the given socket and
+// stream channel (stops UDP reading first, then re-arms background reading):
+void RTCPInstance::setStreamSocket(int sockNum,
+				   unsigned char streamChannelId) {
+  // Turn off background read handling:
+  fRTCPInterface.stopNetworkReading();
+
+  // Switch to RTCP-over-TCP:
+  fRTCPInterface.setStreamSocket(sockNum, streamChannelId);
+
+  // Turn background reading back on:
+  TaskScheduler::BackgroundHandlerProc* handler
+    = (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler;
+  fRTCPInterface.startNetworkReading(handler);
+}
+
+// Add an additional RTCP-over-TCP transport (alongside whatever exists),
+// disabling read handling on the default UDP socket:
+void RTCPInstance::addStreamSocket(int sockNum,
+				   unsigned char streamChannelId) {
+  // First, turn off background read handling for the default (UDP) socket:
+  envir().taskScheduler().turnOffBackgroundReadHandling(fRTCPInterface.gs()->socketNum());
+
+  // Add the RTCP-over-TCP interface:
+  fRTCPInterface.addStreamSocket(sockNum, streamChannelId);
+
+  // Turn on background reading for this socket (in case it's not on already):
+  TaskScheduler::BackgroundHandlerProc* handler
+    = (TaskScheduler::BackgroundHandlerProc*)&incomingReportHandler;
+  fRTCPInterface.startNetworkReading(handler);
+}
+
+// Feed an externally-obtained RTCP packet into this instance, as if it had
+// been received over UDP.  The packet is copied (and clamped) into "fInBuf".
+void RTCPInstance
+::injectReport(u_int8_t const* packet, unsigned packetSize, struct sockaddr_in const& fromAddress) {
+  if (packetSize > maxRTCPPacketSize) packetSize = maxRTCPPacketSize;
+  memmove(fInBuf, packet, packetSize);
+
+  processIncomingReport(packetSize, fromAddress, -1, 0xFF); // assume report received over UDP
+}
+
+static unsigned const IP_UDP_HDR_SIZE = 28;
+	// overhead (bytes) of IP and UDP hdrs
+
+// Advance the parse pointer and shrink the remaining-size counter in lockstep:
+#define ADVANCE(n) pkt += (n); packetSize -= (n)
+
+// Static trampoline installed as the TaskScheduler background-read handler:
+void RTCPInstance::incomingReportHandler(RTCPInstance* instance,
+					 int /*mask*/) {
+  instance->incomingReportHandler1();
+}
+
+// Read one incoming RTCP packet from the network (possibly accumulated over
+// several TCP reads), filter out true loop-backs of our own packets, reflect
+// unicast reports to the multicast group when we are a SSM transmitter, and
+// hand the packet to "processIncomingReport()".  The do/while(0) exists only
+// so that "break" can be used for early exit.
+void RTCPInstance::incomingReportHandler1() {
+  do {
+    if (fNumBytesAlreadyRead >= maxRTCPPacketSize) {
+      envir() << "RTCPInstance error: Hit limit when reading incoming packet over TCP. (fNumBytesAlreadyRead ("
+	      << fNumBytesAlreadyRead << ") >= maxRTCPPacketSize (" << maxRTCPPacketSize
+	      << ")).  The remote endpoint is using a buggy implementation of RTP/RTCP-over-TCP. Please upgrade it!\n";
+      break;
+    }
+
+    unsigned numBytesRead;
+    struct sockaddr_in fromAddress;
+    int tcpSocketNum;
+    unsigned char tcpStreamChannelId;
+    Boolean packetReadWasIncomplete;
+    Boolean readResult
+      = fRTCPInterface.handleRead(&fInBuf[fNumBytesAlreadyRead], maxRTCPPacketSize - fNumBytesAlreadyRead,
+				  numBytesRead, fromAddress,
+				  tcpSocketNum, tcpStreamChannelId,
+				  packetReadWasIncomplete);
+
+    unsigned packetSize = 0;
+    if (packetReadWasIncomplete) {
+      fNumBytesAlreadyRead += numBytesRead;
+      return; // more reads are needed to get the entire packet
+    } else { // normal case: We've read the entire packet
+      packetSize = fNumBytesAlreadyRead + numBytesRead;
+      fNumBytesAlreadyRead = 0; // for next time
+    }
+    if (!readResult) break;
+
+    // Ignore the packet if it was looped-back from ourself:
+    Boolean packetWasFromOurHost = False;
+    if (RTCPgs()->wasLoopedBackFromUs(envir(), fromAddress)) {
+      packetWasFromOurHost = True;
+      // However, we still want to handle incoming RTCP packets from
+      // *other processes* on the same machine.  To distinguish this
+      // case from a true loop-back, check whether we've just sent a
+      // packet of the same size.  (This check isn't perfect, but it seems
+      // to be the best we can do.)
+      if (fHaveJustSentPacket && fLastPacketSentSize == packetSize) {
+	// This is a true loop-back:
+	fHaveJustSentPacket = False;
+	break; // ignore this packet
+      }
+    }
+
+    if (fIsSSMTransmitter && !packetWasFromOurHost) {
+      // This packet is assumed to have been received via unicast (because we're
+      // a SSM transmitter, and SSM receivers send back RTCP "RR" packets via unicast).
+      // 'Reflect' the packet by resending it to the multicast group, so that any other receivers
+      // can also get to see it.
+
+      // NOTE: Denial-of-service attacks are possible here.
+      // Users of this software may wish to add their own,
+      // application-specific mechanism for 'authenticating' the
+      // validity of this packet before reflecting it.
+
+      // NOTE: The test for "!packetWasFromOurHost" means that we won't reflect RTCP packets
+      // that come from other processes on the same host as us.  The reason for this is that the
+      // 'packet size' test above is not 100% reliable; some packets that were truly looped back
+      // from us might not be detected as such, and this might lead to infinite
+      // forwarding/receiving of some packets.  To avoid this possibility, we reflect only
+      // RTCP packets that we know for sure originated elsewhere.
+      // (Note, though, that if we ever re-enable the code in "Groupsock::multicastSendOnly()",
+      // then we could remove the test for "!packetWasFromOurHost".)
+      fRTCPInterface.sendPacket(fInBuf, packetSize);
+      fHaveJustSentPacket = True;
+      fLastPacketSentSize = packetSize;
+    }
+
+    processIncomingReport(packetSize, fromAddress, tcpSocketNum, tcpStreamChannelId);
+  } while (0);
+}
+
+void RTCPInstance
+::processIncomingReport(unsigned packetSize, struct sockaddr_in const& fromAddressAndPort,
+ int tcpSocketNum, unsigned char tcpStreamChannelId) {
+ do {
+ if (fCrypto != NULL) { // The packet is assumed to be SRTCP. Verify/decrypt it first:
+ unsigned newPacketSize;
+ if (!fCrypto->processIncomingSRTCPPacket(fInBuf, packetSize, newPacketSize)) break;
+ packetSize = newPacketSize;
+ }
+
+ Boolean callByeHandler = False;
+ char* reason = NULL; // by default, unless/until a BYE packet with a 'reason' arrives
+ unsigned char* pkt = fInBuf;
+
+#ifdef DEBUG
+ fprintf(stderr, "[%p]saw incoming RTCP packet (from ", this);
+ if (tcpSocketNum < 0) {
+ // Note that "fromAddressAndPort" is valid only if we're receiving over UDP (not over TCP):
+ fprintf(stderr, "address %s, port %d", AddressString(fromAddressAndPort).val(), ntohs(fromAddressAndPort.sin_port));
+ } else {
+ fprintf(stderr, "TCP socket #%d, stream channel id %d", tcpSocketNum, tcpStreamChannelId);
+ }
+ fprintf(stderr, ")\n");
+ for (unsigned i = 0; i < packetSize; ++i) {
+ if (i%4 == 0) fprintf(stderr, " ");
+ fprintf(stderr, "%02x", pkt[i]);
+ }
+ fprintf(stderr, "\n");
+#endif
+ int totPacketSize = IP_UDP_HDR_SIZE + packetSize;
+
+ // Check the RTCP packet for validity:
+ // It must at least contain a header (4 bytes), and this header
+ // must be version=2, with no padding bit, and a payload type of
+ // SR (200), RR (201), or APP (204):
+ if (packetSize < 4) break;
+ unsigned rtcpHdr = ntohl(*(u_int32_t*)pkt);
+ if ((rtcpHdr & 0xE0FE0000) != (0x80000000 | (RTCP_PT_SR<<16)) &&
+ (rtcpHdr & 0xE0FF0000) != (0x80000000 | (RTCP_PT_APP<<16))) {
+#ifdef DEBUG
+ fprintf(stderr, "rejected bad RTCP packet: header 0x%08x\n", rtcpHdr);
+#endif
+ break;
+ }
+
+ // Process each of the individual RTCP 'subpackets' in (what may be)
+ // a compound RTCP packet.
+ int typeOfPacket = PACKET_UNKNOWN_TYPE;
+ unsigned reportSenderSSRC = 0;
+ Boolean packetOK = False;
+ while (1) {
+ u_int8_t rc = (rtcpHdr>>24)&0x1F;
+ u_int8_t pt = (rtcpHdr>>16)&0xFF;
+ unsigned length = 4*(rtcpHdr&0xFFFF); // doesn't count hdr
+ ADVANCE(4); // skip over the header
+ if (length > packetSize) break;
+
+ // Assume that each RTCP subpacket begins with a 4-byte SSRC:
+ if (length < 4) break; length -= 4;
+ reportSenderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+#ifdef HACK_FOR_CHROME_WEBRTC_BUG
+ if (reportSenderSSRC == 0x00000001 && pt == RTCP_PT_RR) {
+ // Chrome (and Opera) WebRTC receivers have a bug that causes them to always send
+ // SSRC 1 in their "RR"s. To work around this (to help us distinguish between different
+ // receivers), we use a fake SSRC in this case consisting of the IP address, XORed with
+ // the port number:
+ reportSenderSSRC = fromAddressAndPort.sin_addr.s_addr^fromAddressAndPort.sin_port;
+ }
+#endif
+
+ Boolean subPacketOK = False;
+ switch (pt) {
+ case RTCP_PT_SR: {
+#ifdef DEBUG
+ fprintf(stderr, "SR\n");
+#endif
+ if (length < 20) break; length -= 20;
+
+ // Extract the NTP timestamp, and note this:
+ unsigned NTPmsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ unsigned NTPlsw = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ unsigned rtpTimestamp = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ if (fSource != NULL) {
+ RTPReceptionStatsDB& receptionStats
+ = fSource->receptionStatsDB();
+ receptionStats.noteIncomingSR(reportSenderSSRC,
+ NTPmsw, NTPlsw, rtpTimestamp);
+ }
+ ADVANCE(8); // skip over packet count, octet count
+
+ // If a 'SR handler' was set, call it now:
+ if (fSRHandlerTask != NULL) (*fSRHandlerTask)(fSRHandlerClientData);
+
+ // The rest of the SR is handled like a RR (so, no "break;" here)
+ }
+ case RTCP_PT_RR: {
+#ifdef DEBUG
+ fprintf(stderr, "RR\n");
+#endif
+ unsigned reportBlocksSize = rc*(6*4);
+ if (length < reportBlocksSize) break;
+ length -= reportBlocksSize;
+
+ if (fSink != NULL) {
+ // Use this information to update stats about our transmissions:
+ RTPTransmissionStatsDB& transmissionStats = fSink->transmissionStatsDB();
+ for (unsigned i = 0; i < rc; ++i) {
+ unsigned senderSSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ // We care only about reports about our own transmission, not others'
+ if (senderSSRC == fSink->SSRC()) {
+ unsigned lossStats = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ unsigned highestReceived = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ unsigned jitter = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ unsigned timeLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ unsigned timeSinceLastSR = ntohl(*(u_int32_t*)pkt); ADVANCE(4);
+ transmissionStats.noteIncomingRR(reportSenderSSRC, fromAddressAndPort,
+ lossStats,
+ highestReceived, jitter,
+ timeLastSR, timeSinceLastSR);
+ } else {
+ ADVANCE(4*5);
+ }
+ }
+ } else {
+ ADVANCE(reportBlocksSize);
+ }
+
+ if (pt == RTCP_PT_RR) { // i.e., we didn't fall through from 'SR'
+ noteArrivingRR(fromAddressAndPort, tcpSocketNum, tcpStreamChannelId);
+ }
+
+ subPacketOK = True;
+ typeOfPacket = PACKET_RTCP_REPORT;
+ break;
+ }
+ case RTCP_PT_BYE: {
+#ifdef DEBUG
+ fprintf(stderr, "BYE");
+#endif
+ // Check whether there was a 'reason for leaving':
+ if (length > 0) {
+ u_int8_t reasonLength = *pkt;
+ if (reasonLength > length-1) {
+ // The 'reason' length field is too large!
+#ifdef DEBUG
+ fprintf(stderr, "\nError: The 'reason' length %d is too large (it should be <= %d)\n",
+ reasonLength, length-1);
+#endif
+ reasonLength = length-1;
+ }
+ reason = new char[reasonLength + 1];
+ for (unsigned i = 0; i < reasonLength; ++i) {
+ reason[i] = pkt[1+i];
+ }
+ reason[reasonLength] = '\0';
+#ifdef DEBUG
+ fprintf(stderr, " (reason:%s)", reason);
+#endif
+ }
+#ifdef DEBUG
+ fprintf(stderr, "\n");
+#endif
+ // If a 'BYE handler' was set, arrange for it to be called at the end of this routine.
+ // (Note: We don't call it immediately, in case it happens to cause "this" to be deleted.)
+ if ((fByeHandlerTask != NULL || fByeWithReasonHandlerTask != NULL)
+ && (!fByeHandleActiveParticipantsOnly
+ || (fSource != NULL
+ && fSource->receptionStatsDB().lookup(reportSenderSSRC) != NULL)
+ || (fSink != NULL
+ && fSink->transmissionStatsDB().lookup(reportSenderSSRC) != NULL))) {
+ callByeHandler = True;
+ }
+
+ // We should really check for & handle >1 SSRCs being present #####
+
+ subPacketOK = True;
+ typeOfPacket = PACKET_BYE;
+ break;
+ }
+ case RTCP_PT_APP: {
+ u_int8_t& subtype = rc; // In "APP" packets, the "rc" field gets used as "subtype"
+#ifdef DEBUG
+ fprintf(stderr, "APP (subtype 0x%02x)\n", subtype);
+#endif
+ if (length < 4) {
+#ifdef DEBUG
+ fprintf(stderr, "\tError: No \"name\" field!\n");
+#endif
+ break;
+ }
+ length -= 4;
+#ifdef DEBUG
+ fprintf(stderr, "\tname:%c%c%c%c\n", pkt[0], pkt[1], pkt[2], pkt[3]);
+#endif
+ u_int32_t nameBytes = (pkt[0]<<24)|(pkt[1]<<16)|(pkt[2]<<8)|(pkt[3]);
+ ADVANCE(4); // skip over "name", to the 'application-dependent data'
+#ifdef DEBUG
+ fprintf(stderr, "\tapplication-dependent data size: %d bytes\n", length);
+#endif
+
+ // If an 'APP' packet handler was set, call it now:
+ if (fAppHandlerTask != NULL) {
+ (*fAppHandlerTask)(fAppHandlerClientData, subtype, nameBytes, pkt, length);
+ }
+ subPacketOK = True;
+ typeOfPacket = PACKET_RTCP_APP;
+ break;
+ }
+ // Other RTCP packet types that we don't yet handle:
+ case RTCP_PT_SDES: {
+#ifdef DEBUG
+ // 'Handle' SDES packets only in debugging code, by printing out the 'SDES items':
+ fprintf(stderr, "SDES\n");
+
+ // Process each 'chunk':
+ Boolean chunkOK = False;
+ ADVANCE(-4); length += 4; // hack so that we see the first SSRC/CSRC again
+ while (length >= 8) { // A valid chunk must be at least 8 bytes long
+ chunkOK = False; // until we learn otherwise
+
+ u_int32_t SSRC_CSRC = ntohl(*(u_int32_t*)pkt); ADVANCE(4); length -= 4;
+ fprintf(stderr, "\tSSRC/CSRC: 0x%08x\n", SSRC_CSRC);
+
+ // Process each 'SDES item' in the chunk:
+ u_int8_t itemType = *pkt; ADVANCE(1); --length;
+ while (itemType != 0) {
+ unsigned itemLen = *pkt; ADVANCE(1); --length;
+ // Make sure "itemLen" allows for at least 1 zero byte at the end of the chunk:
+ if (itemLen + 1 > length || pkt[itemLen] != 0) break;
+
+ fprintf(stderr, "\t\t%s:%s\n",
+ itemType == 1 ? "CNAME" :
+ itemType == 2 ? "NAME" :
+ itemType == 3 ? "EMAIL" :
+ itemType == 4 ? "PHONE" :
+ itemType == 5 ? "LOC" :
+ itemType == 6 ? "TOOL" :
+ itemType == 7 ? "NOTE" :
+ itemType == 8 ? "PRIV" :
+ "(unknown)",
+ itemType < 8 ? (char*)pkt // hack, because we know it's '\0'-terminated
+ : "???"/* don't try to print out PRIV or unknown items */);
+ ADVANCE(itemLen); length -= itemLen;
+
+ itemType = *pkt; ADVANCE(1); --length;
+ }
+ if (itemType != 0) break; // bad 'SDES item'
+
+ // Thus, itemType == 0. This zero 'type' marks the end of the list of SDES items.
+ // Skip over remaining zero padding bytes, so that this chunk ends on a 4-byte boundary:
+ while (length%4 > 0 && *pkt == 0) { ADVANCE(1); --length; }
+ if (length%4 > 0) break; // Bad (non-zero) padding byte
+
+ chunkOK = True;
+ }
+ if (!chunkOK || length > 0) break; // bad chunk, or not enough bytes for the last chunk
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_RTPFB: {
+#ifdef DEBUG
+ fprintf(stderr, "RTPFB(unhandled)\n");
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_PSFB: {
+#ifdef DEBUG
+ fprintf(stderr, "PSFB(unhandled)\n");
+ // Temporary code to show "Receiver Estimated Maximum Bitrate" (REMB) feedback reports:
+ //#####
+ if (length >= 12 && pkt[4] == 'R' && pkt[5] == 'E' && pkt[6] == 'M' && pkt[7] == 'B') {
+ u_int8_t exp = pkt[9]>>2;
+ u_int32_t mantissa = ((pkt[9]&0x03)<<16)|(pkt[10]<<8)|pkt[11];
+ double remb = (double)mantissa;
+ while (exp > 0) {
+ remb *= 2.0;
+ exp /= 2;
+ }
+ fprintf(stderr, "\tReceiver Estimated Max Bitrate (REMB): %g bps\n", remb);
+ }
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_XR: {
+#ifdef DEBUG
+ fprintf(stderr, "XR(unhandled)\n");
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_AVB: {
+#ifdef DEBUG
+ fprintf(stderr, "AVB(unhandled)\n");
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_RSI: {
+#ifdef DEBUG
+ fprintf(stderr, "RSI(unhandled)\n");
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_TOKEN: {
+#ifdef DEBUG
+ fprintf(stderr, "TOKEN(unhandled)\n");
+#endif
+ subPacketOK = True;
+ break;
+ }
+ case RTCP_PT_IDMS: {
+#ifdef DEBUG
+ fprintf(stderr, "IDMS(unhandled)\n");
+#endif
+ subPacketOK = True;
+ break;
+ }
+ default: {
+#ifdef DEBUG
+ fprintf(stderr, "UNKNOWN TYPE(0x%x)\n", pt);
+#endif
+ subPacketOK = True;
+ break;
+ }
+ }
+ if (!subPacketOK) break;
+
+ // need to check for (& handle) SSRC collision! #####
+
+#ifdef DEBUG
+ fprintf(stderr, "validated RTCP subpacket: rc:%d, pt:%d, bytes remaining:%d, report sender SSRC:0x%08x\n", rc, pt, length, reportSenderSSRC);
+#endif
+
+ // Skip over any remaining bytes in this subpacket:
+ ADVANCE(length);
+
+ // Check whether another RTCP 'subpacket' follows:
+ if (packetSize == 0) {
+ packetOK = True;
+ break;
+ } else if (packetSize < 4) {
+#ifdef DEBUG
+ fprintf(stderr, "extraneous %d bytes at end of RTCP packet!\n", packetSize);
+#endif
+ break;
+ }
+ rtcpHdr = ntohl(*(u_int32_t*)pkt);
+ if ((rtcpHdr & 0xC0000000) != 0x80000000) {
+#ifdef DEBUG
+ fprintf(stderr, "bad RTCP subpacket: header 0x%08x\n", rtcpHdr);
+#endif
+ break;
+ }
+ }
+
+ if (!packetOK) {
+#ifdef DEBUG
+ fprintf(stderr, "rejected bad RTCP subpacket: header 0x%08x\n", rtcpHdr);
+#endif
+ break;
+ } else {
+#ifdef DEBUG
+ fprintf(stderr, "validated entire RTCP packet\n");
+#endif
+ }
+
+ onReceive(typeOfPacket, totPacketSize, reportSenderSSRC);
+
+ // Finally, if we need to call a "BYE" handler, do so now (in case it causes "this" to get deleted):
+ if (callByeHandler) {
+ if (fByeHandlerTask != NULL) { // call a BYE handler without including a 'reason'
+ TaskFunc* byeHandler = fByeHandlerTask;
+ fByeHandlerTask = NULL; // because we call the handler only once, by default
+ (*byeHandler)(fByeHandlerClientData);
+ } else if (fByeWithReasonHandlerTask != NULL) { // call a BYE handler that includes a 'reason'
+ ByeWithReasonHandlerFunc* byeHandler = fByeWithReasonHandlerTask;
+ fByeWithReasonHandlerTask = NULL; // because we call the handler only once, by default
+ (*byeHandler)(fByeHandlerClientData, reason);
+ // Note that the handler function is responsible for delete[]ing "reason"
+ }
+ }
+ } while (0);
+}
+
+void RTCPInstance::onReceive(int typeOfPacket, int totPacketSize, u_int32_t ssrc) {
+ fTypeOfPacket = typeOfPacket;
+ fLastReceivedSize = totPacketSize;
+ fLastReceivedSSRC = ssrc;
+
+ int members = (int)numMembers();
+ int senders = (fSink != NULL) ? 1 : 0;
+
+ OnReceive(this, // p
+ this, // e
+ &members, // members
+ &fPrevNumMembers, // pmembers
+ &senders, // senders
+ &fAveRTCPSize, // avg_rtcp_size
+ &fPrevReportTime, // tp
+ dTimeNow(), // tc
+ fNextReportTime);
+}
+
+void RTCPInstance::sendReport() {
+#ifdef DEBUG
+ fprintf(stderr, "sending REPORT\n");
+#endif
+ // Begin by including a SR and/or RR report:
+ if (!addReport()) return;
+
+ // Then, include a SDES:
+ addSDES();
+
+ // Send the report:
+ sendBuiltPacket();
+
+ // Periodically clean out old members from our SSRC membership database:
+ const unsigned membershipReapPeriod = 5;
+ if ((++fOutgoingReportCount) % membershipReapPeriod == 0) {
+ unsigned threshold = fOutgoingReportCount - membershipReapPeriod;
+ fKnownMembers->reapOldMembers(threshold);
+ }
+}
+
+void RTCPInstance::sendBYE(char const* reason) {
+#ifdef DEBUG
+ if (reason != NULL) {
+ fprintf(stderr, "sending BYE (reason:%s)\n", reason);
+ } else {
+ fprintf(stderr, "sending BYE\n");
+ }
+#endif
+ // The packet must begin with a SR and/or RR report:
+ (void)addReport(True);
+
+ addBYE(reason);
+ sendBuiltPacket();
+}
+
+void RTCPInstance::sendBuiltPacket() {
+#ifdef DEBUG
+ fprintf(stderr, "sending RTCP packet\n");
+ unsigned char* p = fOutBuf->packet();
+ for (unsigned i = 0; i < fOutBuf->curPacketSize(); ++i) {
+ if (i%4 == 0) fprintf(stderr," ");
+ fprintf(stderr, "%02x", p[i]);
+ }
+ fprintf(stderr, "\n");
+#endif
+ unsigned reportSize = fOutBuf->curPacketSize();
+ if (fCrypto != NULL) { // Encrypt/tag the data before sending it:
+ unsigned newReportSize;
+ if (!fCrypto->processOutgoingSRTCPPacket(fOutBuf->packet(), reportSize, newReportSize)) return;
+ reportSize = newReportSize;
+ }
+ fRTCPInterface.sendPacket(fOutBuf->packet(), reportSize);
+ fOutBuf->resetOffset();
+
+ fLastSentSize = IP_UDP_HDR_SIZE + reportSize;
+ fHaveJustSentPacket = True;
+ fLastPacketSentSize = reportSize;
+}
+
+int RTCPInstance::checkNewSSRC() {
+ return fKnownMembers->noteMembership(fLastReceivedSSRC,
+ fOutgoingReportCount);
+}
+
+void RTCPInstance::removeLastReceivedSSRC() {
+ removeSSRC(fLastReceivedSSRC, False/*keep stats around*/);
+}
+
+void RTCPInstance::removeSSRC(u_int32_t ssrc, Boolean alsoRemoveStats) {
+ fKnownMembers->remove(ssrc);
+
+ if (alsoRemoveStats) {
+ // Also, remove records of this SSRC from any reception or transmission stats
+ if (fSource != NULL) fSource->receptionStatsDB().removeRecord(ssrc);
+ if (fSink != NULL) fSink->transmissionStatsDB().removeRecord(ssrc);
+ }
+}
+
+void RTCPInstance::onExpire(RTCPInstance* instance) {
+ instance->onExpire1();
+}
+
+// Member functions to build specific kinds of report:
+
+Boolean RTCPInstance::addReport(Boolean alwaysAdd) {
+ // Include a SR or a RR, depending on whether we have an associated sink or source:
+ if (fSink != NULL) {
+ if (!alwaysAdd) {
+ if (!fSink->enableRTCPReports()) return False;
+
+ // Hack: Don't send a SR during those (brief) times when the timestamp of the
+ // next outgoing RTP packet has been preset, to ensure that that timestamp gets
+ // used for that outgoing packet. (David Bertrand, 2006.07.18)
+ if (fSink->nextTimestampHasBeenPreset()) return False;
+ }
+
+ addSR();
+ }
+ if (fSource != NULL) {
+ if (!alwaysAdd) {
+ if (!fSource->enableRTCPReports()) return False;
+ }
+
+ addRR();
+ }
+
+ return True;
+}
+
+void RTCPInstance::addSR() {
+ // ASSERT: fSink != NULL
+
+ enqueueCommonReportPrefix(RTCP_PT_SR, fSink->SSRC(),
+ 5 /* extra words in a SR */);
+
+ // Now, add the 'sender info' for our sink
+
+ // Insert the NTP and RTP timestamps for the 'wallclock time':
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+ fOutBuf->enqueueWord(timeNow.tv_sec + 0x83AA7E80);
+ // NTP timestamp most-significant word (1970 epoch -> 1900 epoch)
+ double fractionalPart = (timeNow.tv_usec/15625.0)*0x04000000; // 2^32/10^6
+ fOutBuf->enqueueWord((unsigned)(fractionalPart+0.5));
+ // NTP timestamp least-significant word
+ unsigned rtpTimestamp = fSink->convertToRTPTimestamp(timeNow);
+ fOutBuf->enqueueWord(rtpTimestamp); // RTP ts
+
+ // Insert the packet and byte counts:
+ fOutBuf->enqueueWord(fSink->packetCount());
+ fOutBuf->enqueueWord(fSink->octetCount());
+
+ enqueueCommonReportSuffix();
+}
+
+void RTCPInstance::addRR() {
+ // ASSERT: fSource != NULL
+
+ enqueueCommonReportPrefix(RTCP_PT_RR, fSource->SSRC());
+ enqueueCommonReportSuffix();
+}
+
+void RTCPInstance::enqueueCommonReportPrefix(unsigned char packetType,
+ u_int32_t SSRC,
+ unsigned numExtraWords) {
+ unsigned numReportingSources;
+ if (fSource == NULL) {
+ numReportingSources = 0; // we don't receive anything
+ } else {
+ RTPReceptionStatsDB& allReceptionStats
+ = fSource->receptionStatsDB();
+ numReportingSources = allReceptionStats.numActiveSourcesSinceLastReset();
+ // This must be <32, to fit in 5 bits:
+ if (numReportingSources >= 32) { numReportingSources = 32; }
+ // Later: support adding more reports to handle >32 sources (unlikely)#####
+ }
+
+ unsigned rtcpHdr = 0x80000000; // version 2, no padding
+ rtcpHdr |= (numReportingSources<<24);
+ rtcpHdr |= (packetType<<16);
+ rtcpHdr |= (1 + numExtraWords + 6*numReportingSources);
+ // each report block is 6 32-bit words long
+ fOutBuf->enqueueWord(rtcpHdr);
+
+ fOutBuf->enqueueWord(SSRC);
+}
+
+void RTCPInstance::enqueueCommonReportSuffix() {
+ // Output the report blocks for each source:
+ if (fSource != NULL) {
+ RTPReceptionStatsDB& allReceptionStats
+ = fSource->receptionStatsDB();
+
+ RTPReceptionStatsDB::Iterator iterator(allReceptionStats);
+ while (1) {
+ RTPReceptionStats* receptionStats = iterator.next();
+ if (receptionStats == NULL) break;
+ enqueueReportBlock(receptionStats);
+ }
+
+ allReceptionStats.reset(); // because we have just generated a report
+ }
+}
+
// Append one 6-word 'report block' for "stats" to the outgoing packet:
// SSRC, loss info, highest sequence number, jitter, LSR, and DLSR.
void
RTCPInstance::enqueueReportBlock(RTPReceptionStats* stats) {
  fOutBuf->enqueueWord(stats->SSRC());

  unsigned highestExtSeqNumReceived = stats->highestExtSeqNumReceived();

  // Cumulative number of packets lost, over the whole reception period:
  unsigned totNumExpected
    = highestExtSeqNumReceived - stats->baseExtSeqNumReceived();
  int totNumLost = totNumExpected - stats->totNumPacketsReceived();
  // 'Clamp' this loss number to a 24-bit signed value:
  if (totNumLost > 0x007FFFFF) {
    totNumLost = 0x007FFFFF;
  } else if (totNumLost < 0) {
    if (totNumLost < -0x00800000) totNumLost = 0x00800000; // unlikely, but...
    totNumLost &= 0x00FFFFFF; // keep only the low 24 bits (two's complement)
  }

  // The 'fraction lost' since the last report, as an 8-bit fixed-point fraction:
  unsigned numExpectedSinceLastReset
    = highestExtSeqNumReceived - stats->lastResetExtSeqNumReceived();
  int numLostSinceLastReset
    = numExpectedSinceLastReset - stats->numPacketsReceivedSinceLastReset();
  unsigned char lossFraction;
  if (numExpectedSinceLastReset == 0 || numLostSinceLastReset < 0) {
    lossFraction = 0; // avoid dividing by zero, and never report a 'negative' loss
  } else {
    lossFraction = (unsigned char)
      ((numLostSinceLastReset << 8) / numExpectedSinceLastReset);
  }

  fOutBuf->enqueueWord((lossFraction<<24) | totNumLost);
  fOutBuf->enqueueWord(highestExtSeqNumReceived);

  fOutBuf->enqueueWord(stats->jitter());

  // "LSR": the middle 32 bits of the NTP timestamp from the last SR that we
  // received from this source:
  unsigned NTPmsw = stats->lastReceivedSR_NTPmsw();
  unsigned NTPlsw = stats->lastReceivedSR_NTPlsw();
  unsigned LSR = ((NTPmsw&0xFFFF)<<16)|(NTPlsw>>16); // middle 32 bits
  fOutBuf->enqueueWord(LSR);

  // Figure out how long has elapsed since the last SR rcvd from this src:
  struct timeval const& LSRtime = stats->lastReceivedSR_time(); // "last SR"
  struct timeval timeNow, timeSinceLSR;
  gettimeofday(&timeNow, NULL);
  if (timeNow.tv_usec < LSRtime.tv_usec) {
    // borrow a second, so that the usec subtraction below can't go negative:
    timeNow.tv_usec += 1000000;
    timeNow.tv_sec -= 1;
  }
  timeSinceLSR.tv_sec = timeNow.tv_sec - LSRtime.tv_sec;
  timeSinceLSR.tv_usec = timeNow.tv_usec - LSRtime.tv_usec;
  // The enqueued time is in units of 1/65536 seconds.
  // (Note that 65536/1000000 == 1024/15625; the "<<11 ... /31250" below is
  //  that same ratio, with "+15625" providing rounding.)
  unsigned DLSR;
  if (LSR == 0) {
    DLSR = 0; // we've never received a SR from this source, so DLSR is undefined
  } else {
    DLSR = (timeSinceLSR.tv_sec<<16)
         | ( (((timeSinceLSR.tv_usec<<11)+15625)/31250) & 0xFFFF);
  }
  fOutBuf->enqueueWord(DLSR);
}
+
// Append a SDES packet - containing a single chunk with just a CNAME item -
// to the report being built up in "fOutBuf".
void RTCPInstance::addSDES() {
  // For now we support only the CNAME item; later support more #####

  // Begin by figuring out the size of the entire SDES report:
  unsigned numBytes = 4;
    // counts the SSRC, but not the header; it'll get subtracted out
  numBytes += fCNAME.totalSize(); // includes id and length
  numBytes += 1; // the special END item

  unsigned num4ByteWords = (numBytes + 3)/4; // round up to whole words

  unsigned rtcpHdr = 0x81000000; // version 2, no padding, 1 SSRC chunk
  rtcpHdr |= (RTCP_PT_SDES<<16);
  rtcpHdr |= num4ByteWords; // the length field (total words, excluding the header word)
  fOutBuf->enqueueWord(rtcpHdr);

  if (fSource != NULL) {
    fOutBuf->enqueueWord(fSource->SSRC());
  } else if (fSink != NULL) {
    fOutBuf->enqueueWord(fSink->SSRC());
  }

  // Add the CNAME:
  fOutBuf->enqueue(fCNAME.data(), fCNAME.totalSize());

  // Add the 'END' item (i.e., a zero byte), plus any more needed to pad:
  // (If we're already word-aligned, a full word of zeroes goes in, because at
  //  least one zero END byte is always required; this matches the rounding
  //  of "num4ByteWords" above.)
  unsigned numPaddingBytesNeeded = 4 - (fOutBuf->curPacketSize() % 4);
  unsigned char const zero = '\0';
  while (numPaddingBytesNeeded-- > 0) fOutBuf->enqueue(&zero, 1);
}
+
// Append a BYE packet (1 SSRC, plus an optional 'reason for leaving' string
// of up to 255 bytes) to the report being built up in "fOutBuf".
void RTCPInstance::addBYE(char const* reason) {
  u_int32_t rtcpHdr = 0x81000000; // version 2, no padding, 1 SSRC
  rtcpHdr |= (RTCP_PT_BYE<<16);
  u_int16_t num32BitWords = 2; // by default, two 32-bit words total (i.e., with 1 SSRC)
  u_int8_t reasonLength8Bits = 0; // by default
  if (reason != NULL) {
    // We need to add more 32-bit words for the 'length+reason':
    unsigned const reasonLength = strlen(reason);
    reasonLength8Bits = reasonLength < 0xFF ? (u_int8_t)reasonLength : 0xFF;
      // the length field is a single byte, so truncate longer reasons
    unsigned numExtraWords = ((1/*reason length field*/+reasonLength8Bits)+3)/4;

    num32BitWords += numExtraWords;
  }
  rtcpHdr |= (num32BitWords-1); // length field (words, excluding the header word)
  fOutBuf->enqueueWord(rtcpHdr);

  if (fSource != NULL) {
    fOutBuf->enqueueWord(fSource->SSRC());
  } else if (fSink != NULL) {
    fOutBuf->enqueueWord(fSink->SSRC());
  }

  num32BitWords -= 2; // ASSERT: num32BitWords >= 0
  if (num32BitWords > 0) {
    // Add a length+'reason for leaving':
    // First word: the 1-byte length, then (up to) the first 3 reason bytes:
    u_int32_t lengthPlusFirst3ReasonBytes = reasonLength8Bits<<24;
    unsigned index = 0;
    if (reasonLength8Bits > index) lengthPlusFirst3ReasonBytes |= ((u_int8_t)reason[index++])<<16;
    if (reasonLength8Bits > index) lengthPlusFirst3ReasonBytes |= ((u_int8_t)reason[index++])<<8;
    if (reasonLength8Bits > index) lengthPlusFirst3ReasonBytes |= (u_int8_t)reason[index++];
    fOutBuf->enqueueWord(lengthPlusFirst3ReasonBytes);

    // Any subsequent words: 4 more reason bytes each, zero-padded at the end:
    if (reasonLength8Bits > 3) {
      // ASSERT: num32BitWords > 1
      while (--num32BitWords > 0) {
	u_int32_t fourMoreReasonBytes = 0;
	if (reasonLength8Bits > index) fourMoreReasonBytes |= ((u_int8_t)reason[index++])<<24;
	if (reasonLength8Bits > index) fourMoreReasonBytes |= ((u_int8_t)reason[index++])<<16;
	if (reasonLength8Bits > index) fourMoreReasonBytes |= ((u_int8_t)reason[index++])<<8;
	if (reasonLength8Bits > index) fourMoreReasonBytes |= (u_int8_t)reason[index++];
	fOutBuf->enqueueWord(fourMoreReasonBytes);
      }
    }
  }
}
+
+void RTCPInstance::schedule(double nextTime) {
+ fNextReportTime = nextTime;
+
+ double secondsToDelay = nextTime - dTimeNow();
+ if (secondsToDelay < 0) secondsToDelay = 0;
+#ifdef DEBUG
+ fprintf(stderr, "schedule(%f->%f)\n", secondsToDelay, nextTime);
+#endif
+ int64_t usToGo = (int64_t)(secondsToDelay * 1000000);
+ nextTask() = envir().taskScheduler().scheduleDelayedTask(usToGo,
+ (TaskFunc*)RTCPInstance::onExpire, this);
+}
+
+void RTCPInstance::reschedule(double nextTime) {
+ envir().taskScheduler().unscheduleDelayedTask(nextTask());
+ schedule(nextTime);
+}
+
+void RTCPInstance::onExpire1() {
+ nextTask() = NULL;
+
+ // Note: fTotSessionBW is kbits per second
+ double rtcpBW = 0.05*fTotSessionBW*1024/8; // -> bytes per second
+
+ OnExpire(this, // event
+ numMembers(), // members
+ (fSink != NULL) ? 1 : 0, // senders
+ rtcpBW, // rtcp_bw
+ (fSink != NULL) ? 1 : 0, // we_sent
+ &fAveRTCPSize, // ave_rtcp_size
+ &fIsInitial, // initial
+ dTimeNow(), // tc
+ &fPrevReportTime, // tp
+ &fPrevNumMembers // pmembers
+ );
+}
+
+////////// SDESItem //////////
+
+SDESItem::SDESItem(unsigned char tag, unsigned char const* value) {
+ unsigned length = strlen((char const*)value);
+ if (length > 0xFF) length = 0xFF; // maximum data length for a SDES item
+
+ fData[0] = tag;
+ fData[1] = (unsigned char)length;
+ memmove(&fData[2], value, length);
+}
+
+unsigned SDESItem::totalSize() const {
+ return 2 + (unsigned)fData[1];
+}
+
+
+////////// Implementation of routines imported by the "rtcp_from_spec" C code
+
+extern "C" void Schedule(double nextTime, event e) {
+ RTCPInstance* instance = (RTCPInstance*)e;
+ if (instance == NULL) return;
+
+ instance->schedule(nextTime);
+}
+
+extern "C" void Reschedule(double nextTime, event e) {
+ RTCPInstance* instance = (RTCPInstance*)e;
+ if (instance == NULL) return;
+
+ instance->reschedule(nextTime);
+}
+
+extern "C" void SendRTCPReport(event e) {
+ RTCPInstance* instance = (RTCPInstance*)e;
+ if (instance == NULL) return;
+
+ instance->sendReport();
+}
+
+extern "C" void SendBYEPacket(event e) {
+ RTCPInstance* instance = (RTCPInstance*)e;
+ if (instance == NULL) return;
+
+ instance->sendBYE();
+}
+
+extern "C" int TypeOfEvent(event e) {
+ RTCPInstance* instance = (RTCPInstance*)e;
+ if (instance == NULL) return EVENT_UNKNOWN;
+
+ return instance->typeOfEvent();
+}
+
+extern "C" int SentPacketSize(event e) {
+ RTCPInstance* instance = (RTCPInstance*)e;
+ if (instance == NULL) return 0;
+
+ return instance->sentPacketSize();
+}
+
+extern "C" int PacketType(packet p) {
+ RTCPInstance* instance = (RTCPInstance*)p;
+ if (instance == NULL) return PACKET_UNKNOWN_TYPE;
+
+ return instance->packetType();
+}
+
+extern "C" int ReceivedPacketSize(packet p) {
+ RTCPInstance* instance = (RTCPInstance*)p;
+ if (instance == NULL) return 0;
+
+ return instance->receivedPacketSize();
+}
+
+extern "C" int NewMember(packet p) {
+ RTCPInstance* instance = (RTCPInstance*)p;
+ if (instance == NULL) return 0;
+
+ return instance->checkNewSSRC();
+}
+
+extern "C" int NewSender(packet /*p*/) {
+ return 0; // we don't yet recognize senders other than ourselves #####
+}
+
+extern "C" void AddMember(packet /*p*/) {
+ // Do nothing; all of the real work was done when NewMember() was called
+}
+
+extern "C" void AddSender(packet /*p*/) {
+ // we don't yet recognize senders other than ourselves #####
+}
+
+extern "C" void RemoveMember(packet p) {
+ RTCPInstance* instance = (RTCPInstance*)p;
+ if (instance == NULL) return;
+
+ instance->removeLastReceivedSSRC();
+}
+
+extern "C" void RemoveSender(packet /*p*/) {
+ // we don't yet recognize senders other than ourselves #####
+}
+
+extern "C" double drand30() {
+ unsigned tmp = our_random()&0x3FFFFFFF; // a random 30-bit integer
+ return tmp/(double)(1024*1024*1024);
+}
diff --git a/liveMedia/RTPInterface.cpp b/liveMedia/RTPInterface.cpp
new file mode 100644
index 0000000..78b37d4
--- /dev/null
+++ b/liveMedia/RTPInterface.cpp
@@ -0,0 +1,632 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An abstraction of a network interface used for RTP (or RTCP).
+// (This allows the RTP-over-TCP hack (RFC 2326, section 10.12) to
+// be implemented transparently.)
+// Implementation
+
+#include "RTPInterface.hh"
+#include <GroupsockHelper.hh>
+#include <stdio.h>
+
+////////// Helper Functions - Definition //////////
+
+// Helper routines and data structures, used to implement
+// sending/receiving RTP/RTCP over a TCP socket:
+
// One node in a singly-linked list of (TCP socket, stream channel id) pairs
// over which RTP/RTCP packets are also to be sent/received - the
// RTP-over-TCP 'interleaving' hack (RFC 2326, section 10.12).
class tcpStreamRecord {
  public:
  tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId,
		  tcpStreamRecord* next);
  virtual ~tcpStreamRecord();

public:
  tcpStreamRecord* fNext; // next record in the list (NULL terminates it)
  int fStreamSocketNum; // the TCP socket used for this stream
  unsigned char fStreamChannelId; // the interleaved channel id on that socket
};
+
+// Reading RTP-over-TCP is implemented using two levels of hash tables.
+// The top-level hash table maps TCP socket numbers to a
+// "SocketDescriptor" that contains a hash table for each of the
+// sub-channels that are reading from this socket.
+
+static HashTable* socketHashTable(UsageEnvironment& env, Boolean createIfNotPresent = True) {
+ _Tables* ourTables = _Tables::getOurTables(env, createIfNotPresent);
+ if (ourTables == NULL) return NULL;
+
+ if (ourTables->socketTable == NULL) {
+ // Create a new socket number -> SocketDescriptor mapping table:
+ ourTables->socketTable = HashTable::create(ONE_WORD_HASH_KEYS);
+ }
+ return (HashTable*)(ourTables->socketTable);
+}
+
// Per-TCP-socket state: reads interleaved frames from one TCP socket and
// dispatches each to the "RTPInterface" registered for its channel id.
class SocketDescriptor {
public:
  SocketDescriptor(UsageEnvironment& env, int socketNum);
  virtual ~SocketDescriptor();

  void registerRTPInterface(unsigned char streamChannelId,
			    RTPInterface* rtpInterface);
  RTPInterface* lookupRTPInterface(unsigned char streamChannelId);
  void deregisterRTPInterface(unsigned char streamChannelId);

  // Handler for bytes seen on the socket that are not part of an interleaved
  // frame (e.g., RTSP request data), plus the client data passed to it:
  void setServerRequestAlternativeByteHandler(ServerRequestAlternativeByteHandler* handler, void* clientData) {
    fServerRequestAlternativeByteHandler = handler;
    fServerRequestAlternativeByteHandlerClientData = clientData;
  }

private:
  static void tcpReadHandler(SocketDescriptor*, int mask); // scheduler trampoline
  Boolean tcpReadHandler1(int mask);

private:
  UsageEnvironment& fEnv;
  int fOurSocketNum; // the TCP socket we read from
  HashTable* fSubChannelHashTable; // channel id -> RTPInterface*
  ServerRequestAlternativeByteHandler* fServerRequestAlternativeByteHandler;
  void* fServerRequestAlternativeByteHandlerClientData;
  u_int8_t fStreamChannelId, fSizeByte1; // frame-header bytes saved between reads
  Boolean fReadErrorOccurred, fDeleteMyselfNext, fAreInReadHandlerLoop;
  // Parser state for the 4-byte '$'-framed header preceding each packet:
  enum { AWAITING_DOLLAR, AWAITING_STREAM_CHANNEL_ID, AWAITING_SIZE1, AWAITING_SIZE2, AWAITING_PACKET_DATA } fTCPReadingState;
};
+
// Look up (and, if "createIfNotFound", create) the "SocketDescriptor" for
// TCP socket "sockNum". Returns NULL if it's absent and we don't create it.
static SocketDescriptor* lookupSocketDescriptor(UsageEnvironment& env, int sockNum, Boolean createIfNotFound = True) {
  HashTable* table = socketHashTable(env, createIfNotFound);
  if (table == NULL) return NULL;

  char const* key = (char const*)(long)sockNum; // the socket number itself is the one-word hash key
  SocketDescriptor* socketDescriptor = (SocketDescriptor*)(table->Lookup(key));
  if (socketDescriptor == NULL) {
    if (createIfNotFound) {
      socketDescriptor = new SocketDescriptor(env, sockNum);
      table->Add((char const*)(long)(sockNum), socketDescriptor);
    } else if (table->IsEmpty()) {
      // We can also delete the table (to reclaim space):
      // (Note the order: fetch "_Tables" first, then delete the table and
      //  clear the pointer before asking "_Tables" to reclaim itself.)
      _Tables* ourTables = _Tables::getOurTables(env);
      delete table;
      ourTables->socketTable = NULL;
      ourTables->reclaimIfPossible();
    }
  }

  return socketDescriptor;
}
+
+static void removeSocketDescription(UsageEnvironment& env, int sockNum) {
+ char const* key = (char const*)(long)sockNum;
+ HashTable* table = socketHashTable(env);
+ table->Remove(key);
+
+ if (table->IsEmpty()) {
+ // We can also delete the table (to reclaim space):
+ _Tables* ourTables = _Tables::getOurTables(env);
+ delete table;
+ ourTables->socketTable = NULL;
+ ourTables->reclaimIfPossible();
+ }
+}
+
+
+////////// RTPInterface - Implementation //////////
+
// Wrap the (datagram) 'groupsock' "gs", on behalf of "owner"; TCP streams
// can be added later via addStreamSocket()/setStreamSocket().
RTPInterface::RTPInterface(Medium* owner, Groupsock* gs)
  : fOwner(owner), fGS(gs),
    fTCPStreams(NULL),
    fNextTCPReadSize(0), fNextTCPReadStreamSocketNum(-1),
    fNextTCPReadStreamChannelId(0xFF), fReadHandlerProc(NULL),
    fAuxReadHandlerFunc(NULL), fAuxReadHandlerClientData(NULL) {
  // Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive.
  // The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block,
  // even if the socket was previously reported (e.g., by "select()") as having data available.
  // (This can supposedly happen if the UDP checksum fails, for example.)
  makeSocketNonBlocking(fGS->socketNum());
  // Also enlarge the socket's send buffer (to 50 KB):
  increaseSendBufferTo(envir(), fGS->socketNum(), 50*1024);
}
+
RTPInterface::~RTPInterface() {
  // Stop all background reading, then release our list of TCP streams.
  stopNetworkReading();
  delete fTCPStreams;
    // NOTE(review): deleting only the head here presumes that
    // ~tcpStreamRecord deletes its "fNext" chain - confirm in its definition.
}
+
// Switch this interface over to a single RTP-over-TCP stream, abandoning
// (and closing) our datagram socket. (Call order below matters: stop using
// the datagram socket before resetting it.)
void RTPInterface::setStreamSocket(int sockNum,
				   unsigned char streamChannelId) {
  fGS->removeAllDestinations();
  envir().taskScheduler().disableBackgroundHandling(fGS->socketNum()); // turn off any reading on our datagram socket
  fGS->reset(); // and close our datagram socket, because we won't be using it anymore

  addStreamSocket(sockNum, streamChannelId);
}
+
+void RTPInterface::addStreamSocket(int sockNum,
+ unsigned char streamChannelId) {
+ if (sockNum < 0) return;
+
+ for (tcpStreamRecord* streams = fTCPStreams; streams != NULL;
+ streams = streams->fNext) {
+ if (streams->fStreamSocketNum == sockNum
+ && streams->fStreamChannelId == streamChannelId) {
+ return; // we already have it
+ }
+ }
+
+ fTCPStreams = new tcpStreamRecord(sockNum, streamChannelId, fTCPStreams);
+
+ // Also, make sure this new socket is set up for receiving RTP/RTCP-over-TCP:
+ SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), sockNum);
+ socketDescriptor->registerRTPInterface(streamChannelId, this);
+}
+
+static void deregisterSocket(UsageEnvironment& env, int sockNum, unsigned char streamChannelId) {
+ SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, sockNum, False);
+ if (socketDescriptor != NULL) {
+ socketDescriptor->deregisterRTPInterface(streamChannelId);
+ // Note: This may delete "socketDescriptor",
+ // if no more interfaces are using this socket
+ }
+}
+
void RTPInterface::removeStreamSocket(int sockNum,
				      unsigned char streamChannelId) {
  // Remove - from our list of 'TCP streams' - the record of the (sockNum,streamChannelId) pair.
  // (However "streamChannelId" == 0xFF is a special case, meaning remove all
  // (sockNum,*) pairs.)

  while (1) {
    tcpStreamRecord** streamsPtr = &fTCPStreams;

    while (*streamsPtr != NULL) {
      if ((*streamsPtr)->fStreamSocketNum == sockNum
	  && (streamChannelId == 0xFF || streamChannelId == (*streamsPtr)->fStreamChannelId)) {
	// Delete the record pointed to by *streamsPtr :
	unsigned char streamChannelIdToRemove = (*streamsPtr)->fStreamChannelId;
	tcpStreamRecord* next = (*streamsPtr)->fNext;
	(*streamsPtr)->fNext = NULL;
	  // unlink first - ~tcpStreamRecord presumably deletes its "fNext"
	  // chain, and we only want this one record destroyed
	delete (*streamsPtr);
	*streamsPtr = next;

	// And 'deregister' this socket,channelId pair:
	deregisterSocket(envir(), sockNum, streamChannelIdToRemove);

	if (streamChannelId != 0xFF) return; // we're done
	break; // start again from the beginning of the list, in case the list has changed
      } else {
	streamsPtr = &((*streamsPtr)->fNext);
      }
    }
    if (*streamsPtr == NULL) break; // we scanned the whole list without removing anything
  }
}
+
+void RTPInterface::setServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum,
+ ServerRequestAlternativeByteHandler* handler, void* clientData) {
+ SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, socketNum, False);
+
+ if (socketDescriptor != NULL) socketDescriptor->setServerRequestAlternativeByteHandler(handler, clientData);
+}
+
+void RTPInterface::clearServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum) {
+ setServerRequestAlternativeByteHandler(env, socketNum, NULL, NULL);
+}
+
+Boolean RTPInterface::sendPacket(unsigned char* packet, unsigned packetSize) {
+ Boolean success = True; // we'll return False instead if any of the sends fail
+
+ // Normal case: Send as a UDP packet:
+ if (!fGS->output(envir(), packet, packetSize)) success = False;
+
+ // Also, send over each of our TCP sockets:
+ tcpStreamRecord* nextStream;
+ for (tcpStreamRecord* stream = fTCPStreams; stream != NULL; stream = nextStream) {
+ nextStream = stream->fNext; // Set this now, in case the following deletes "stream":
+ if (!sendRTPorRTCPPacketOverTCP(packet, packetSize,
+ stream->fStreamSocketNum, stream->fStreamChannelId)) {
+ success = False;
+ }
+ }
+
+ return success;
+}
+
+// Arranges for "handlerProc" to be called whenever packet data arrives, on both the
+// datagram 'groupsock' and every registered RTP-over-TCP connection.
+void RTPInterface
+::startNetworkReading(TaskScheduler::BackgroundHandlerProc* handlerProc) {
+  // The normal (UDP) case:
+  envir().taskScheduler()
+    .turnOnBackgroundReadHandling(fGS->socketNum(), handlerProc, fOwner);
+
+  // The RTP-over-TCP case - register our subchannel on each connection's descriptor:
+  fReadHandlerProc = handlerProc;
+  for (tcpStreamRecord* rec = fTCPStreams; rec != NULL; rec = rec->fNext) {
+    // (This creates a "SocketDescriptor" for the socket if one doesn't exist yet)
+    SocketDescriptor* descriptor = lookupSocketDescriptor(envir(), rec->fStreamSocketNum);
+    descriptor->registerRTPInterface(rec->fStreamChannelId, this);
+  }
+}
+
+// Reads one incoming packet into "buffer".  Normally this reads a datagram from our
+// 'groupsock'; but if "fNextTCPReadStreamSocketNum" has been set (by
+// "SocketDescriptor::tcpReadHandler1()" below), it instead reads up to the announced
+// number of packet-data bytes from that TCP socket, setting "tcpSocketNum" and
+// "tcpStreamChannelId" accordingly.  Returns True iff the read succeeded;
+// "packetReadWasIncomplete" is set True (with a True return) if only part of the
+// announced TCP packet data has arrived so far.
+Boolean RTPInterface::handleRead(unsigned char* buffer, unsigned bufferMaxSize,
+				 unsigned& bytesRead, struct sockaddr_in& fromAddress,
+				 int& tcpSocketNum, unsigned char& tcpStreamChannelId,
+				 Boolean& packetReadWasIncomplete) {
+  packetReadWasIncomplete = False; // by default
+  Boolean readSuccess;
+  if (fNextTCPReadStreamSocketNum < 0) {
+    // Normal case: read from the (datagram) 'groupsock':
+    tcpSocketNum = -1;
+    readSuccess = fGS->handleRead(buffer, bufferMaxSize, bytesRead, fromAddress);
+  } else {
+    // Read from the TCP connection:
+    tcpSocketNum = fNextTCPReadStreamSocketNum;
+    tcpStreamChannelId = fNextTCPReadStreamChannelId;
+
+    bytesRead = 0;
+    unsigned totBytesToRead = fNextTCPReadSize;
+    if (totBytesToRead > bufferMaxSize) totBytesToRead = bufferMaxSize;
+    unsigned curBytesToRead = totBytesToRead;
+    int curBytesRead;
+    // Keep reading until we have all "totBytesToRead" bytes, or the socket runs dry
+    // (returns 0) or errors (returns <0):
+    while ((curBytesRead = readSocket(envir(), fNextTCPReadStreamSocketNum,
+				      &buffer[bytesRead], curBytesToRead,
+				      fromAddress)) > 0) {
+      bytesRead += curBytesRead;
+      if (bytesRead >= totBytesToRead) break;
+      curBytesToRead -= curBytesRead;
+    }
+    fNextTCPReadSize -= bytesRead; // how much of the announced packet is still unread
+    if (fNextTCPReadSize == 0) {
+      // We've read all of the data that we asked for
+      readSuccess = True;
+    } else if (curBytesRead < 0) {
+      // There was an error reading the socket
+      bytesRead = 0;
+      readSuccess = False;
+    } else {
+      // We need to read more bytes, and there was not an error reading the socket
+      // (note: "fNextTCPReadStreamSocketNum" is deliberately left set, so that a
+      // later call resumes reading the remainder of this packet)
+      packetReadWasIncomplete = True;
+      return True;
+    }
+    fNextTCPReadStreamSocketNum = -1; // default, for next time
+  }
+
+  if (readSuccess && fAuxReadHandlerFunc != NULL) {
+    // Also pass the newly-read packet data to our auxiliary handler:
+    (*fAuxReadHandlerFunc)(fAuxReadHandlerClientData, buffer, bytesRead);
+  }
+  return readSuccess;
+}
+
+// Undoes "startNetworkReading()": turns off background read handling on the
+// datagram 'groupsock' (if we still have one), and deregisters each TCP subchannel.
+void RTPInterface::stopNetworkReading() {
+  // The normal (UDP) case:
+  if (fGS != NULL) envir().taskScheduler().turnOffBackgroundReadHandling(fGS->socketNum());
+
+  // The RTP-over-TCP case:
+  tcpStreamRecord* rec = fTCPStreams;
+  while (rec != NULL) {
+    deregisterSocket(envir(), rec->fStreamSocketNum, rec->fStreamChannelId);
+    rec = rec->fNext;
+  }
+}
+
+
+////////// Helper Functions - Implementation /////////
+
+// Sends one RTP or RTCP packet over the TCP socket "socketNum", using the
+// '$'-framing defined in RFC 2326, section 10.12.  Returns True iff both the
+// 4-byte framing header and the packet data were successfully sent.
+Boolean RTPInterface::sendRTPorRTCPPacketOverTCP(u_int8_t* packet, unsigned packetSize,
+						 int socketNum, unsigned char streamChannelId) {
+#ifdef DEBUG_SEND
+  fprintf(stderr, "sendRTPorRTCPPacketOverTCP: %d bytes over channel %d (socket %d)\n",
+	  packetSize, streamChannelId, socketNum); fflush(stderr);
+#endif
+  // Send a RTP/RTCP packet over TCP, using the encoding defined in RFC 2326, section 10.12:
+  //     $<streamChannelId><packetSize><packet>
+  // (If the initial "send()" of '$<streamChannelId><packetSize>' succeeds, then we force
+  // the subsequent "send()" for the <packet> data to succeed, even if we have to do so with
+  // a blocking "send()".)
+  do { // one-shot loop; a 'break' below jumps to the failure path
+    u_int8_t framingHeader[4];
+    framingHeader[0] = '$';
+    framingHeader[1] = streamChannelId;
+    framingHeader[2] = (u_int8_t) ((packetSize&0xFF00)>>8);
+    framingHeader[3] = (u_int8_t) (packetSize&0xFF);
+    if (!sendDataOverTCP(socketNum, framingHeader, 4, False)) break;
+
+    if (!sendDataOverTCP(socketNum, packet, packetSize, True)) break;
+#ifdef DEBUG_SEND
+    fprintf(stderr, "sendRTPorRTCPPacketOverTCP: completed\n"); fflush(stderr);
+#endif
+
+    return True;
+  } while (0);
+
+#ifdef DEBUG_SEND
+  fprintf(stderr, "sendRTPorRTCPPacketOverTCP: failed! (errno %d)\n", envir().getErrno()); fflush(stderr);
+#endif
+  return False;
+}
+
+// Timeout (in milliseconds) used by the blocking "send()" fallback in
+// "sendDataOverTCP()" below; may be overridden at compile time:
+#ifndef RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS
+#define RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS 500
+#endif
+
+// Sends "dataSize" bytes over the TCP socket "socketNum".  The socket is normally
+// non-blocking; if "forceSendToSucceed" is True and the first "send()" is partial,
+// we temporarily make the socket blocking (with a timeout) and retry the remainder,
+// so that the RFC 2326 framing never ends up half-written.  On an unrecoverable
+// failure, the socket is removed from all "RTPInterface"s (via "removeStreamSocket()")
+// and False is returned.
+Boolean RTPInterface::sendDataOverTCP(int socketNum, u_int8_t const* data, unsigned dataSize, Boolean forceSendToSucceed) {
+  int sendResult = send(socketNum, (char const*)data, dataSize, 0/*flags*/);
+  if (sendResult < (int)dataSize) {
+    // The TCP send() failed - at least partially.
+
+    unsigned numBytesSentSoFar = sendResult < 0 ? 0 : (unsigned)sendResult;
+    if (numBytesSentSoFar > 0 || (forceSendToSucceed && envir().getErrno() == EAGAIN)) {
+      // The OS's TCP send buffer has filled up (because the stream's bitrate has exceeded
+      // the capacity of the TCP connection!).
+      // Force this data write to succeed, by blocking if necessary until it does:
+      unsigned numBytesRemainingToSend = dataSize - numBytesSentSoFar;
+#ifdef DEBUG_SEND
+      fprintf(stderr, "sendDataOverTCP: resending %d-byte send (blocking)\n", numBytesRemainingToSend); fflush(stderr);
+#endif
+      makeSocketBlocking(socketNum, RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS);
+      sendResult = send(socketNum, (char const*)(&data[numBytesSentSoFar]), numBytesRemainingToSend, 0/*flags*/);
+      if ((unsigned)sendResult != numBytesRemainingToSend) {
+	// The blocking "send()" failed, or timed out.  In either case, we assume that the
+	// TCP connection has failed (or is 'hanging' indefinitely), and we stop using it
+	// (for both RTP and RTCP).
+	// (If we kept using the socket here, the RTP or RTCP packet write would be in an
+	//  incomplete, inconsistent state.)
+#ifdef DEBUG_SEND
+	fprintf(stderr, "sendDataOverTCP: blocking send() failed (delivering %d bytes out of %d); closing socket %d\n", sendResult, numBytesRemainingToSend, socketNum); fflush(stderr);
+#endif
+	removeStreamSocket(socketNum, 0xFF); // 0xFF means: all channel ids on this socket
+	return False;
+      }
+      makeSocketNonBlocking(socketNum); // restore the socket's normal (non-blocking) state
+
+      return True;
+    } else if (sendResult < 0 && envir().getErrno() != EAGAIN) {
+      // Because the "send()" call failed, assume that the socket is now unusable, so stop
+      // using it (for both RTP and RTCP):
+      removeStreamSocket(socketNum, 0xFF);
+    }
+
+    return False;
+  }
+
+  return True;
+}
+
+// A "SocketDescriptor" tracks all of the RTP-over-TCP subchannels that share one
+// TCP socket.  The per-channel "RTPInterface"s are kept in a hash table keyed by
+// channel id; parsing of the '$'-framed input starts in state AWAITING_DOLLAR.
+SocketDescriptor::SocketDescriptor(UsageEnvironment& env, int socketNum)
+  :fEnv(env), fOurSocketNum(socketNum),
+    fSubChannelHashTable(HashTable::create(ONE_WORD_HASH_KEYS)),
+   fServerRequestAlternativeByteHandler(NULL), fServerRequestAlternativeByteHandlerClientData(NULL),
+   fReadErrorOccurred(False), fDeleteMyselfNext(False), fAreInReadHandlerLoop(False), fTCPReadingState(AWAITING_DOLLAR) {
+}
+
+SocketDescriptor::~SocketDescriptor() {
+  // Stop handling reads on - and forget about - our socket:
+  fEnv.taskScheduler().turnOffBackgroundReadHandling(fOurSocketNum);
+  removeSocketDescription(fEnv, fOurSocketNum);
+
+  if (fSubChannelHashTable != NULL) {
+    // Remove knowledge of this socket from any "RTPInterface"s that are using it:
+    HashTable::Iterator* iter = HashTable::Iterator::create(*fSubChannelHashTable);
+    RTPInterface* rtpInterface;
+    char const* key;
+
+    while ((rtpInterface = (RTPInterface*)(iter->next(key))) != NULL) {
+      // The hash 'key' is the channel id itself, stored as a fake pointer:
+      u_int64_t streamChannelIdLong = (u_int64_t)key;
+      unsigned char streamChannelId = (unsigned char)streamChannelIdLong;
+
+      rtpInterface->removeStreamSocket(fOurSocketNum, streamChannelId);
+    }
+    delete iter;
+
+    // Then remove the hash table entries themselves, and then remove the hash table:
+    while (fSubChannelHashTable->RemoveNext() != NULL) {}
+    delete fSubChannelHashTable;
+  }
+
+  // Finally:
+  if (fServerRequestAlternativeByteHandler != NULL) {
+    // Hack: Pass a special character to our alternative byte handler, to tell it that either
+    // - an error occurred when reading the TCP socket, or
+    // - no error occurred, but it needs to take over control of the TCP socket once again.
+    u_int8_t specialChar = fReadErrorOccurred ? 0xFF : 0xFE;
+    (*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, specialChar);
+  }
+}
+
+// Records that "rtpInterface" handles subchannel "streamChannelId" on our socket.
+// On the first registration, we also start background read handling on the socket.
+void SocketDescriptor::registerRTPInterface(unsigned char streamChannelId,
+					    RTPInterface* rtpInterface) {
+  Boolean isFirstRegistration = fSubChannelHashTable->IsEmpty();
+#if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE)
+  fprintf(stderr, "SocketDescriptor(socket %d)::registerRTPInterface(channel %d): isFirstRegistration %d\n", fOurSocketNum, streamChannelId, isFirstRegistration);
+#endif
+  // The channel id itself (cast to a pointer) is used as the hash key:
+  fSubChannelHashTable->Add((char const*)(long)streamChannelId,
+			    rtpInterface);
+
+  if (isFirstRegistration) {
+    // Arrange to handle reads on this TCP socket:
+    TaskScheduler::BackgroundHandlerProc* handler
+      = (TaskScheduler::BackgroundHandlerProc*)&tcpReadHandler;
+    fEnv.taskScheduler().
+      setBackgroundHandling(fOurSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION, handler, this);
+  }
+}
+
+// Returns the "RTPInterface" registered for "streamChannelId", or NULL if there is none.
+RTPInterface* SocketDescriptor
+::lookupRTPInterface(unsigned char streamChannelId) {
+  // The channel id itself (cast to a pointer) is the hash key:
+  return (RTPInterface*)(fSubChannelHashTable->Lookup((char const*)(long)streamChannelId));
+}
+
+// Removes the registration for "streamChannelId".  If no subchannels remain, this
+// "SocketDescriptor" deletes itself - deferred if we're currently inside
+// "tcpReadHandler()"'s loop.
+void SocketDescriptor
+::deregisterRTPInterface(unsigned char streamChannelId) {
+#if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE)
+  fprintf(stderr, "SocketDescriptor(socket %d)::deregisterRTPInterface(channel %d)\n", fOurSocketNum, streamChannelId);
+#endif
+  fSubChannelHashTable->Remove((char const*)(long)streamChannelId);
+
+  if (fSubChannelHashTable->IsEmpty()) {
+    // No more interfaces are using us, so it's curtains for us now:
+    if (fAreInReadHandlerLoop) {
+      fDeleteMyselfNext = True; // we can't delete ourself yet, but we'll do so from "tcpReadHandler()" below
+    } else {
+      delete this;
+    }
+  }
+}
+
+// Static background-read callback for the TCP socket (installed by
+// "registerRTPInterface()" above).  Drains as much input as "tcpReadHandler1()"
+// will give us, then performs any deletion that was deferred during the loop.
+void SocketDescriptor::tcpReadHandler(SocketDescriptor* socketDescriptor, int mask) {
+  // Call the read handler until it returns false, with a limit to avoid starving other sockets
+  unsigned count = 2000;
+  socketDescriptor->fAreInReadHandlerLoop = True;
+  while (!socketDescriptor->fDeleteMyselfNext && socketDescriptor->tcpReadHandler1(mask) && --count > 0) {}
+  socketDescriptor->fAreInReadHandlerLoop = False;
+  if (socketDescriptor->fDeleteMyselfNext) delete socketDescriptor;
+}
+
+// One step of the incremental '$'-framing parser.  Returns True iff it should be
+// called again immediately (i.e., more input may be available and we didn't just
+// hand control to a subchannel's read handler).
+Boolean SocketDescriptor::tcpReadHandler1(int mask) {
+  // We expect the following data over the TCP channel:
+  //   optional RTSP command or response bytes (before the first '$' character)
+  //   a '$' character
+  //   a 1-byte channel id
+  //   a 2-byte packet size (in network byte order)
+  //   the packet data.
+  // However, because the socket is being read asynchronously, this data might arrive in pieces.
+
+  u_int8_t c;
+  struct sockaddr_in fromAddress;
+  if (fTCPReadingState != AWAITING_PACKET_DATA) {
+    // In every state except AWAITING_PACKET_DATA, we consume exactly one byte here:
+    int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress);
+    if (result == 0) { // There was no more data to read
+      return False;
+    } else if (result != 1) { // error reading TCP socket, so we will no longer handle it
+#ifdef DEBUG_RECEIVE
+      fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result);
+#endif
+      fReadErrorOccurred = True;
+      fDeleteMyselfNext = True;
+      return False;
+    }
+  }
+
+  Boolean callAgain = True;
+  switch (fTCPReadingState) {
+    case AWAITING_DOLLAR: {
+      if (c == '$') {
+#ifdef DEBUG_RECEIVE
+	fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw '$'\n", fOurSocketNum);
+#endif
+	fTCPReadingState = AWAITING_STREAM_CHANNEL_ID;
+      } else {
+	// This character is part of a RTSP request or command, which is handled separately:
+	if (fServerRequestAlternativeByteHandler != NULL && c != 0xFF && c != 0xFE) {
+	  // Hack: 0xFF and 0xFE are used as special signaling characters, so don't send them
+	  (*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, c);
+	}
+      }
+      break;
+    }
+    case AWAITING_STREAM_CHANNEL_ID: {
+      // The byte that we read is the stream channel id.
+      if (lookupRTPInterface(c) != NULL) { // sanity check
+	fStreamChannelId = c;
+	fTCPReadingState = AWAITING_SIZE1;
+      } else {
+	// This wasn't a stream channel id that we expected.  We're (somehow) in a strange state.  Try to recover:
+#ifdef DEBUG_RECEIVE
+	fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw nonexistent stream channel id: 0x%02x\n", fOurSocketNum, c);
+#endif
+	fTCPReadingState = AWAITING_DOLLAR;
+      }
+      break;
+    }
+    case AWAITING_SIZE1: {
+      // The byte that we read is the first (high) byte of the 16-bit RTP or RTCP packet 'size'.
+      fSizeByte1 = c;
+      fTCPReadingState = AWAITING_SIZE2;
+      break;
+    }
+    case AWAITING_SIZE2: {
+      // The byte that we read is the second (low) byte of the 16-bit RTP or RTCP packet 'size'.
+      unsigned short size = (fSizeByte1<<8)|c;
+
+      // Record the information about the packet data that will be read next:
+      RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId);
+      if (rtpInterface != NULL) {
+	rtpInterface->fNextTCPReadSize = size;
+	rtpInterface->fNextTCPReadStreamSocketNum = fOurSocketNum;
+	rtpInterface->fNextTCPReadStreamChannelId = fStreamChannelId;
+      }
+      fTCPReadingState = AWAITING_PACKET_DATA;
+      break;
+    }
+    case AWAITING_PACKET_DATA: {
+      callAgain = False;
+      fTCPReadingState = AWAITING_DOLLAR; // the next state, unless we end up having to read more data in the current state
+      // Call the appropriate read handler to get the packet data from the TCP stream:
+      RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId);
+      if (rtpInterface != NULL) {
+	if (rtpInterface->fNextTCPReadSize == 0) {
+	  // We've already read all the data for this packet.
+	  break;
+	}
+	if (rtpInterface->fReadHandlerProc != NULL) {
+#ifdef DEBUG_RECEIVE
+	  fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): reading %d bytes on channel %d\n", fOurSocketNum, rtpInterface->fNextTCPReadSize, rtpInterface->fNextTCPReadStreamChannelId);
+#endif
+	  // The subchannel's handler will consume the packet data (possibly only
+	  // partially); remain in this state until it has all been read:
+	  fTCPReadingState = AWAITING_PACKET_DATA;
+	  rtpInterface->fReadHandlerProc(rtpInterface->fOwner, mask);
+	} else {
+#ifdef DEBUG_RECEIVE
+	  fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No handler proc for \"rtpInterface\" for channel %d; need to skip %d remaining bytes\n", fOurSocketNum, fStreamChannelId, rtpInterface->fNextTCPReadSize);
+#endif
+	  // No handler: discard the packet data one byte at a time:
+	  int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress);
+	  if (result < 0) { // error reading TCP socket, so we will no longer handle it
+#ifdef DEBUG_RECEIVE
+	    fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result);
+#endif
+	    fReadErrorOccurred = True;
+	    fDeleteMyselfNext = True;
+	    return False;
+	  } else {
+	    fTCPReadingState = AWAITING_PACKET_DATA;
+	    if (result == 1) {
+	      --rtpInterface->fNextTCPReadSize;
+	      callAgain = True;
+	    }
+	  }
+	}
+      }
+#ifdef DEBUG_RECEIVE
+      else fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No \"rtpInterface\" for channel %d\n", fOurSocketNum, fStreamChannelId);
+#endif
+    }
+  }
+
+  return callAgain;
+}
+
+
+////////// tcpStreamRecord implementation //////////
+
+// One node of a singly-linked list recording a (TCP socket, channel id) pair over
+// which this "RTPInterface" also streams.  The new node is prepended via "next".
+tcpStreamRecord
+::tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId,
+		  tcpStreamRecord* next)
+  : fNext(next),
+    fStreamSocketNum(streamSocketNum), fStreamChannelId(streamChannelId) {
+}
+
+tcpStreamRecord::~tcpStreamRecord() {
+  // Note: deleting "fNext" recursively deletes the remainder of the list.
+  delete fNext;
+}
diff --git a/liveMedia/RTPSink.cpp b/liveMedia/RTPSink.cpp
new file mode 100644
index 0000000..dd288f2
--- /dev/null
+++ b/liveMedia/RTPSink.cpp
@@ -0,0 +1,358 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP Sinks
+// Implementation
+
+#include "RTPSink.hh"
+#include "GroupsockHelper.hh"
+
+////////// RTPSink //////////
+
+// Looks up a previously-created medium by name.  Succeeds (returning True, with
+// "resultSink" set) only if the medium exists and really is a "RTPSink".
+Boolean RTPSink::lookupByName(UsageEnvironment& env, char const* sinkName,
+			      RTPSink*& resultSink) {
+  resultSink = NULL; // set below iff we succeed
+
+  MediaSink* sink;
+  if (MediaSink::lookupByName(env, sinkName, sink)) {
+    if (sink->isRTPSink()) {
+      resultSink = (RTPSink*)sink;
+      return True;
+    }
+    env.setResultMsg(sinkName, " is not a RTP sink");
+  }
+
+  return False;
+}
+
+// Used by "lookupByName()" above to check that a looked-up "MediaSink" is really a "RTPSink":
+Boolean RTPSink::isRTPSink() const {
+  return True;
+}
+
+// Creates a RTP sink that sends via "rtpGS".  The SSRC, the initial sequence
+// number, and the initial timestamp base are all chosen randomly.
+RTPSink::RTPSink(UsageEnvironment& env,
+		 Groupsock* rtpGS, unsigned char rtpPayloadType,
+		 unsigned rtpTimestampFrequency,
+		 char const* rtpPayloadFormatName,
+		 unsigned numChannels)
+  : MediaSink(env), fRTPInterface(this, rtpGS),
+    fRTPPayloadType(rtpPayloadType),
+    fPacketCount(0), fOctetCount(0), fTotalOctetCount(0),
+    fTimestampFrequency(rtpTimestampFrequency), fNextTimestampHasBeenPreset(False), fEnableRTCPReports(True),
+    fNumChannels(numChannels), fEstimatedBitrate(0) {
+  fRTPPayloadFormatName
+    = strDup(rtpPayloadFormatName == NULL ? "???" : rtpPayloadFormatName);
+  gettimeofday(&fCreationTime, NULL);
+  fTotalOctetCountStartTime = fCreationTime;
+  resetPresentationTimes();
+
+  // Choose random initial values for the sequence number, SSRC, and timestamp base:
+  fSeqNo = (u_int16_t)our_random();
+  fSSRC = our_random32();
+  fTimestampBase = our_random32();
+
+  fTransmissionStatsDB = new RTPTransmissionStatsDB(*this);
+}
+
+RTPSink::~RTPSink() {
+  delete fTransmissionStatsDB;
+  delete[] (char*)fRTPPayloadFormatName; // heap-allocated by "strDup()" in our constructor
+  fRTPInterface.forgetOurGroupsock();
+  // so that the "fRTPInterface" destructor doesn't turn off background read handling (in case
+  // its 'groupsock' is being shared with something else that does background read handling).
+}
+
+// Converts a wall-clock time ("tv") to a 32-bit RTP timestamp, by scaling it to
+// "fTimestampFrequency" units (with rounding) and adding "fTimestampBase".  If a
+// timestamp was preset via "presetNextTimestamp()", the base is first adjusted so
+// that this call returns exactly the preset value.
+u_int32_t RTPSink::convertToRTPTimestamp(struct timeval tv) {
+  // Begin by converting from "struct timeval" units to RTP timestamp units:
+  u_int32_t timestampIncrement = (fTimestampFrequency*tv.tv_sec);
+  timestampIncrement += (u_int32_t)(fTimestampFrequency*(tv.tv_usec/1000000.0) + 0.5); // note: rounding
+
+  // Then add this to our 'timestamp base':
+  if (fNextTimestampHasBeenPreset) {
+    // Make the returned timestamp the same as the current "fTimestampBase",
+    // so that timestamps begin with the value that was previously preset:
+    fTimestampBase -= timestampIncrement;
+    fNextTimestampHasBeenPreset = False;
+  }
+
+  u_int32_t const rtpTimestamp = fTimestampBase + timestampIncrement;
+#ifdef DEBUG_TIMESTAMPS
+  fprintf(stderr, "fTimestampBase: 0x%08x, tv: %lu.%06ld\n\t=> RTP timestamp: 0x%08x\n",
+	  fTimestampBase, tv.tv_sec, tv.tv_usec, rtpTimestamp);
+  fflush(stderr);
+#endif
+
+  return rtpTimestamp;
+}
+
+// Arranges for the next RTP timestamp to be the one corresponding to 'now', and
+// returns that value.  (The adjustment is skipped if the 'groupsock' already has
+// multiple destinations, so an ongoing stream's timestamps aren't disturbed.)
+u_int32_t RTPSink::presetNextTimestamp() {
+  struct timeval timeNow;
+  gettimeofday(&timeNow, NULL);
+
+  u_int32_t tsNow = convertToRTPTimestamp(timeNow);
+  if (!groupsockBeingUsed().hasMultipleDestinations()) {
+    // Don't adjust the timestamp stream if we already have another destination ongoing
+    fTimestampBase = tsNow;
+    fNextTimestampHasBeenPreset = True;
+  }
+
+  return tsNow;
+}
+
+// Returns (via the result parameters) the number of bytes sent, and the elapsed
+// time, since this function was last called - then resets both counters, so that
+// the next call reports the interval starting now.
+void RTPSink::getTotalBitrate(unsigned& outNumBytes, double& outElapsedTime) {
+  struct timeval now;
+  gettimeofday(&now, NULL);
+
+  outNumBytes = fTotalOctetCount;
+  double wholeSeconds = (double)(now.tv_sec - fTotalOctetCountStartTime.tv_sec);
+  double fractionalSeconds = (now.tv_usec - fTotalOctetCountStartTime.tv_usec)/1000000.0;
+  outElapsedTime = wholeSeconds + fractionalSeconds;
+
+  // Reset for the next measurement interval:
+  fTotalOctetCount = 0;
+  fTotalOctetCountStartTime = now;
+}
+
+// Zeroes both the initial and the most-recent presentation times:
+void RTPSink::resetPresentationTimes() {
+  fInitialPresentationTime.tv_sec = 0; fInitialPresentationTime.tv_usec = 0;
+  fMostRecentPresentationTime.tv_sec = 0; fMostRecentPresentationTime.tv_usec = 0;
+}
+
+// The default SDP media (m=) type; subclasses redefine this (e.g., for audio or video media):
+char const* RTPSink::sdpMediaType() const {
+  return "data";
+  // default SDP media (m=) type, unless redefined by subclasses
+}
+
+// Returns a heap-allocated SDP "a=rtpmap:" line for our payload format (the caller
+// is responsible for delete[]ing it).  Static payload types (<96) yield an empty
+// string, because no "a=rtpmap:" line is needed for them.
+char* RTPSink::rtpmapLine() const {
+  if (rtpPayloadType() >= 96) { // the payload format type is dynamic
+    char* encodingParamsPart;
+    if (numChannels() != 1) {
+      encodingParamsPart = new char[1 + 20 /* max int len */];
+      sprintf(encodingParamsPart, "/%d", numChannels());
+    } else {
+      encodingParamsPart = strDup("");
+    }
+    char const* const rtpmapFmt = "a=rtpmap:%d %s/%d%s\r\n";
+    unsigned rtpmapFmtSize = strlen(rtpmapFmt)
+      + 3 /* max char len */ + strlen(rtpPayloadFormatName())
+      + 20 /* max int len */ + strlen(encodingParamsPart);
+    char* rtpmapLine = new char[rtpmapFmtSize];
+    sprintf(rtpmapLine, rtpmapFmt,
+	    rtpPayloadType(), rtpPayloadFormatName(),
+	    rtpTimestampFrequency(), encodingParamsPart);
+    delete[] encodingParamsPart;
+
+    return rtpmapLine;
+  } else {
+    // The payload format is static, so there's no "a=rtpmap:" line:
+    return strDup("");
+  }
+}
+
+// Subclasses redefine this to return any additional, format-specific SDP line(s):
+char const* RTPSink::auxSDPLine() {
+  return NULL; // by default
+}
+
+
+////////// RTPTransmissionStatsDB //////////
+
+// Creates an (initially empty) database of per-receiver transmission statistics,
+// keyed by the receiver's SSRC:
+RTPTransmissionStatsDB::RTPTransmissionStatsDB(RTPSink& rtpSink)
+  : fOurRTPSink(rtpSink),
+    fTable(HashTable::create(ONE_WORD_HASH_KEYS)) {
+  fNumReceivers=0;
+}
+
+RTPTransmissionStatsDB::~RTPTransmissionStatsDB() {
+  // Delete each stats record as we pull it from the table; then delete the table itself:
+  void* record;
+  while ((record = fTable->RemoveNext()) != NULL) {
+    delete (RTPTransmissionStats*)record;
+  }
+
+  delete fTable;
+}
+
+// Processes an incoming RTCP "RR" (receiver report) from receiver "SSRC", creating
+// a stats record for that receiver the first time we hear from it.
+void RTPTransmissionStatsDB
+::noteIncomingRR(u_int32_t SSRC, struct sockaddr_in const& lastFromAddress,
+		 unsigned lossStats, unsigned lastPacketNumReceived,
+		 unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime) {
+  RTPTransmissionStats* stats = lookup(SSRC);
+  if (stats == NULL) {
+    // This is the first time we've heard of this SSRC.
+    // Create a new record for it:
+    stats = new RTPTransmissionStats(fOurRTPSink, SSRC);
+    if (stats == NULL) return;
+    add(SSRC, stats);
+#ifdef DEBUG_RR
+    fprintf(stderr, "Adding new entry for SSRC %x in RTPTransmissionStatsDB\n", SSRC);
+#endif
+  }
+
+  stats->noteIncomingRR(lastFromAddress,
+			lossStats, lastPacketNumReceived, jitter,
+			lastSRTime, diffSR_RRTime);
+}
+
+// Removes (and deletes) the stats record for "SSRC", if any, and decrements the
+// receiver count accordingly:
+void RTPTransmissionStatsDB::removeRecord(u_int32_t SSRC) {
+  RTPTransmissionStats* stats = lookup(SSRC);
+  if (stats == NULL) return; // we have no record of this receiver
+
+  fTable->Remove((char const*)(long)SSRC);
+  --fNumReceivers;
+  delete stats;
+}
+
+// An iterator over every stats record in "receptionStatsDB":
+RTPTransmissionStatsDB::Iterator
+::Iterator(RTPTransmissionStatsDB& receptionStatsDB)
+  : fIter(HashTable::Iterator::create(*(receptionStatsDB.fTable))) {
+}
+
+RTPTransmissionStatsDB::Iterator::~Iterator() {
+  delete fIter; // release the underlying hash-table iterator
+}
+
+// Returns the next stats record in the database, or NULL once all have been visited:
+RTPTransmissionStats*
+RTPTransmissionStatsDB::Iterator::next() {
+  char const* unusedKey; // the underlying iterator requires a key out-parameter
+  return (RTPTransmissionStats*)(fIter->next(unusedKey));
+}
+
+// Returns the stats record for receiver "SSRC", or NULL if we have none:
+RTPTransmissionStats* RTPTransmissionStatsDB::lookup(u_int32_t SSRC) const {
+  // The SSRC itself (cast to a pointer) serves as the hash key:
+  return (RTPTransmissionStats*)(fTable->Lookup((char const*)(long)SSRC));
+}
+
+// Adds a stats record for receiver "SSRC", incrementing the receiver count:
+void RTPTransmissionStatsDB::add(u_int32_t SSRC, RTPTransmissionStats* stats) {
+  // The SSRC itself (cast to a pointer) serves as the hash key:
+  fTable->Add((char const*)(long)SSRC, stats);
+
+  ++fNumReceivers;
+}
+
+
+////////// RTPTransmissionStats //////////
+
+// A per-receiver statistics record.  We snapshot the sink's current octet and
+// packet counts here, so that later RRs can be charged only with the data sent
+// since this receiver first appeared.
+RTPTransmissionStats::RTPTransmissionStats(RTPSink& rtpSink, u_int32_t SSRC)
+  : fOurRTPSink(rtpSink), fSSRC(SSRC), fLastPacketNumReceived(0),
+    fPacketLossRatio(0), fTotNumPacketsLost(0), fJitter(0),
+    fLastSRTime(0), fDiffSR_RRTime(0), fAtLeastTwoRRsHaveBeenReceived(False), fFirstPacket(True),
+    fTotalOctetCount_hi(0), fTotalOctetCount_lo(0),
+    fTotalPacketCount_hi(0), fTotalPacketCount_lo(0) {
+  gettimeofday(&fTimeCreated, NULL);
+
+  fLastOctetCount = rtpSink.octetCount();
+  fLastPacketCount = rtpSink.packetCount();
+}
+
+RTPTransmissionStats::~RTPTransmissionStats() {} // nothing to release; we own no heap data
+
+// Records the contents of one incoming RTCP "RR" from this receiver, and updates
+// our wrap-aware 64-bit (hi/lo) totals of octets and packets sent towards it.
+void RTPTransmissionStats
+::noteIncomingRR(struct sockaddr_in const& lastFromAddress,
+		 unsigned lossStats, unsigned lastPacketNumReceived,
+		 unsigned jitter, unsigned lastSRTime,
+		 unsigned diffSR_RRTime) {
+  if (fFirstPacket) {
+    fFirstPacket = False;
+    fFirstPacketNumReported = lastPacketNumReceived;
+  } else {
+    // Keep the previous RR's values, for the "...BetweenRR()" accessors below:
+    fAtLeastTwoRRsHaveBeenReceived = True;
+    fOldLastPacketNumReceived = fLastPacketNumReceived;
+    fOldTotNumPacketsLost = fTotNumPacketsLost;
+  }
+  gettimeofday(&fTimeReceived, NULL);
+
+  fLastFromAddress = lastFromAddress;
+  // "lossStats" packs the loss fraction (high byte) and the 24-bit cumulative loss count:
+  fPacketLossRatio = lossStats>>24;
+  fTotNumPacketsLost = lossStats&0xFFFFFF;
+  fLastPacketNumReceived = lastPacketNumReceived;
+  fJitter = jitter;
+  fLastSRTime = lastSRTime;
+  fDiffSR_RRTime = diffSR_RRTime;
+#ifdef DEBUG_RR
+  fprintf(stderr, "RTCP RR data (received at %lu.%06ld): lossStats 0x%08x, lastPacketNumReceived 0x%08x, jitter 0x%08x, lastSRTime 0x%08x, diffSR_RRTime 0x%08x\n",
+	  fTimeReceived.tv_sec, fTimeReceived.tv_usec, lossStats, lastPacketNumReceived, jitter, lastSRTime, diffSR_RRTime);
+  unsigned rtd = roundTripDelay();
+  fprintf(stderr, "=> round-trip delay: 0x%04x (== %f seconds)\n", rtd, rtd/65536.0);
+#endif
+
+  // Update our counts of the total number of octets and packets sent towards
+  // this receiver:
+  u_int32_t newOctetCount = fOurRTPSink.octetCount();
+  u_int32_t octetCountDiff = newOctetCount - fLastOctetCount;
+  fLastOctetCount = newOctetCount;
+  u_int32_t prevTotalOctetCount_lo = fTotalOctetCount_lo;
+  fTotalOctetCount_lo += octetCountDiff;
+  if (fTotalOctetCount_lo < prevTotalOctetCount_lo) { // wrap around
+    ++fTotalOctetCount_hi;
+  }
+
+  u_int32_t newPacketCount = fOurRTPSink.packetCount();
+  u_int32_t packetCountDiff = newPacketCount - fLastPacketCount;
+  fLastPacketCount = newPacketCount;
+  u_int32_t prevTotalPacketCount_lo = fTotalPacketCount_lo;
+  fTotalPacketCount_lo += packetCountDiff;
+  if (fTotalPacketCount_lo < prevTotalPacketCount_lo) { // wrap around
+    ++fTotalPacketCount_hi;
+  }
+}
+
+// Computes the round-trip delay (in 1/65536-second units) indicated by the most
+// recently-received RTCP RR.
+unsigned RTPTransmissionStats::roundTripDelay() const {
+  // Compute the round-trip delay that was indicated by the most recently-received
+  // RTCP RR packet.  Use the method noted in the RTP/RTCP specification (RFC 3550).
+
+  if (fLastSRTime == 0) {
+    // Either no RTCP RR packet has been received yet, or else the
+    // reporting receiver has not yet received any RTCP SR packets from us:
+    return 0;
+  }
+
+  // First, convert the time that we received the last RTCP RR packet to NTP format,
+  // in units of 1/65536 (2^-16) seconds:
+  unsigned lastReceivedTimeNTP_high
+    = fTimeReceived.tv_sec + 0x83AA7E80; // 1970 epoch -> 1900 epoch
+  double fractionalPart = (fTimeReceived.tv_usec*0x0400)/15625.0; // 2^16/10^6
+  unsigned lastReceivedTimeNTP
+    = (unsigned)((lastReceivedTimeNTP_high<<16) + fractionalPart + 0.5);
+
+  // The delay is (time RR received) - (time SR sent) - (receiver's SR->RR processing delay):
+  int rawResult = lastReceivedTimeNTP - fLastSRTime - fDiffSR_RRTime;
+  if (rawResult < 0) {
+    // This can happen if there's clock drift between the sender and receiver,
+    // and if the round-trip time was very small.
+    rawResult = 0;
+  }
+  return (unsigned)rawResult;
+}
+
+// Returns (via "hi"/"lo") our 64-bit running total of octets sent towards this receiver:
+void RTPTransmissionStats::getTotalOctetCount(u_int32_t& hi, u_int32_t& lo) {
+  hi = fTotalOctetCount_hi;
+  lo = fTotalOctetCount_lo;
+}
+
+// Returns (via "hi"/"lo") our 64-bit running total of packets sent towards this receiver:
+void RTPTransmissionStats::getTotalPacketCount(u_int32_t& hi, u_int32_t& lo) {
+  hi = fTotalPacketCount_hi;
+  lo = fTotalPacketCount_lo;
+}
+
+// The number of packets this receiver reported receiving between its two most
+// recent RRs (0 until at least two RRs have arrived):
+unsigned RTPTransmissionStats::packetsReceivedSinceLastRR() const {
+  return fAtLeastTwoRRsHaveBeenReceived
+    ? fLastPacketNumReceived - fOldLastPacketNumReceived
+    : 0;
+}
+
+// The change in this receiver's cumulative loss count between its two most recent
+// RRs (0 until at least two RRs have arrived; may be negative after a correction):
+int RTPTransmissionStats::packetsLostBetweenRR() const {
+  if (!fAtLeastTwoRRsHaveBeenReceived) return 0;
+  return (int)(fTotNumPacketsLost - fOldTotNumPacketsLost);
+}
diff --git a/liveMedia/RTPSource.cpp b/liveMedia/RTPSource.cpp
new file mode 100644
index 0000000..bfbf672
--- /dev/null
+++ b/liveMedia/RTPSource.cpp
@@ -0,0 +1,409 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP Sources
+// Implementation
+
+#include "RTPSource.hh"
+#include "GroupsockHelper.hh"
+
+////////// RTPSource //////////
+
+Boolean RTPSource::lookupByName(UsageEnvironment& env,
+				char const* sourceName,
+				RTPSource*& resultSource) {
+  // Look the name up as a generic "MediaSource" first, then verify that
+  // what we found is in fact a RTP source before downcasting it:
+  resultSource = NULL; // default: failure
+
+  MediaSource* ms;
+  if (!MediaSource::lookupByName(env, sourceName, ms)) return False;
+  if (!ms->isRTPSource()) {
+    env.setResultMsg(sourceName, " is not a RTP source");
+    return False;
+  }
+
+  resultSource = (RTPSource*)ms;
+  return True;
+}
+
+Boolean RTPSource::hasBeenSynchronizedUsingRTCP() {
+  // True once the most recently delivered packet's presentation time was
+  // derived from a RTCP SR, rather than from the local 'wall clock':
+  return fCurPacketHasBeenSynchronizedUsingRTCP;
+}
+
+Boolean RTPSource::isRTPSource() const {
+  return True; // lets "lookupByName()" above validate its downcast without RTTI
+}
+
+RTPSource::RTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+		     unsigned char rtpPayloadFormat,
+		     u_int32_t rtpTimestampFrequency)
+  // Note: the SSRC is chosen randomly, per RFC 3550; RTCP reports are
+  // enabled by default.
+  : FramedSource(env),
+    fRTPInterface(this, RTPgs),
+    fCurPacketHasBeenSynchronizedUsingRTCP(False), fLastReceivedSSRC(0),
+    fRTCPInstanceForMultiplexedRTCPPackets(NULL), fCrypto(NULL),
+    fRTPPayloadFormat(rtpPayloadFormat), fTimestampFrequency(rtpTimestampFrequency),
+    fSSRC(our_random32()), fEnableRTCPReports(True) {
+  // Per-SSRC reception statistics, fed by each incoming packet:
+  fReceptionStatsDB = new RTPReceptionStatsDB();
+}
+
+RTPSource::~RTPSource() {
+  delete fReceptionStatsDB;
+}
+
+void RTPSource::getAttributes() const {
+  // Default implementation: no attributes; subclasses may override.
+  envir().setResultMsg(""); // Fix later to get attributes from header #####
+}
+
+
+////////// RTPReceptionStatsDB //////////
+
+RTPReceptionStatsDB::RTPReceptionStatsDB()
+  // SSRCs are 32-bit values, so a one-word-key hash table suffices:
+  : fTable(HashTable::create(ONE_WORD_HASH_KEYS)), fTotNumPacketsReceived(0) {
+  reset(); // start the first reporting interval
+}
+
+void RTPReceptionStatsDB::reset() {
+  // Begin a new reporting interval: clear the active-source count, and
+  // ask every per-SSRC record to reset its own per-interval state.
+  fNumActiveSourcesSinceLastReset = 0;
+
+  Iterator it(*this);
+  for (RTPReceptionStats* record = it.next(); record != NULL; record = it.next()) {
+    record->reset();
+  }
+}
+
+RTPReceptionStatsDB::~RTPReceptionStatsDB() {
+  // Drain the hash table, deleting each per-SSRC record as it's removed,
+  // then dispose of the (now empty) table itself:
+  for (;;) {
+    RTPReceptionStats* record = (RTPReceptionStats*)fTable->RemoveNext();
+    if (record == NULL) break;
+    delete record;
+  }
+
+  delete fTable;
+}
+
+void RTPReceptionStatsDB
+::noteIncomingPacket(u_int32_t SSRC, u_int16_t seqNum,
+		     u_int32_t rtpTimestamp, unsigned timestampFrequency,
+		     Boolean useForJitterCalculation,
+		     struct timeval& resultPresentationTime,
+		     Boolean& resultHasBeenSyncedUsingRTCP,
+		     unsigned packetSize) {
+  ++fTotNumPacketsReceived;
+
+  // Find - or, for a brand-new SSRC, lazily create - the per-source record:
+  RTPReceptionStats* ssrcStats = lookup(SSRC);
+  if (ssrcStats == NULL) {
+    ssrcStats = new RTPReceptionStats(SSRC, seqNum);
+    if (ssrcStats == NULL) return;
+    add(SSRC, ssrcStats);
+  }
+
+  // A source counts as 'active' once it has delivered a packet in the
+  // current reporting interval:
+  if (ssrcStats->numPacketsReceivedSinceLastReset() == 0) {
+    ++fNumActiveSourcesSinceLastReset;
+  }
+
+  // Delegate the per-packet bookkeeping (seq-num tracking, jitter,
+  // presentation-time computation) to the record itself:
+  ssrcStats->noteIncomingPacket(seqNum, rtpTimestamp, timestampFrequency,
+				useForJitterCalculation,
+				resultPresentationTime,
+				resultHasBeenSyncedUsingRTCP, packetSize);
+}
+
+void RTPReceptionStatsDB
+::noteIncomingSR(u_int32_t SSRC,
+		 u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW,
+		 u_int32_t rtpTimestamp) {
+  // Find the record for this SSRC - creating one if this SR is the first
+  // thing we've heard from it - then record the SR's NTP<->RTP mapping:
+  RTPReceptionStats* ssrcStats = lookup(SSRC);
+  if (ssrcStats == NULL) {
+    ssrcStats = new RTPReceptionStats(SSRC);
+    if (ssrcStats == NULL) return;
+    add(SSRC, ssrcStats);
+  }
+
+  ssrcStats->noteIncomingSR(ntpTimestampMSW, ntpTimestampLSW, rtpTimestamp);
+}
+
+void RTPReceptionStatsDB::removeRecord(u_int32_t SSRC) {
+  RTPReceptionStats* record = lookup(SSRC);
+  if (record == NULL) return; // nothing to remove
+
+  // The table's keys are SSRCs widened to pointer-sized integers:
+  fTable->Remove((char const*)(long)SSRC);
+  delete record;
+}
+
+RTPReceptionStatsDB::Iterator
+::Iterator(RTPReceptionStatsDB& receptionStatsDB)
+  // Wraps a hash-table iterator over the database's per-SSRC records:
+  : fIter(HashTable::Iterator::create(*(receptionStatsDB.fTable))) {
+}
+
+RTPReceptionStatsDB::Iterator::~Iterator() {
+  delete fIter;
+}
+
+RTPReceptionStats*
+RTPReceptionStatsDB::Iterator::next(Boolean includeInactiveSources) {
+  char const* key; // unused; required by the hash-table iterator interface
+
+  // Advance until an acceptable record is found (or we run off the end).
+  // Unless "includeInactiveSources", records with no packets since the
+  // last reset are skipped:
+  for (;;) {
+    RTPReceptionStats* record = (RTPReceptionStats*)(fIter->next(key));
+    if (record == NULL || includeInactiveSources
+	|| record->numPacketsReceivedSinceLastReset() > 0) {
+      return record;
+    }
+  }
+}
+
+RTPReceptionStats* RTPReceptionStatsDB::lookup(u_int32_t SSRC) const {
+  // Keys in "fTable" are SSRCs widened to pointer-sized integers:
+  long SSRC_long = (long)SSRC;
+  return (RTPReceptionStats*)(fTable->Lookup((char const*)SSRC_long));
+}
+
+void RTPReceptionStatsDB::add(u_int32_t SSRC, RTPReceptionStats* stats) {
+  // The table takes no ownership; records are deleted in our destructor:
+  long SSRC_long = (long)SSRC;
+  fTable->Add((char const*)SSRC_long, stats);
+}
+
+////////// RTPReceptionStats //////////
+
+RTPReceptionStats::RTPReceptionStats(u_int32_t SSRC, u_int16_t initialSeqNum) {
+  // Used when the first thing heard from this SSRC is a RTP data packet,
+  // so an initial sequence number is known:
+  initSeqNum(initialSeqNum);
+  init(SSRC);
+}
+
+RTPReceptionStats::RTPReceptionStats(u_int32_t SSRC) {
+  // Used when the first thing heard from this SSRC is a RTCP SR,
+  // so no sequence number is known yet:
+  init(SSRC);
+}
+
+RTPReceptionStats::~RTPReceptionStats() {
+}
+
+void RTPReceptionStats::init(u_int32_t SSRC) {
+  // Reset every statistic to its 'never seen a packet' value.
+  fSSRC = SSRC;
+  fTotNumPacketsReceived = 0;
+  fTotBytesReceived_hi = fTotBytesReceived_lo = 0;
+  fBaseExtSeqNumReceived = 0;
+  fHighestExtSeqNumReceived = 0;
+  fHaveSeenInitialSequenceNumber = False;
+  fLastTransit = ~0; // sentinel: no transit time measured yet (see jitter calc)
+  fPreviousPacketRTPTimestamp = 0;
+  fJitter = 0.0;
+  fLastReceivedSR_NTPmsw = fLastReceivedSR_NTPlsw = 0;
+  fLastReceivedSR_time.tv_sec = fLastReceivedSR_time.tv_usec = 0;
+  fLastPacketReceptionTime.tv_sec = fLastPacketReceptionTime.tv_usec = 0;
+  fMinInterPacketGapUS = 0x7FFFFFFF; // 'infinite', so the first observed gap becomes the min
+  fMaxInterPacketGapUS = 0;
+  fTotalInterPacketGaps.tv_sec = fTotalInterPacketGaps.tv_usec = 0;
+  fHasBeenSynchronized = False;
+  fSyncTime.tv_sec = fSyncTime.tv_usec = 0;
+  reset(); // also clear the per-reporting-interval state
+}
+
+void RTPReceptionStats::initSeqNum(u_int16_t initialSeqNum) {
+  // Extended sequence numbers carry a cycle count in the high bits; start
+  // in cycle 1 so that slightly-older out-of-order packets can still lower
+  // "fBaseExtSeqNumReceived" without wrapping below zero:
+  fBaseExtSeqNumReceived = 0x10000 | initialSeqNum;
+  fHighestExtSeqNumReceived = 0x10000 | initialSeqNum;
+  fHaveSeenInitialSequenceNumber = True;
+}
+
+#ifndef MILLION
+#define MILLION 1000000
+#endif
+
+void RTPReceptionStats
+::noteIncomingPacket(u_int16_t seqNum, u_int32_t rtpTimestamp,
+		     unsigned timestampFrequency,
+		     Boolean useForJitterCalculation,
+		     struct timeval& resultPresentationTime,
+		     Boolean& resultHasBeenSyncedUsingRTCP,
+		     unsigned packetSize) {
+  // Update all per-source statistics for one incoming RTP packet, and compute
+  // the packet's presentation time (returned via "resultPresentationTime").
+  // "resultHasBeenSyncedUsingRTCP" reports whether that time was derived from
+  // a RTCP SR, or merely from the local 'wall clock'.
+  if (!fHaveSeenInitialSequenceNumber) initSeqNum(seqNum);
+
+  ++fNumPacketsReceivedSinceLastReset;
+  ++fTotNumPacketsReceived;
+  // 64-bit byte count, maintained as two 32-bit halves:
+  u_int32_t prevTotBytesReceived_lo = fTotBytesReceived_lo;
+  fTotBytesReceived_lo += packetSize;
+  if (fTotBytesReceived_lo < prevTotBytesReceived_lo) { // wrap-around
+    ++fTotBytesReceived_hi;
+  }
+
+  // Check whether the new sequence number is the highest yet seen:
+  unsigned oldSeqNum = (fHighestExtSeqNumReceived&0xFFFF);
+  unsigned seqNumCycle = (fHighestExtSeqNumReceived&0xFFFF0000);
+  unsigned seqNumDifference = (unsigned)((int)seqNum-(int)oldSeqNum);
+  unsigned newSeqNum = 0;
+  if (seqNumLT((u_int16_t)oldSeqNum, seqNum)) {
+    // This packet was not an old packet received out of order, so check it:
+
+    if (seqNumDifference >= 0x8000) {
+      // The sequence number wrapped around, so start a new cycle:
+      seqNumCycle += 0x10000;
+    }
+
+    newSeqNum = seqNumCycle|seqNum;
+    if (newSeqNum > fHighestExtSeqNumReceived) {
+      fHighestExtSeqNumReceived = newSeqNum;
+    }
+  } else if (fTotNumPacketsReceived > 1) {
+    // This packet was an old packet received out of order
+
+    if ((int)seqNumDifference >= 0x8000) {
+      // The sequence number wrapped around, so switch to an old cycle:
+      seqNumCycle -= 0x10000;
+    }
+
+    newSeqNum = seqNumCycle|seqNum;
+    if (newSeqNum < fBaseExtSeqNumReceived) {
+      fBaseExtSeqNumReceived = newSeqNum;
+    }
+  }
+
+  // Record the inter-packet delay
+  struct timeval timeNow;
+  gettimeofday(&timeNow, NULL);
+  if (fLastPacketReceptionTime.tv_sec != 0
+      || fLastPacketReceptionTime.tv_usec != 0) {
+    // (gap is in microseconds)
+    unsigned gap
+      = (timeNow.tv_sec - fLastPacketReceptionTime.tv_sec)*MILLION
+      + timeNow.tv_usec - fLastPacketReceptionTime.tv_usec;
+    if (gap > fMaxInterPacketGapUS) {
+      fMaxInterPacketGapUS = gap;
+    }
+    if (gap < fMinInterPacketGapUS) {
+      fMinInterPacketGapUS = gap;
+    }
+    fTotalInterPacketGaps.tv_usec += gap;
+    if (fTotalInterPacketGaps.tv_usec >= MILLION) {
+      ++fTotalInterPacketGaps.tv_sec;
+      fTotalInterPacketGaps.tv_usec -= MILLION;
+    }
+  }
+  fLastPacketReceptionTime = timeNow;
+
+  // Compute the current 'jitter' using the received packet's RTP timestamp,
+  // and the RTP timestamp that would correspond to the current time.
+  // (Use the code from appendix A.8 in the RTP spec.)
+  // Note, however, that we don't use this packet if its timestamp is
+  // the same as that of the previous packet (this indicates a multi-packet
+  // fragment), or if we've been explicitly told not to use this packet.
+  if (useForJitterCalculation
+      && rtpTimestamp != fPreviousPacketRTPTimestamp) {
+    // "arrival" is the current time expressed in RTP-timestamp units:
+    unsigned arrival = (timestampFrequency*timeNow.tv_sec);
+    arrival += (unsigned)
+      ((2.0*timestampFrequency*timeNow.tv_usec + 1000000.0)/2000000);
+      // note: rounding
+    int transit = arrival - rtpTimestamp;
+    if (fLastTransit == (~0)) fLastTransit = transit; // hack for first time
+    int d = transit - fLastTransit;
+    fLastTransit = transit;
+    if (d < 0) d = -d;
+    // Exponential moving average with gain 1/16, per RFC 3550 A.8:
+    fJitter += (1.0/16.0) * ((double)d - fJitter);
+  }
+
+  // Return the 'presentation time' that corresponds to "rtpTimestamp":
+  if (fSyncTime.tv_sec == 0 && fSyncTime.tv_usec == 0) {
+    // This is the first timestamp that we've seen, so use the current
+    // 'wall clock' time as the synchronization time.  (This will be
+    // corrected later when we receive RTCP SRs.)
+    fSyncTimestamp = rtpTimestamp;
+    fSyncTime = timeNow;
+  }
+
+  int timestampDiff = rtpTimestamp - fSyncTimestamp;
+      // Note: This works even if the timestamp wraps around
+      // (as long as "int" is 32 bits)
+
+  // Divide this by the timestamp frequency to get real time:
+  double timeDiff = timestampDiff/(double)timestampFrequency;
+
+  // Add this to the 'sync time' to get our result:
+  unsigned const million = 1000000;
+  unsigned seconds, uSeconds;
+  if (timeDiff >= 0.0) {
+    seconds = fSyncTime.tv_sec + (unsigned)(timeDiff);
+    uSeconds = fSyncTime.tv_usec
+      + (unsigned)((timeDiff - (unsigned)timeDiff)*million);
+    if (uSeconds >= million) {
+      uSeconds -= million;
+      ++seconds;
+    }
+  } else {
+    timeDiff = -timeDiff;
+    seconds = fSyncTime.tv_sec - (unsigned)(timeDiff);
+    uSeconds = fSyncTime.tv_usec
+      - (unsigned)((timeDiff - (unsigned)timeDiff)*million);
+    if ((int)uSeconds < 0) {
+      uSeconds += million;
+      --seconds;
+    }
+  }
+  resultPresentationTime.tv_sec = seconds;
+  resultPresentationTime.tv_usec = uSeconds;
+  resultHasBeenSyncedUsingRTCP = fHasBeenSynchronized;
+
+  // Save these as the new synchronization timestamp & time:
+  fSyncTimestamp = rtpTimestamp;
+  fSyncTime = resultPresentationTime;
+
+  fPreviousPacketRTPTimestamp = rtpTimestamp;
+}
+
+void RTPReceptionStats::noteIncomingSR(u_int32_t ntpTimestampMSW,
+				       u_int32_t ntpTimestampLSW,
+				       u_int32_t rtpTimestamp) {
+  // Record a RTCP "Sender Report": remember its NTP timestamp (for later
+  // LSR computation), and use its NTP<->RTP timestamp pair to resynchronize
+  // our presentation-time mapping.
+  fLastReceivedSR_NTPmsw = ntpTimestampMSW;
+  fLastReceivedSR_NTPlsw = ntpTimestampLSW;
+
+  gettimeofday(&fLastReceivedSR_time, NULL);
+
+  // Use this SR to update time synchronization information:
+  fSyncTimestamp = rtpTimestamp;
+  fSyncTime.tv_sec = ntpTimestampMSW - 0x83AA7E80; // 1/1/1900 -> 1/1/1970
+  double microseconds = (ntpTimestampLSW*15625.0)/0x04000000; // 10^6/2^32
+  fSyncTime.tv_usec = (unsigned)(microseconds+0.5); // round to nearest microsecond
+  fHasBeenSynchronized = True;
+}
+
+double RTPReceptionStats::totNumKBytesReceived() const {
+  // Combine the 64-bit (hi,lo) byte count into a double, in units of 1000 bytes:
+  double const hiMultiplier = 0x20000000/125.0; // == (2^32)/(10^3)
+  return fTotBytesReceived_hi*hiMultiplier + fTotBytesReceived_lo/1000.0;
+}
+
+unsigned RTPReceptionStats::jitter() const {
+  // The running jitter estimate, truncated to whole RTP-timestamp units:
+  return (unsigned)fJitter;
+}
+
+void RTPReceptionStats::reset() {
+  // Begin a new reporting interval, remembering where the last one ended:
+  fNumPacketsReceivedSinceLastReset = 0;
+  fLastResetExtSeqNumReceived = fHighestExtSeqNumReceived;
+}
+
+Boolean seqNumLT(u_int16_t s1, u_int16_t s2) {
+  // 'Less-than' on 16-bit RTP sequence numbers, treating the space as
+  // circular: s1 < s2 iff s2 lies within a half-cycle 'ahead' of s1.
+  int delta = s2 - s1;
+  if (delta == 0) return False;
+  return delta > 0 ? (delta < 0x8000) : (delta < -0x8000);
+}
diff --git a/liveMedia/RTSPClient.cpp b/liveMedia/RTSPClient.cpp
new file mode 100644
index 0000000..f47acb5
--- /dev/null
+++ b/liveMedia/RTSPClient.cpp
@@ -0,0 +1,2125 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTSP client
+// Implementation
+
+#include "RTSPClient.hh"
+#include "RTSPCommon.hh"
+#include "Base64.hh"
+#include "Locale.hh"
+#include <GroupsockHelper.hh>
+#include "ourMD5.hh"
+
+RTSPClient* RTSPClient::createNew(UsageEnvironment& env, char const* rtspURL,
+				  int verbosityLevel,
+				  char const* applicationName,
+				  portNumBits tunnelOverHTTPPortNum,
+				  int socketNumToServer) {
+  // Factory function: RTSP clients are heap-allocated "Medium" objects.
+  return new RTSPClient(env, rtspURL,
+			verbosityLevel, applicationName, tunnelOverHTTPPortNum, socketNumToServer);
+}
+
+unsigned RTSPClient::sendDescribeCommand(responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "DESCRIBE" command; returns the command's CSeq, for matching
+  // the asynchronous response.
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead
+  // (as "sendOptionsCommand()" already does):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "DESCRIBE", responseHandler));
+}
+
+unsigned RTSPClient::sendOptionsCommand(responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "OPTIONS" command; returns the command's CSeq, for matching
+  // the asynchronous response.  Caller-supplied credentials (if any) are
+  // adopted first:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "OPTIONS", responseHandler));
+}
+
+unsigned RTSPClient::sendAnnounceCommand(char const* sdpDescription, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "ANNOUNCE" command carrying "sdpDescription" as its body.
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "ANNOUNCE", responseHandler, NULL, NULL, False, 0.0, 0.0, 0.0, sdpDescription));
+}
+
+unsigned RTSPClient::sendSetupCommand(MediaSubsession& subsession, responseHandler* responseHandler,
+                                      Boolean streamOutgoing, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified,
+                                      Authenticator* authenticator) {
+  // Send a RTSP "SETUP" command for "subsession", encoding the transport
+  // options as flag bits in the "RequestRecord".
+  if (fTunnelOverHTTPPortNum != 0) streamUsingTCP = True; // RTSP-over-HTTP tunneling uses TCP (by definition)
+  // However, if we're using a TLS connection, streaming over TCP doesn't work, so disable it:
+  if (fTLS.isNeeded) streamUsingTCP = False;
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+
+  u_int32_t booleanFlags = 0;
+  if (streamUsingTCP) booleanFlags |= 0x1;
+  if (streamOutgoing) booleanFlags |= 0x2;
+  if (forceMulticastOnUnspecified) booleanFlags |= 0x4;
+  return sendRequest(new RequestRecord(++fCSeq, "SETUP", responseHandler, NULL, &subsession, booleanFlags));
+}
+
+unsigned RTSPClient::sendPlayCommand(MediaSession& session, responseHandler* responseHandler,
+                                     double start, double end, float scale,
+                                     Authenticator* authenticator) {
+  // Send a RTSP "PLAY" command (whole session; NPT range).
+  // Fixed (here and in the three overloads below): was
+  // "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  sendDummyUDPPackets(session); // hack to improve NAT traversal
+  return sendRequest(new RequestRecord(++fCSeq, "PLAY", responseHandler, &session, NULL, 0, start, end, scale));
+}
+
+unsigned RTSPClient::sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler,
+                                     double start, double end, float scale,
+                                     Authenticator* authenticator) {
+  // Send a RTSP "PLAY" command (single subsession; NPT range):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  sendDummyUDPPackets(subsession); // hack to improve NAT traversal
+  return sendRequest(new RequestRecord(++fCSeq, "PLAY", responseHandler, NULL, &subsession, 0, start, end, scale));
+}
+
+unsigned RTSPClient::sendPlayCommand(MediaSession& session, responseHandler* responseHandler,
+                                     char const* absStartTime, char const* absEndTime, float scale,
+                                     Authenticator* authenticator) {
+  // Send a RTSP "PLAY" command (whole session; absolute clock-time range):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  sendDummyUDPPackets(session); // hack to improve NAT traversal
+  return sendRequest(new RequestRecord(++fCSeq, responseHandler, absStartTime, absEndTime, scale, &session, NULL));
+}
+
+unsigned RTSPClient::sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler,
+                                     char const* absStartTime, char const* absEndTime, float scale,
+                                     Authenticator* authenticator) {
+  // Send a RTSP "PLAY" command (single subsession; absolute clock-time range):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  sendDummyUDPPackets(subsession); // hack to improve NAT traversal
+  return sendRequest(new RequestRecord(++fCSeq, responseHandler, absStartTime, absEndTime, scale, NULL, &subsession));
+}
+
+unsigned RTSPClient::sendPauseCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "PAUSE" command for the whole session.
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "PAUSE", responseHandler, &session));
+}
+
+unsigned RTSPClient::sendPauseCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "PAUSE" command for a single subsession (same fix as above):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "PAUSE", responseHandler, NULL, &subsession));
+}
+
+unsigned RTSPClient::sendRecordCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "RECORD" command for the whole session.
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "RECORD", responseHandler, &session));
+}
+
+unsigned RTSPClient::sendRecordCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "RECORD" command for a single subsession (same fix as above):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "RECORD", responseHandler, NULL, &subsession));
+}
+
+unsigned RTSPClient::sendTeardownCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "TEARDOWN" command for the whole session.
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "TEARDOWN", responseHandler, &session));
+}
+
+unsigned RTSPClient::sendTeardownCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator) {
+  // Send a RTSP "TEARDOWN" command for a single subsession (same fix as above):
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  return sendRequest(new RequestRecord(++fCSeq, "TEARDOWN", responseHandler, NULL, &subsession));
+}
+
+unsigned RTSPClient::sendSetParameterCommand(MediaSession& session, responseHandler* responseHandler,
+                                             char const* parameterName, char const* parameterValue,
+                                             Authenticator* authenticator) {
+  // Send a RTSP "SET_PARAMETER" command whose body is "<name>: <value>\r\n".
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+  // (+10 covers ": ", "\r\n", and the '\0', with room to spare)
+  char* paramString = new char[strlen(parameterName) + strlen(parameterValue) + 10];
+  sprintf(paramString, "%s: %s\r\n", parameterName, parameterValue);
+  unsigned result = sendRequest(new RequestRecord(++fCSeq, "SET_PARAMETER", responseHandler, &session, NULL, False, 0.0, 0.0, 0.0, paramString));
+  delete[] paramString; // "RequestRecord" made its own copy
+  return result;
+}
+
+unsigned RTSPClient::sendGetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName,
+                                             Authenticator* authenticator) {
+  // Send a RTSP "GET_PARAMETER" command.
+  // Fixed: was "if (fCurrentAuthenticator < authenticator)" - a meaningless
+  // object-vs-pointer comparison; test the pointer for NULL instead:
+  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
+
+  // We assume that:
+  //    parameterName is NULL or "" means: Send no body in the request.
+  //    parameterName is non-empty means: Send "<parameterName>\r\n" as the request body.
+  unsigned parameterNameLen = parameterName == NULL ? 0 : strlen(parameterName);
+  char* paramString = new char[parameterNameLen + 3]; // the 3 is for \r\n + the '\0' byte
+  if (parameterName == NULL || parameterName[0] == '\0') {
+    paramString[0] = '\0';
+  } else {
+    sprintf(paramString, "%s\r\n", parameterName);
+  }
+  unsigned result = sendRequest(new RequestRecord(++fCSeq, "GET_PARAMETER", responseHandler, &session, NULL, False, 0.0, 0.0, 0.0, paramString));
+  delete[] paramString; // "RequestRecord" made its own copy
+  return result;
+}
+
+void RTSPClient::sendDummyUDPPackets(MediaSession& session, unsigned numDummyPackets) {
+  // Convenience overload: apply the per-subsession NAT-traversal hack
+  // (see below) to every subsession of "session".
+  MediaSubsessionIterator iter(session);
+  MediaSubsession* subsession;
+
+  while ((subsession = iter.next()) != NULL) {
+    sendDummyUDPPackets(*subsession, numDummyPackets);
+  }
+}
+
+void RTSPClient::sendDummyUDPPackets(MediaSubsession& subsession, unsigned numDummyPackets) {
+  // Hack: To increase the likelihood of UDP packets from the server reaching us,
+  // if we're behind a NAT, send a few 'dummy' UDP packets to the server now.
+  // (We do this on both our RTP port and our RTCP port.)
+  Groupsock* gs1 = NULL; Groupsock* gs2 = NULL;
+  if (subsession.rtpSource() != NULL) gs1 = subsession.rtpSource()->RTPgs();
+  if (subsession.rtcpInstance() != NULL) gs2 = subsession.rtcpInstance()->RTCPgs();
+  u_int32_t const dummy = 0xFEEDFACE; // arbitrary payload; servers ignore it
+  for (unsigned i = 0; i < numDummyPackets; ++i) {
+    if (gs1 != NULL) gs1->output(envir(), (unsigned char*)&dummy, sizeof dummy);
+    if (gs2 != NULL) gs2->output(envir(), (unsigned char*)&dummy, sizeof dummy);
+  }
+}
+
+void RTSPClient::setSpeed(MediaSession& session, float speed) {
+  // Record a download speed on the session (and on each of its subsessions)
+  // for use by a later "PLAY" command.  Call this after the "MediaSession"
+  // is instantiated, but before the first "sendPlayCommand()".
+  session.speed() = speed;
+
+  MediaSubsessionIterator iter(session);
+  for (MediaSubsession* ss = iter.next(); ss != NULL; ss = iter.next()) {
+    ss->speed() = speed;
+  }
+}
+
+Boolean RTSPClient::changeResponseHandler(unsigned cseq, responseHandler* newResponseHandler) {
+  // Replace the response handler of a still-pending request (identified by
+  // its CSeq).  Returns False if no such request is pending.
+  // Look for the matching request record in each of our 'pending requests' queues:
+  RequestRecord* request;
+  if ((request = fRequestsAwaitingConnection.findByCSeq(cseq)) != NULL
+      || (request = fRequestsAwaitingHTTPTunneling.findByCSeq(cseq)) != NULL
+      || (request = fRequestsAwaitingResponse.findByCSeq(cseq)) != NULL) {
+    request->handler() = newResponseHandler;
+    return True;
+  }
+
+  return False;
+}
+
+Boolean RTSPClient::lookupByName(UsageEnvironment& env,
+				 char const* instanceName,
+				 RTSPClient*& resultClient) {
+  // Look the name up as a generic "Medium" first, then verify that what we
+  // found is in fact a RTSP client before downcasting it:
+  resultClient = NULL; // default: failure
+
+  Medium* m;
+  if (!Medium::lookupByName(env, instanceName, m)) return False;
+  if (!m->isRTSPClient()) {
+    env.setResultMsg(instanceName, " is not a RTSP client");
+    return False;
+  }
+
+  resultClient = (RTSPClient*)m;
+  return True;
+}
+
+static void copyUsernameOrPasswordStringFromURL(char* dest, char const* src, unsigned len) {
+  // Copy "len" bytes of "src" into "dest" (NUL-terminating it), %-decoding
+  // any "%XX" escape sequences encountered along the way.
+  // Normally, we just copy from the source to the destination.  However, if the source contains
+  // %-encoded characters, then we decode them while doing the copy:
+  while (len > 0) {
+    int nBefore = 0;
+    int nAfter = 0;
+
+    // "%2hhx" parses up to two hex digits directly into the output byte;
+    // the surrounding "%n"s measure how many digits were actually consumed:
+    if (*src == '%' && len >= 3 && sscanf(src+1, "%n%2hhx%n", &nBefore, dest, &nAfter) == 1) {
+      unsigned codeSize = nAfter - nBefore; // should be 1 or 2
+
+      ++dest;
+      src += (1 + codeSize); // skip the '%' and the hex digits
+      len -= (1 + codeSize);
+    } else {
+      // Ordinary character (or a malformed escape, copied verbatim):
+      *dest++ = *src++;
+      --len;
+    }
+  }
+  *dest = '\0';
+}
+
+Boolean RTSPClient::parseRTSPURL(char const* url,
+				 char*& username, char*& password,
+				 NetAddress& address,
+				 portNumBits& portNum,
+				 char const** urlSuffix) {
+  // Parse "url" into its components.  "username" and "password" are returned
+  // as newly-allocated strings (or NULL if absent); the caller must delete[]
+  // them.  Returns False (with a result message set) on any parse failure.
+  do {
+    // Parse the URL as "rtsp://[<username>[:<password>]@]<server-address-or-name>[:<port>][/<stream-name>]" (or "rtsps://...")
+    char const* prefix1 = "rtsp://";
+    unsigned const prefix1Length = 7;
+    char const* prefix2 = "rtsps://";
+    unsigned const prefix2Length = 8;
+
+    portNumBits defaultPortNumber;
+    char const* from;
+    if (_strncasecmp(url, prefix1, prefix1Length) == 0) {
+      defaultPortNumber = 554;
+      from = &url[prefix1Length];
+    } else if (_strncasecmp(url, prefix2, prefix2Length) == 0) {
+      // "rtsps" implies RTSP-over-TLS:
+      useTLS();
+      defaultPortNumber = 322;
+      from = &url[prefix2Length];
+    } else {
+      envir().setResultMsg("URL does not begin with \"rtsp://\" or \"rtsps://\"");
+      break;
+    }
+
+    unsigned const parseBufferSize = 100;
+    char parseBuffer[parseBufferSize];
+
+    // Check whether "<username>[:<password>]@" occurs next.
+    // We do this by checking whether '@' appears before the end of the URL, or before the first '/'.
+    username = password = NULL; // default return values
+    char const* colonPasswordStart = NULL;
+    char const* lastAtPtr = NULL;
+    for (char const* p = from; *p != '\0' && *p != '/'; ++p) {
+      if (*p == ':' && colonPasswordStart == NULL) {
+	colonPasswordStart = p;
+      } else if (*p == '@') {
+	lastAtPtr = p;
+      }
+    }
+    if (lastAtPtr != NULL) {
+      // We found <username> (and perhaps <password>).  Copy them into newly-allocated result strings:
+      // (If the first ':' came after the '@', it belongs to the port, not a password.)
+      if (colonPasswordStart == NULL || colonPasswordStart > lastAtPtr) colonPasswordStart = lastAtPtr;
+
+      char const* usernameStart = from;
+      unsigned usernameLen = colonPasswordStart - usernameStart;
+      username = new char[usernameLen + 1] ; // allow for the trailing '\0'
+      copyUsernameOrPasswordStringFromURL(username, usernameStart, usernameLen);
+
+      char const* passwordStart = colonPasswordStart;
+      if (passwordStart < lastAtPtr) ++passwordStart; // skip over the ':'
+      unsigned passwordLen = lastAtPtr - passwordStart;
+      password = new char[passwordLen + 1]; // allow for the trailing '\0'
+      copyUsernameOrPasswordStringFromURL(password, passwordStart, passwordLen);
+
+      from = lastAtPtr + 1; // skip over the '@'
+    }
+
+    // Next, parse <server-address-or-name>
+    char* to = &parseBuffer[0];
+    unsigned i;
+    for (i = 0; i < parseBufferSize; ++i) {
+      if (*from == '\0' || *from == ':' || *from == '/') {
+	// We've completed parsing the address
+	*to = '\0';
+	break;
+      }
+      *to++ = *from++;
+    }
+    if (i == parseBufferSize) {
+      envir().setResultMsg("URL is too long");
+      break;
+    }
+
+    NetAddressList addresses(parseBuffer);
+    if (addresses.numAddresses() == 0) {
+      envir().setResultMsg("Failed to find network address for \"",
+			   parseBuffer, "\"");
+      break;
+    }
+    address = *(addresses.firstAddress());
+
+    portNum = defaultPortNumber; // unless it's specified explicitly in the URL
+    char nextChar = *from;
+    if (nextChar == ':') {
+      int portNumInt;
+      if (sscanf(++from, "%d", &portNumInt) != 1) {
+	envir().setResultMsg("No port number follows ':'");
+	break;
+      }
+      if (portNumInt < 1 || portNumInt > 65535) {
+	envir().setResultMsg("Bad port number");
+	break;
+      }
+      portNum = (portNumBits)portNumInt;
+      while (*from >= '0' && *from <= '9') ++from; // skip over port number
+    }
+
+    // The remainder of the URL is the suffix:
+    if (urlSuffix != NULL) *urlSuffix = from;
+
+    return True;
+  } while (0);
+
+  // An error occurred in the parsing:
+  return False;
+}
+
+void RTSPClient::setUserAgentString(char const* userAgentName) {
+  // Replace the "User-Agent:" header line that gets sent with every request.
+  // A NULL name is a no-op (the existing header is kept).
+  if (userAgentName == NULL) return;
+
+  // Change the existing user agent header string:
+  char const* const formatStr = "User-Agent: %s\r\n";
+  // ("formatStr"'s "%s" placeholder leaves slack for the trailing '\0')
+  unsigned const headerSize = strlen(formatStr) + strlen(userAgentName);
+  delete[] fUserAgentHeaderStr;
+  fUserAgentHeaderStr = new char[headerSize];
+  sprintf(fUserAgentHeaderStr, formatStr, userAgentName);
+  fUserAgentHeaderStrLen = strlen(fUserAgentHeaderStr);
+}
+
+unsigned RTSPClient::responseBufferSize = 20000; // default value; you can reassign this in your application if you need to
+
+RTSPClient::RTSPClient(UsageEnvironment& env, char const* rtspURL,
+		       int verbosityLevel, char const* applicationName,
+		       portNumBits tunnelOverHTTPPortNum, int socketNumToServer)
+  : Medium(env),
+    desiredMaxIncomingPacketSize(0), fVerbosityLevel(verbosityLevel), fCSeq(1),
+    fAllowBasicAuthentication(True), fServerAddress(0),
+    fTunnelOverHTTPPortNum(tunnelOverHTTPPortNum),
+    fUserAgentHeaderStr(NULL), fUserAgentHeaderStrLen(0),
+    fInputSocketNum(-1), fOutputSocketNum(-1), fBaseURL(NULL), fTCPStreamIdCount(0),
+    fLastSessionId(NULL), fSessionTimeoutParameter(0), fSessionCookieCounter(0), fHTTPTunnelingConnectionIsPending(False),
+    fTLS(*this) {
+  setBaseURL(rtspURL);
+
+  fResponseBuffer = new char[responseBufferSize+1]; // +1 for a '\0' terminator
+  resetResponseBuffer();
+
+  if (socketNumToServer >= 0) {
+    // This socket number is (assumed to be) already connected to the server.
+    // Use it, and arrange to handle responses to requests sent on it:
+    fInputSocketNum = fOutputSocketNum = socketNumToServer;
+    env.taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
+					      (TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
+  }
+
+  // Set the "User-Agent:" header to use in each request:
+  // Format: "<applicationName> (<libName><libVersion>)", or just the library
+  // name/version if no application name was given.
+  char const* const libName = "LIVE555 Streaming Media v";
+  char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;
+  char const* libPrefix; char const* libSuffix;
+  if (applicationName == NULL || applicationName[0] == '\0') {
+    applicationName = libPrefix = libSuffix = "";
+  } else {
+    libPrefix = " (";
+    libSuffix = ")";
+  }
+  unsigned userAgentNameSize
+    = strlen(applicationName) + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix) + 1;
+  char* userAgentName = new char[userAgentNameSize];
+  sprintf(userAgentName, "%s%s%s%s%s", applicationName, libPrefix, libName, libVersionStr, libSuffix);
+  setUserAgentString(userAgentName);
+  delete[] userAgentName; // "setUserAgentString()" made its own copy
+}
+
+RTSPClient::~RTSPClient() {
+  // "reset()" closes the TCP sockets and discards any pending requests:
+  reset();
+
+  delete[] fResponseBuffer;
+  delete[] fUserAgentHeaderStr;
+}
+
+void RTSPClient::reset() {
+  // Return the client to its just-constructed state: close sockets, drop all
+  // queued requests, and forget the server address, base URL, credentials,
+  // and session id.
+  resetTCPSockets();
+  resetResponseBuffer();
+  fRequestsAwaitingConnection.reset();
+  fRequestsAwaitingHTTPTunneling.reset();
+  fRequestsAwaitingResponse.reset();
+  fServerAddress = 0;
+
+  setBaseURL(NULL);
+
+  fCurrentAuthenticator.reset();
+
+  delete[] fLastSessionId; fLastSessionId = NULL;
+}
+
+void RTSPClient::setBaseURL(char const* url) {
+  // Take a private copy of "url" (NULL is allowed, and clears the base URL):
+  delete[] fBaseURL; fBaseURL = strDup(url);
+}
+
+int RTSPClient::grabSocket() {
+  // Relinquish ownership of the input socket to the caller; this client will
+  // no longer read from it.
+  int inputSocket = fInputSocketNum;
+  RTPInterface::clearServerRequestAlternativeByteHandler(envir(), fInputSocketNum); // in case we were receiving RTP-over-TCP
+  fInputSocketNum = -1;
+
+  return inputSocket;
+}
+
// Formats and sends "request" to the server, opening a connection and/or
// setting up RTSP-over-HTTP tunneling first if needed.
// Returns the request's CSeq number on success (including the cases where the
// request was merely enqueued pending connection/tunneling setup), or 0 on
// error — in which case the request's response handler has already been called
// with the error, and the request deleted.
unsigned RTSPClient::sendRequest(RequestRecord* request) {
  char* cmd = NULL;
  do {
    Boolean connectionIsPending = False;
    if (!fRequestsAwaitingConnection.isEmpty()) {
      // A connection is currently pending (with at least one enqueued request). Enqueue this request also:
      connectionIsPending = True;
    } else if (fInputSocketNum < 0) { // we need to open a connection
      int connectResult = openConnection();
      if (connectResult < 0) break; // an error occurred
      else if (connectResult == 0) {
        // A connection is pending
        connectionIsPending = True;
      } // else the connection succeeded. Continue sending the command.
    }
    if (connectionIsPending) {
      fRequestsAwaitingConnection.enqueue(request);
      return request->cseq();
    }

    // If requested (and we're not already doing it, or have done it), set up the special protocol for tunneling RTSP-over-HTTP:
    // (fOutputSocketNum == fInputSocketNum means tunneling has not yet been set up.)
    if (fTunnelOverHTTPPortNum != 0 && strcmp(request->commandName(), "GET") != 0 && fOutputSocketNum == fInputSocketNum) {
      if (!setupHTTPTunneling1()) break;
      fRequestsAwaitingHTTPTunneling.enqueue(request);
      return request->cseq();
    }

    // Construct and send the command:

    // First, construct command-specific headers that we need:

    char* cmdURL = fBaseURL; // by default
    Boolean cmdURLWasAllocated = False;

    char const* protocolStr = "RTSP/1.0"; // by default

    char* extraHeaders = (char*)""; // by default
    Boolean extraHeadersWereAllocated = False;

    char* contentLengthHeader = (char*)""; // by default
    Boolean contentLengthHeaderWasAllocated = False;

    if (!setRequestFields(request,
			  cmdURL, cmdURLWasAllocated,
			  protocolStr,
			  extraHeaders, extraHeadersWereAllocated)) {
      break;
    }

    char const* contentStr = request->contentStr(); // by default
    if (contentStr == NULL) contentStr = "";
    unsigned contentStrLen = strlen(contentStr);
    if (contentStrLen > 0) {
      // The request has a body, so add a "Content-Length:" header for it:
      char const* contentLengthHeaderFmt =
	"Content-Length: %d\r\n";
      unsigned contentLengthHeaderSize = strlen(contentLengthHeaderFmt)
	+ 20 /* max int len */;
      contentLengthHeader = new char[contentLengthHeaderSize];
      sprintf(contentLengthHeader, contentLengthHeaderFmt, contentStrLen);
      contentLengthHeaderWasAllocated = True;
    }

    char* authenticatorStr = createAuthenticatorString(request->commandName(), fBaseURL);

    char const* const cmdFmt =
      "%s %s %s\r\n"
      "CSeq: %d\r\n"
      "%s"
      "%s"
      "%s"
      "%s"
      "\r\n"
      "%s";
    // Note: strlen(cmdFmt) still includes the "%s"/"%d" placeholder characters,
    // which more than covers the trailing '\0':
    unsigned cmdSize = strlen(cmdFmt)
      + strlen(request->commandName()) + strlen(cmdURL) + strlen(protocolStr)
      + 20 /* max int len */
      + strlen(authenticatorStr)
      + fUserAgentHeaderStrLen
      + strlen(extraHeaders)
      + strlen(contentLengthHeader)
      + contentStrLen;
    cmd = new char[cmdSize];
    sprintf(cmd, cmdFmt,
	    request->commandName(), cmdURL, protocolStr,
	    request->cseq(),
	    authenticatorStr,
	    fUserAgentHeaderStr,
	    extraHeaders,
	    contentLengthHeader,
	    contentStr);
    // Free whatever "setRequestFields()"/helpers allocated for us:
    delete[] authenticatorStr;
    if (cmdURLWasAllocated) delete[] cmdURL;
    if (extraHeadersWereAllocated) delete[] extraHeaders;
    if (contentLengthHeaderWasAllocated) delete[] contentLengthHeader;

    if (fVerbosityLevel >= 1) envir() << "Sending request: " << cmd << "\n";

    if (fTunnelOverHTTPPortNum != 0 && strcmp(request->commandName(), "GET") != 0 && strcmp(request->commandName(), "POST") != 0) {
      // When we're tunneling RTSP-over-HTTP, we Base-64-encode the request before we send it.
      // (However, we don't do this for the HTTP "GET" and "POST" commands that we use to set up the tunnel.)
      char* origCmd = cmd;
      cmd = base64Encode(origCmd, strlen(cmd));
      if (fVerbosityLevel >= 1) envir() << "\tThe request was base-64 encoded to: " << cmd << "\n\n";
      delete[] origCmd;
    }

    if (write(cmd, strlen(cmd)) < 0) {
      // The send failed; report the error (including the command name) to the environment:
      char const* errFmt = "%s write() failed: ";
      unsigned const errLength = strlen(errFmt) + strlen(request->commandName());
      char* err = new char[errLength];
      sprintf(err, errFmt, request->commandName());
      envir().setResultErrMsg(err);
      delete[] err;
      break;
    }

    // The command send succeeded, so enqueue the request record, so that its response (when it comes) can be handled.
    // However, note that we do not expect a response to a POST command with RTSP-over-HTTP, so don't enqueue that.
    int cseq = request->cseq();

    if (fTunnelOverHTTPPortNum == 0 || strcmp(request->commandName(), "POST") != 0) {
      fRequestsAwaitingResponse.enqueue(request);
    } else {
      delete request;
    }

    delete[] cmd;
    return cseq;
  } while (0);

  // An error occurred, so call the response handler immediately (indicating the error):
  delete[] cmd;
  handleRequestError(request);
  delete request;
  return 0;
}
+
+static char* createSessionString(char const* sessionId) {
+ char* sessionStr;
+ if (sessionId != NULL) {
+ sessionStr = new char[20+strlen(sessionId)];
+ sprintf(sessionStr, "Session: %s\r\n", sessionId);
+ } else {
+ sessionStr = strDup("");
+ }
+ return sessionStr;
+}
+
+// Add support for faster download thru "speed:" option on PLAY
+static char* createSpeedString(float speed) {
+ char buf[100];
+ if (speed == 1.0f ) {
+ // This is the default value; we don't need a "Speed:" header:
+ buf[0] = '\0';
+ } else {
+ sprintf(buf, "Speed: %.3f\r\n",speed);
+ }
+
+ return strDup(buf);
+}
+
+static char* createScaleString(float scale, float currentScale) {
+ char buf[100];
+ if (scale == 1.0f && currentScale == 1.0f) {
+ // This is the default value; we don't need a "Scale:" header:
+ buf[0] = '\0';
+ } else {
+ Locale l("C", Numeric);
+ sprintf(buf, "Scale: %f\r\n", scale);
+ }
+
+ return strDup(buf);
+}
+
+static char* createRangeString(double start, double end, char const* absStartTime, char const* absEndTime) {
+ char buf[100];
+
+ if (absStartTime != NULL) {
+ // Create a "Range:" header that specifies 'absolute' time values:
+
+ if (absEndTime == NULL) {
+ // There's no end time:
+ snprintf(buf, sizeof buf, "Range: clock=%s-\r\n", absStartTime);
+ } else {
+ // There's both a start and an end time; include them both in the "Range:" hdr
+ snprintf(buf, sizeof buf, "Range: clock=%s-%s\r\n", absStartTime, absEndTime);
+ }
+ } else {
+ // Create a "Range:" header that specifies relative (i.e., NPT) time values:
+
+ if (start < 0) {
+ // We're resuming from a PAUSE; there's no "Range:" header at all
+ buf[0] = '\0';
+ } else if (end < 0) {
+ // There's no end time:
+ Locale l("C", Numeric);
+ sprintf(buf, "Range: npt=%.3f-\r\n", start);
+ } else {
+ // There's both a start and an end time; include them both in the "Range:" hdr
+ Locale l("C", Numeric);
+ sprintf(buf, "Range: npt=%.3f-%.3f\r\n", start, end);
+ }
+ }
+
+ return strDup(buf);
+}
+
// Fills in the command-specific output parameters — "cmdURL", "protocolStr",
// "extraHeaders", and their 'was allocated' flags — for the command in
// "request", prior to the request being formatted and sent.
// Returns False (with a result message set) on failure.  On success, any
// output whose 'was allocated' flag is True is heap-allocated, and the caller
// must delete[] it after use.
Boolean RTSPClient::setRequestFields(RequestRecord* request,
				     char*& cmdURL, Boolean& cmdURLWasAllocated,
				     char const*& protocolStr,
				     char*& extraHeaders, Boolean& extraHeadersWereAllocated
				     ) {
  // Set various fields that will appear in our outgoing request, depending upon the particular command that we are sending.

  if (strcmp(request->commandName(), "DESCRIBE") == 0) {
    extraHeaders = (char*)"Accept: application/sdp\r\n";
  } else if (strcmp(request->commandName(), "OPTIONS") == 0) {
    // If we're currently part of a session, create a "Session:" header (in case the server wants this to indicate
    // client 'liveness'); this makes up our 'extra headers':
    extraHeaders = createSessionString(fLastSessionId);
    extraHeadersWereAllocated = True;
  } else if (strcmp(request->commandName(), "ANNOUNCE") == 0) {
    extraHeaders = (char*)"Content-Type: application/sdp\r\n";
  } else if (strcmp(request->commandName(), "SETUP") == 0) {
    MediaSubsession& subsession = *request->subsession();
    // The low bits of "booleanFlags()" encode the SETUP options:
    Boolean streamUsingTCP = (request->booleanFlags()&0x1) != 0;
    Boolean streamOutgoing = (request->booleanFlags()&0x2) != 0;
    Boolean forceMulticastOnUnspecified = (request->booleanFlags()&0x4) != 0;

    char const *prefix, *separator, *suffix;
    constructSubsessionURL(subsession, prefix, separator, suffix);

    // Choose the "Transport:" format string based on the subsession's protocol:
    char const* transportFmt;
    if (strcmp(subsession.protocolName(), "RTP") == 0) {
      transportFmt = "Transport: RTP/AVP%s%s%s=%d-%d\r\n";
    } else if (strcmp(subsession.protocolName(), "SRTP") == 0) {
      transportFmt = "Transport: RTP/SAVP%s%s%s=%d-%d\r\n";
    } else { // "UDP"
      suffix = "";
      transportFmt = "Transport: RAW/RAW/UDP%s%s%s=%d-%d\r\n";
    }

    cmdURL = new char[strlen(prefix) + strlen(separator) + strlen(suffix) + 1];
    cmdURLWasAllocated = True;
    sprintf(cmdURL, "%s%s%s", prefix, separator, suffix);

    // Construct a "Transport:" header.
    char const* transportTypeStr;
    char const* modeStr = streamOutgoing ? ";mode=receive" : "";
    // Note: I think the above is nonstandard, but DSS wants it this way
    char const* portTypeStr;
    portNumBits rtpNumber, rtcpNumber;
    if (streamUsingTCP) { // streaming over the RTSP connection
      transportTypeStr = "/TCP;unicast";
      portTypeStr = ";interleaved";
      // Allocate the next two TCP 'channel ids' for RTP and RTCP:
      rtpNumber = fTCPStreamIdCount++;
      rtcpNumber = fTCPStreamIdCount++;
    } else { // normal RTP streaming
      unsigned connectionAddress = subsession.connectionEndpointAddress();
      Boolean requestMulticastStreaming
	= IsMulticastAddress(connectionAddress) || (connectionAddress == 0 && forceMulticastOnUnspecified);
      transportTypeStr = requestMulticastStreaming ? ";multicast" : ";unicast";
      portTypeStr = requestMulticastStreaming ? ";port" : ";client_port";
      rtpNumber = subsession.clientPortNum();
      if (rtpNumber == 0) {
	envir().setResultMsg("Client port number unknown\n");
	delete[] cmdURL;
	return False;
      }
      rtcpNumber = subsession.rtcpIsMuxed() ? rtpNumber : rtpNumber + 1;
    }
    unsigned transportSize = strlen(transportFmt)
      + strlen(transportTypeStr) + strlen(modeStr) + strlen(portTypeStr) + 2*5 /* max port len */;
    char* transportStr = new char[transportSize];
    sprintf(transportStr, transportFmt,
	    transportTypeStr, modeStr, portTypeStr, rtpNumber, rtcpNumber);

    // When sending more than one "SETUP" request, include a "Session:" header in the 2nd and later commands:
    char* sessionStr = createSessionString(fLastSessionId);

    // Optionally include a "Blocksize:" string:
    char* blocksizeStr = createBlocksizeString(streamUsingTCP);

    // Optionally include a "KeyMgmt:" string:
    char* keyMgmtStr = createKeyMgmtString(cmdURL, subsession);

    // The "Transport:", "Session:" (if present), "Blocksize:" (if present), and "KeyMgmt:" (if present)
    // headers make up the 'extra headers':
    extraHeaders = new char[transportSize + strlen(sessionStr) + strlen(blocksizeStr) + strlen(keyMgmtStr) + 1];
    extraHeadersWereAllocated = True;
    sprintf(extraHeaders, "%s%s%s%s", transportStr, sessionStr, blocksizeStr, keyMgmtStr);
    delete[] transportStr; delete[] sessionStr; delete[] blocksizeStr; delete[] keyMgmtStr;
  } else if (strcmp(request->commandName(), "GET") == 0 || strcmp(request->commandName(), "POST") == 0) {
    // We will be sending a HTTP (not a RTSP) request.
    // Begin by re-parsing our RTSP URL, to get the stream name (which we'll use as our 'cmdURL'
    // in the subsequent request), and the server address (which we'll use in a "Host:" header):
    char* username;
    char* password;
    NetAddress destAddress;
    portNumBits urlPortNum;
    if (!parseRTSPURL(fBaseURL, username, password, destAddress, urlPortNum, (char const**)&cmdURL)) return False;
    if (cmdURL[0] == '\0') cmdURL = (char*)"/";
    delete[] username;
    delete[] password;
    netAddressBits serverAddress = *(netAddressBits*)(destAddress.data());
    AddressString serverAddressString(serverAddress);

    protocolStr = "HTTP/1.1";

    if (strcmp(request->commandName(), "GET") == 0) {
      // Create a 'session cookie' string, using MD5:
      struct {
	struct timeval timestamp;
	unsigned counter;
      } seedData;
      gettimeofday(&seedData.timestamp, NULL);
      seedData.counter = ++fSessionCookieCounter;
      our_MD5Data((unsigned char*)(&seedData), sizeof seedData, fSessionCookie);
      // DSS seems to require that the 'session cookie' string be 22 bytes long:
      // NOTE(review): this truncates at index 23 (a 23-char string), not 22 —
      // confirm whether the comment or the index is what's intended.
      fSessionCookie[23] = '\0';

      char const* const extraHeadersFmt =
	"Host: %s\r\n"
	"x-sessioncookie: %s\r\n"
	"Accept: application/x-rtsp-tunnelled\r\n"
	"Pragma: no-cache\r\n"
	"Cache-Control: no-cache\r\n";
      // (strlen(extraHeadersFmt) includes the "%s" placeholders, which covers the '\0')
      unsigned extraHeadersSize = strlen(extraHeadersFmt)
	+ strlen(serverAddressString.val())
	+ strlen(fSessionCookie);
      extraHeaders = new char[extraHeadersSize];
      extraHeadersWereAllocated = True;
      sprintf(extraHeaders, extraHeadersFmt,
	      serverAddressString.val(),
	      fSessionCookie);
    } else { // "POST"
      char const* const extraHeadersFmt =
	"Host: %s\r\n"
	"x-sessioncookie: %s\r\n"
	"Content-Type: application/x-rtsp-tunnelled\r\n"
	"Pragma: no-cache\r\n"
	"Cache-Control: no-cache\r\n"
	"Content-Length: 32767\r\n"
	"Expires: Sun, 9 Jan 1972 00:00:00 GMT\r\n";
      unsigned extraHeadersSize = strlen(extraHeadersFmt)
	+ strlen(serverAddressString.val())
	+ strlen(fSessionCookie);
      extraHeaders = new char[extraHeadersSize];
      extraHeadersWereAllocated = True;
      sprintf(extraHeaders, extraHeadersFmt,
	      serverAddressString.val(),
	      fSessionCookie);
    }
  } else { // "PLAY", "PAUSE", "TEARDOWN", "RECORD", "SET_PARAMETER", "GET_PARAMETER"
    // First, make sure that we have a RTSP session in progress
    if (fLastSessionId == NULL) {
      envir().setResultMsg("No RTSP session is currently in progress\n");
      return False;
    }

    char const* sessionId;
    float originalScale;
    if (request->session() != NULL) {
      // Session-level operation
      cmdURL = (char*)sessionURL(*request->session());

      sessionId = fLastSessionId;
      originalScale = request->session()->scale();
    } else {
      // Media-level operation
      char const *prefix, *separator, *suffix;
      constructSubsessionURL(*request->subsession(), prefix, separator, suffix);
      cmdURL = new char[strlen(prefix) + strlen(separator) + strlen(suffix) + 1];
      cmdURLWasAllocated = True;
      sprintf(cmdURL, "%s%s%s", prefix, separator, suffix);

      sessionId = request->subsession()->sessionId();
      originalScale = request->subsession()->scale();
    }

    if (strcmp(request->commandName(), "PLAY") == 0) {
      // Create possible "Session:", "Scale:", "Speed:", and "Range:" headers;
      // these make up the 'extra headers':
      char* sessionStr = createSessionString(sessionId);
      char* scaleStr = createScaleString(request->scale(), originalScale);
      float speed = request->session() != NULL ? request->session()->speed() : request->subsession()->speed();
      char* speedStr = createSpeedString(speed);
      char* rangeStr = createRangeString(request->start(), request->end(), request->absStartTime(), request->absEndTime());
      extraHeaders = new char[strlen(sessionStr) + strlen(scaleStr) + strlen(speedStr) + strlen(rangeStr) + 1];
      extraHeadersWereAllocated = True;
      sprintf(extraHeaders, "%s%s%s%s", sessionStr, scaleStr, speedStr, rangeStr);
      delete[] sessionStr; delete[] scaleStr; delete[] speedStr; delete[] rangeStr;
    } else {
      // Create a "Session:" header; this makes up our 'extra headers':
      extraHeaders = createSessionString(sessionId);
      extraHeadersWereAllocated = True;
    }
  }

  return True;
}
+
// Run-time type check: identifies this object as a RTSP client.
Boolean RTSPClient::isRTSPClient() const {
  return True;
}
+
// Closes our TCP socket(s) — input and, if distinct (as with RTSP-over-HTTP
// tunneling), output — after removing any background/byte handlers registered
// on them, then marks both socket numbers as invalid (-1).
void RTSPClient::resetTCPSockets() {
  if (fInputSocketNum >= 0) {
    RTPInterface::clearServerRequestAlternativeByteHandler(envir(), fInputSocketNum); // in case we were receiving RTP-over-TCP
    envir().taskScheduler().disableBackgroundHandling(fInputSocketNum);
    ::closeSocket(fInputSocketNum);
    if (fOutputSocketNum != fInputSocketNum) {
      // A separate output socket exists (RTSP-over-HTTP tunneling); close it too:
      envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum);
      ::closeSocket(fOutputSocketNum);
    }
  }
  fInputSocketNum = fOutputSocketNum = -1;
}
+
// Marks the response buffer as empty, so the next read starts at its beginning.
void RTSPClient::resetResponseBuffer() {
  fResponseBytesAlreadySeen = 0;
  fResponseBufferBytesLeft = responseBufferSize;
}
+
// Opens a TCP connection to the server named in our base URL (or to the HTTP
// tunnel port, if tunneling).  Returns 1 if the connection completed, 0 if it
// is still pending (to be completed by "connectionHandler()"), or -1 on error
// (in which case any partially-opened sockets are closed).
int RTSPClient::openConnection() {
  do {
    // Set up a connection to the server.  Begin by parsing the URL:

    char* username;
    char* password;
    NetAddress destAddress;
    portNumBits urlPortNum;
    char const* urlSuffix;
    if (!parseRTSPURL(fBaseURL, username, password, destAddress, urlPortNum, &urlSuffix)) break;
    portNumBits destPortNum = fTunnelOverHTTPPortNum == 0 ? urlPortNum : fTunnelOverHTTPPortNum;
    if (destPortNum == 322) useTLS(); // port 322 is a special case: "rtsps"

    if (username != NULL || password != NULL) {
      // The URL carried credentials; record them for authentication, then
      // free the strings that "parseRTSPURL()" allocated:
      fCurrentAuthenticator.setUsernameAndPassword(username, password);
      delete[] username;
      delete[] password;
    }

    // We don't yet have a TCP socket (or we used to have one, but it got closed). Set it up now.
    fInputSocketNum = setupStreamSocket(envir(), 0);
    if (fInputSocketNum < 0) break;
    ignoreSigPipeOnSocket(fInputSocketNum); // so that servers on the same host that get killed don't also kill us
    if (fOutputSocketNum < 0) fOutputSocketNum = fInputSocketNum;
    envir() << "Created new TCP socket " << fInputSocketNum << " for connection\n";

    // Connect to the remote endpoint:
    fServerAddress = *(netAddressBits*)(destAddress.data());
    int connectResult = connectToServer(fInputSocketNum, destPortNum);
    if (connectResult < 0) break;
    else if (connectResult > 0) {
      if (fTLS.isNeeded) {
	// We need to complete an additional TLS connection:
	connectResult = fTLS.connect(fInputSocketNum);
	if (connectResult < 0) break;
	if (connectResult > 0 && fVerbosityLevel >= 1) envir() << "...TLS connection completed\n";
      }

      if (connectResult > 0 && fVerbosityLevel >= 1) envir() << "...local connection opened\n";
    }

    // Propagate "connectToServer()"'s result: 1 == connected, 0 == pending:
    return connectResult;
  } while (0);

  resetTCPSockets();
  return -1;
}
+
// Initiates a TCP connection on "socketNum" to "fServerAddress":"remotePortNum".
// Returns 1 if the connection completed immediately (read handling is then
// armed on the socket), 0 if the connect is in progress (a write/exception
// handler is armed to complete it later), or -1 on error.
int RTSPClient::connectToServer(int socketNum, portNumBits remotePortNum) {
  MAKE_SOCKADDR_IN(remoteName, fServerAddress, htons(remotePortNum));
  if (fVerbosityLevel >= 1) {
    envir() << "Connecting to " << AddressString(remoteName).val() << ", port " << remotePortNum << " on socket " << socketNum << "...\n";
  }
  if (connect(socketNum, (struct sockaddr*) &remoteName, sizeof remoteName) != 0) {
    int const err = envir().getErrno();
    if (err == EINPROGRESS || err == EWOULDBLOCK) {
      // The connection is pending; we'll need to handle it later.  Wait for our socket to be 'writable', or have an exception.
      envir().taskScheduler().setBackgroundHandling(socketNum, SOCKET_WRITABLE|SOCKET_EXCEPTION,
						    (TaskScheduler::BackgroundHandlerProc*)&connectionHandler, this);
      return 0;
    }
    envir().setResultErrMsg("connect() failed: ");
    if (fVerbosityLevel >= 1) envir() << "..." << envir().getResultMsg() << "\n";
    return -1;
  }

  // The connection succeeded.  Arrange to handle responses to requests sent on it:
  envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
						(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);

  return 1;
}
+
// Returns a heap-allocated "Authorization:" header line for command "cmd" on
// "url" — Digest authentication if a nonce is available, otherwise Basic —
// or an empty (heap-allocated) string if we have no filled-in authenticator.
// The caller must delete[] the result.
char* RTSPClient::createAuthenticatorString(char const* cmd, char const* url) {
  Authenticator& auth = fCurrentAuthenticator; // alias, for brevity
  if (auth.realm() != NULL && auth.username() != NULL && auth.password() != NULL) {
    // We have a filled-in authenticator, so use it:
    char* authenticatorStr;
    if (auth.nonce() != NULL) { // Digest authentication
      char const* const authFmt =
	"Authorization: Digest username=\"%s\", realm=\"%s\", "
	"nonce=\"%s\", uri=\"%s\", response=\"%s\"\r\n";
      char const* response = auth.computeDigestResponse(cmd, url);
      // (strlen(authFmt) includes the "%s" placeholders, which covers the '\0')
      unsigned authBufSize = strlen(authFmt)
	+ strlen(auth.username()) + strlen(auth.realm())
	+ strlen(auth.nonce()) + strlen(url) + strlen(response);
      authenticatorStr = new char[authBufSize];
      sprintf(authenticatorStr, authFmt,
	      auth.username(), auth.realm(),
	      auth.nonce(), url, response);
      auth.reclaimDigestResponse(response);
    } else { // Basic authentication
      char const* const authFmt = "Authorization: Basic %s\r\n";

      // Base-64-encode "<username>:<password>":
      unsigned usernamePasswordLength = strlen(auth.username()) + 1 + strlen(auth.password());
      char* usernamePassword = new char[usernamePasswordLength+1];
      sprintf(usernamePassword, "%s:%s", auth.username(), auth.password());

      char* response = base64Encode(usernamePassword, usernamePasswordLength);
      unsigned const authBufSize = strlen(authFmt) + strlen(response) + 1;
      authenticatorStr = new char[authBufSize];
      sprintf(authenticatorStr, authFmt, response);
      delete[] response; delete[] usernamePassword;
    }

    return authenticatorStr;
  }

  // We don't have a (filled-in) authenticator.
  return strDup("");
}
+
+char* RTSPClient::createBlocksizeString(Boolean streamUsingTCP) {
+ char* blocksizeStr;
+ u_int16_t maxPacketSize = desiredMaxIncomingPacketSize;
+
+ // Allow for the RTP header (if streaming over TCP)
+ // or the IP/UDP/RTP headers (if streaming over UDP):
+ u_int16_t const headerAllowance = streamUsingTCP ? 12 : 50/*conservative*/;
+ if (maxPacketSize < headerAllowance) {
+ maxPacketSize = 0;
+ } else {
+ maxPacketSize -= headerAllowance;
+ }
+
+ if (maxPacketSize > 0) {
+ blocksizeStr = new char[25]; // more than enough space
+ sprintf(blocksizeStr, "Blocksize: %u\r\n", maxPacketSize);
+ } else {
+ blocksizeStr = strDup("");
+ }
+ return blocksizeStr;
+}
+
// Returns a heap-allocated "KeyMgmt:" (MIKEY) header line for "url", using the
// subsession's MIKEY state, or an empty (heap-allocated) string if the
// subsession has no MIKEY state or no message could be generated.
// The caller must delete[] the result.
char* RTSPClient::createKeyMgmtString(char const* url, MediaSubsession const& subsession) {
  char* keyMgmtStr;
  MIKEYState* mikeyState;
  u_int8_t* mikeyMessage;
  unsigned mikeyMessageSize;

  if ((mikeyState = subsession.getMIKEYState()) == NULL ||
      (mikeyMessage = mikeyState->generateMessage(mikeyMessageSize)) == NULL) {
    keyMgmtStr = strDup("");
  } else {
    char const* keyMgmtFmt = "KeyMgmt: prot=mikey; uri=\"%s\"; data=\"%s\"\r\n";
    char* base64EncodedData = base64Encode((char*)mikeyMessage, mikeyMessageSize);
    // (strlen(keyMgmtFmt) includes the "%s" placeholders, which covers the '\0')
    unsigned keyMgmtSize = strlen(keyMgmtFmt)
      + strlen(url) + strlen(base64EncodedData);
    keyMgmtStr = new char[keyMgmtSize];
    sprintf(keyMgmtStr, keyMgmtFmt,
	    url, base64EncodedData);
    delete[] base64EncodedData;
    // NOTE(review): "mikeyMessage" is not freed here.  If
    // "MIKEYState::generateMessage()" transfers ownership of the returned
    // buffer, this is a leak — confirm against that function's contract.
  }

  return keyMgmtStr;
}
+
// Reports a failed request to its response handler (if any), passing a
// negative errno-style result code and a heap-allocated copy of the current
// result message.  (The handler is responsible for the message string.)
void RTSPClient::handleRequestError(RequestRecord* request) {
  int resultCode = -envir().getErrno();
  if (resultCode == 0) {
    // Choose some generic error code instead:
#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
    resultCode = -WSAENOTCONN;
#else
    resultCode = -ENOTCONN;
#endif
  }
  if (request->handler() != NULL) (*request->handler())(this, resultCode, strDup(envir().getResultMsg()));
}
+
+Boolean RTSPClient
+::parseResponseCode(char const* line, unsigned& responseCode, char const*& responseString) {
+ if (sscanf(line, "RTSP/%*s%u", &responseCode) != 1 &&
+ sscanf(line, "HTTP/%*s%u", &responseCode) != 1) return False;
+ // Note: We check for HTTP responses as well as RTSP responses, both in order to setup RTSP-over-HTTP tunneling,
+ // and so that we get back a meaningful error if the client tried to mistakenly send a RTSP command to a HTTP-only server.
+
+ // Use everything after the RTSP/* (or HTTP/*) as the response string:
+ responseString = line;
+ while (responseString[0] != '\0' && responseString[0] != ' ' && responseString[0] != '\t') ++responseString;
+ while (responseString[0] != '\0' && (responseString[0] == ' ' || responseString[0] == '\t')) ++responseString; // skip whitespace
+
+ return True;
+}
+
// Handles a RTSP request that the *server* sent to *us* (the reverse of the
// usual direction): parse it, and reply "405 Method Not Allowed" (echoing its
// CSeq), since we don't support any server-initiated commands.
void RTSPClient::handleIncomingRequest() {
  // Parse the request string into command name and 'CSeq', then 'handle' the command (by responding that we don't support it):
  char cmdName[RTSP_PARAM_STRING_MAX];
  char urlPreSuffix[RTSP_PARAM_STRING_MAX];
  char urlSuffix[RTSP_PARAM_STRING_MAX];
  char cseq[RTSP_PARAM_STRING_MAX];
  char sessionId[RTSP_PARAM_STRING_MAX];
  unsigned contentLength;
  if (!parseRTSPRequestString(fResponseBuffer, fResponseBytesAlreadySeen,
			      cmdName, sizeof cmdName,
			      urlPreSuffix, sizeof urlPreSuffix,
			      urlSuffix, sizeof urlSuffix,
			      cseq, sizeof cseq,
			      sessionId, sizeof sessionId,
			      contentLength)) {
    // Not parseable as a RTSP request; ignore it:
    return;
  } else {
    if (fVerbosityLevel >= 1) {
      envir() << "Received incoming RTSP request: " << fResponseBuffer << "\n";
    }
    char tmpBuf[2*RTSP_PARAM_STRING_MAX];
    snprintf(tmpBuf, sizeof tmpBuf,
             "RTSP/1.0 405 Method Not Allowed\r\nCSeq: %s\r\n\r\n", cseq);
    write(tmpBuf, strlen(tmpBuf));
  }
}
+
+Boolean RTSPClient::checkForHeader(char const* line, char const* headerName, unsigned headerNameLength, char const*& headerParams) {
+ if (_strncasecmp(line, headerName, headerNameLength) != 0) return False;
+
+ // The line begins with the desired header name. Trim off any whitespace, and return the header parameters:
+ unsigned paramIndex = headerNameLength;
+ while (line[paramIndex] != '\0' && (line[paramIndex] == ' ' || line[paramIndex] == '\t')) ++paramIndex;
+ if (line[paramIndex] == '\0') return False; // the header is assumed to be bad if it has no parameters
+
+ headerParams = &line[paramIndex];
+ return True;
+}
+
// Parses a "Transport:" response header's ';'-separated parameters.
// On success, returns True with: "serverAddressStr" set to a heap-allocated
// server (or multicast destination) address — or NULL if none was given —
// "serverPortNum" set to the server (or multicast, or fallback client) RTP
// port, and "rtpChannelId"/"rtcpChannelId" set to the interleaved TCP channel
// ids (0xFF if not interleaved).  The caller owns "serverAddressStr".
Boolean RTSPClient::parseTransportParams(char const* paramsStr,
					 char*& serverAddressStr, portNumBits& serverPortNum,
					 unsigned char& rtpChannelId, unsigned char& rtcpChannelId) {
  // Initialize the return parameters to 'not found' values:
  serverAddressStr = NULL;
  serverPortNum = 0;
  rtpChannelId = rtcpChannelId = 0xFF;
  if (paramsStr == NULL) return False;

  char* foundServerAddressStr = NULL;
  Boolean foundServerPortNum = False;
  portNumBits clientPortNum = 0;
  Boolean foundClientPortNum = False;
  Boolean foundChannelIds = False;
  unsigned rtpCid, rtcpCid;
  Boolean isMulticast = True; // by default
  char* foundDestinationStr = NULL;
  portNumBits multicastPortNumRTP, multicastPortNumRTCP;
  Boolean foundMulticastPortNum = False;

  // Run through each of the parameters, looking for ones that we handle:
  char const* fields = paramsStr;
  char* field = strDupSize(fields); // scratch buffer, large enough for any single field
  while (sscanf(fields, "%[^;]", field) == 1) {
    if (sscanf(field, "server_port=%hu", &serverPortNum) == 1) {
      foundServerPortNum = True;
    } else if (sscanf(field, "client_port=%hu", &clientPortNum) == 1) {
      foundClientPortNum = True;
    } else if (_strncasecmp(field, "source=", 7) == 0) {
      delete[] foundServerAddressStr;
      foundServerAddressStr = strDup(field+7);
    } else if (sscanf(field, "interleaved=%u-%u", &rtpCid, &rtcpCid) == 2) {
      rtpChannelId = (unsigned char)rtpCid;
      rtcpChannelId = (unsigned char)rtcpCid;
      foundChannelIds = True;
    } else if (strcmp(field, "unicast") == 0) {
      isMulticast = False;
    } else if (_strncasecmp(field, "destination=", 12) == 0) {
      delete[] foundDestinationStr;
      foundDestinationStr = strDup(field+12);
    } else if (sscanf(field, "port=%hu-%hu", &multicastPortNumRTP, &multicastPortNumRTCP) == 2 ||
	       sscanf(field, "port=%hu", &multicastPortNumRTP) == 1) {
      foundMulticastPortNum = True;
    }

    // Advance past this field, to the start of the next one:
    fields += strlen(field);
    while (fields[0] == ';') ++fields; // skip over all leading ';' chars
    if (fields[0] == '\0') break;
  }
  delete[] field;

  // If we're multicast, and have a "destination=" (multicast) address, then use this
  // as the 'server' address (because some weird servers don't specify the multicast
  // address earlier, in the "DESCRIBE" response's SDP):
  if (isMulticast && foundDestinationStr != NULL && foundMulticastPortNum) {
    delete[] foundServerAddressStr;
    serverAddressStr = foundDestinationStr; // ownership passes to the caller
    serverPortNum = multicastPortNumRTP;
    return True;
  }
  delete[] foundDestinationStr;

  // We have a valid "Transport:" header if any of the following are true:
  //   - We saw a "interleaved=" field, indicating RTP/RTCP-over-TCP streaming, or
  //   - We saw a "server_port=" field, or
  //   - We saw a "client_port=" field.
  //     If we didn't also see a "server_port=" field, then the server port is assumed to be the same as the client port.
  if (foundChannelIds || foundServerPortNum || foundClientPortNum) {
    if (foundClientPortNum && !foundServerPortNum) {
      serverPortNum = clientPortNum;
    }
    serverAddressStr = foundServerAddressStr; // ownership passes to the caller
    return True;
  }

  delete[] foundServerAddressStr;
  return False;
}
+
+Boolean RTSPClient::parseScaleParam(char const* paramStr, float& scale) {
+ Locale l("C", Numeric);
+ return sscanf(paramStr, "%f", &scale) == 1;
+}
+
+Boolean RTSPClient::parseSpeedParam(char const* paramStr, float& speed) {
+ Locale l("C", Numeric);
+ return sscanf(paramStr, "%f", &speed) >= 1;
+}
+
// Parses one ','-delimited entry of a "RTP-Info:" header, extracting its
// "seq=" and "rtptime=" parameters into "seqNum"/"timestamp".
// "paramsStr" is advanced past the parsed entry (so this can be called
// repeatedly, once per subsession).  Returns True only if BOTH parameters
// were seen.
Boolean RTSPClient::parseRTPInfoParams(char const*& paramsStr, u_int16_t& seqNum, u_int32_t& timestamp) {
  if (paramsStr == NULL || paramsStr[0] == '\0') return False;
  while (paramsStr[0] == ',') ++paramsStr;

  // "paramsStr" now consists of a ';'-separated list of parameters, ending with ',' or '\0'.
  char* field = strDupSize(paramsStr); // scratch buffer, large enough for any single field

  Boolean sawSeq = False, sawRtptime = False;
  while (sscanf(paramsStr, "%[^;,]", field) == 1) {
    if (sscanf(field, "seq=%hu", &seqNum) == 1) {
      sawSeq = True;
    } else if (sscanf(field, "rtptime=%u", &timestamp) == 1) {
      sawRtptime = True;
    }

    paramsStr += strlen(field);
    if (paramsStr[0] == '\0' || paramsStr[0] == ',') break;
    // ASSERT: paramsStr[0] == ';'
    ++paramsStr; // skip over the ';'
  }

  delete[] field;
  // For the "RTP-Info:" parameters to be useful to us, we need to have seen both the "seq=" and "rtptime=" parameters:
  return sawSeq && sawRtptime;
}
+
// Processes a "SETUP" response: records the session id (and optional
// "timeout" parameter), parses the "Transport:" header into the subsession
// (server address/port, or TCP channel ids), and wires up the subsession's
// RTP/RTCP sources for either RTP-over-TCP or normal UDP streaming.
// Returns True on success; on failure a result message is set.
Boolean RTSPClient::handleSETUPResponse(MediaSubsession& subsession, char const* sessionParamsStr, char const* transportParamsStr,
					Boolean streamUsingTCP) {
  char* sessionId = new char[responseBufferSize]; // ensures we have enough space
  Boolean success = False;
  do {
    // Check for a session id:
    if (sessionParamsStr == NULL || sscanf(sessionParamsStr, "%[^;]", sessionId) != 1) {
      envir().setResultMsg("Missing or bad \"Session:\" header");
      break;
    }
    subsession.setSessionId(sessionId);
    delete[] fLastSessionId; fLastSessionId = strDup(sessionId);

    // Also look for an optional "; timeout = " parameter following this:
    char const* afterSessionId = sessionParamsStr + strlen(sessionId);
    int timeoutVal;
    if (sscanf(afterSessionId, "; timeout = %d", &timeoutVal) == 1) {
      fSessionTimeoutParameter = timeoutVal;
    }

    // Parse the "Transport:" header parameters:
    char* serverAddressStr;
    portNumBits serverPortNum;
    unsigned char rtpChannelId, rtcpChannelId;
    if (!parseTransportParams(transportParamsStr, serverAddressStr, serverPortNum, rtpChannelId, rtcpChannelId)) {
      envir().setResultMsg("Missing or bad \"Transport:\" header");
      break;
    }
    // "serverAddressStr" is heap-allocated; the subsession takes ownership:
    delete[] subsession.connectionEndpointName();
    subsession.connectionEndpointName() = serverAddressStr;
    subsession.serverPortNum = serverPortNum;
    subsession.rtpChannelId = rtpChannelId;
    subsession.rtcpChannelId = rtcpChannelId;

    if (streamUsingTCP) {
      // Tell the subsession to receive RTP (and send/receive RTCP) over the RTSP stream:
      if (subsession.rtpSource() != NULL) {
	subsession.rtpSource()->setStreamSocket(fInputSocketNum, subsession.rtpChannelId);
	  // So that we continue to receive & handle RTSP commands and responses from the server
	subsession.rtpSource()->enableRTCPReports() = False;
	  // To avoid confusing the server (which won't start handling RTP/RTCP-over-TCP until "PLAY"), don't send RTCP "RR"s yet
	increaseReceiveBufferTo(envir(), fInputSocketNum, 50*1024);
      }
      if (subsession.rtcpInstance() != NULL) subsession.rtcpInstance()->setStreamSocket(fInputSocketNum, subsession.rtcpChannelId);
      RTPInterface::setServerRequestAlternativeByteHandler(envir(), fInputSocketNum, handleAlternativeRequestByte, this);
    } else {
      // Normal case.
      // Set the RTP and RTCP sockets' destination address and port from the information in the SETUP response (if present):
      netAddressBits destAddress = subsession.connectionEndpointAddress();
      if (destAddress == 0) destAddress = fServerAddress;
      subsession.setDestinations(destAddress);
    }

    success = True;
  } while (0);

  delete[] sessionId;
  return success;
}
+
// Processes a "PLAY" response: applies any "Scale:", "Speed:", "Range:", and
// "RTP-Info:" header values to either the whole "session" (if non-NULL) or to
// the single "subsession", and (re-)enables RTCP "RR" reports on each affected
// subsession's RTP source.  Returns True on success; on failure, sets a result
// message naming the first header that failed to parse.
Boolean RTSPClient::handlePLAYResponse(MediaSession* session, MediaSubsession* subsession,
                                       char const* scaleParamsStr, char const* speedParamsStr,
                                       char const* rangeParamsStr, char const* rtpInfoParamsStr) {
  Boolean scaleOK = False, rangeOK = False, speedOK = False;
  do {
    if (session != NULL) {
      // The command was on the whole session
      if (scaleParamsStr != NULL && !parseScaleParam(scaleParamsStr, session->scale())) break;
      scaleOK = True;
      if (speedParamsStr != NULL && !parseSpeedParam(speedParamsStr, session->speed())) break;
      speedOK = True;
      Boolean startTimeIsNow;
      if (rangeParamsStr != NULL &&
	  !parseRangeParam(rangeParamsStr,
			   session->playStartTime(), session->playEndTime(),
			   session->_absStartTime(), session->_absEndTime(),
			   startTimeIsNow)) break;
      rangeOK = True;

      // Apply one "RTP-Info:" entry to each subsession, in order:
      MediaSubsessionIterator iter(*session);
      MediaSubsession* subsession;
      while ((subsession = iter.next()) != NULL) {
	u_int16_t seqNum; u_int32_t timestamp;
	subsession->rtpInfo.infoIsNew = False;
	if (parseRTPInfoParams(rtpInfoParamsStr, seqNum, timestamp)) {
	  subsession->rtpInfo.seqNum = seqNum;
	  subsession->rtpInfo.timestamp = timestamp;
	  subsession->rtpInfo.infoIsNew = True;
	}

	if (subsession->rtpSource() != NULL) subsession->rtpSource()->enableRTCPReports() = True; // start sending RTCP "RR"s now
      }
    } else {
      // The command was on a subsession
      if (scaleParamsStr != NULL && !parseScaleParam(scaleParamsStr, subsession->scale())) break;
      scaleOK = True;
      if (speedParamsStr != NULL && !parseSpeedParam(speedParamsStr, subsession->speed())) break;
      speedOK = True;
      Boolean startTimeIsNow;
      if (rangeParamsStr != NULL &&
	  !parseRangeParam(rangeParamsStr,
			   subsession->_playStartTime(), subsession->_playEndTime(),
			   subsession->_absStartTime(), subsession->_absEndTime(),
			   startTimeIsNow)) break;
      rangeOK = True;

      u_int16_t seqNum; u_int32_t timestamp;
      subsession->rtpInfo.infoIsNew = False;
      if (parseRTPInfoParams(rtpInfoParamsStr, seqNum, timestamp)) {
	subsession->rtpInfo.seqNum = seqNum;
	subsession->rtpInfo.timestamp = timestamp;
	subsession->rtpInfo.infoIsNew = True;
      }

      if (subsession->rtpSource() != NULL) subsession->rtpSource()->enableRTCPReports() = True; // start sending RTCP "RR"s now
    }

    return True;
  } while (0);

  // An error occurred: the flags tell us which header was the first to fail:
  if (!scaleOK) {
    envir().setResultMsg("Bad \"Scale:\" header");
  } else if (!speedOK) {
    envir().setResultMsg("Bad \"Speed:\" header");
  } else if (!rangeOK) {
    envir().setResultMsg("Bad \"Range:\" header");
  } else {
    envir().setResultMsg("Bad \"RTP-Info:\" header");
  }
  return False;
}
+
+// Handle a response to a "TEARDOWN" command. Always succeeds; both parameters are unused.
+Boolean RTSPClient::handleTEARDOWNResponse(MediaSession& /*session*/, MediaSubsession& /*subsession*/) {
+  // Because we don't expect to always get a response to "TEARDOWN", we don't need to do anything if we do get one:
+  return True;
+}
+
+// Post-process the body of a "GET_PARAMETER" response, in place.
+// "resultValueString" points into the response body (up to, but not including,
+// "resultValueStringEnd"); on success it is advanced past any echoed parameter
+// name (plus an optional ':' and whitespace), and trailing \r/\n characters are
+// trimmed by writing a '\0'. Returns False (with a result message) on a
+// malformed response.
+Boolean RTSPClient::handleGET_PARAMETERResponse(char const* parameterName, char*& resultValueString, char* resultValueStringEnd) {
+  do {
+    // If "parameterName" is non-empty, it may be (possibly followed by ':' and whitespace) at the start of the result string:
+    if (parameterName != NULL && parameterName[0] != '\0') {
+      if (parameterName[1] == '\0') break; // sanity check; there should have been \r\n at the end of "parameterName"
+
+      unsigned parameterNameLen = strlen(parameterName);
+      // ASSERT: parameterNameLen >= 2;
+      parameterNameLen -= 2; // because of the trailing \r\n
+      if (resultValueString + parameterNameLen > resultValueStringEnd) break; // not enough space
+      if (parameterNameLen > 0 && _strncasecmp(resultValueString, parameterName, parameterNameLen) == 0) {
+        // The response begins with an echo of the parameter name; skip past it:
+        resultValueString += parameterNameLen;
+        // ASSERT: resultValueString <= resultValueStringEnd
+        if (resultValueString == resultValueStringEnd) break;
+
+        if (resultValueString[0] == ':') ++resultValueString;
+        while (resultValueString < resultValueStringEnd
+               && (resultValueString[0] == ' ' || resultValueString[0] == '\t')) {
+          ++resultValueString;
+        }
+      }
+    }
+
+    // The rest of "resultValueStr" should be our desired result, but first trim off any \r and/or \n characters at the end:
+    // (Temporarily NUL-terminate so strlen() stops at the end of the body.)
+    char saved = *resultValueStringEnd;
+    *resultValueStringEnd = '\0';
+    unsigned resultLen = strlen(resultValueString);
+    *resultValueStringEnd = saved;
+
+    while (resultLen > 0 && (resultValueString[resultLen-1] == '\r' || resultValueString[resultLen-1] == '\n')) --resultLen;
+    resultValueString[resultLen] = '\0';
+
+    return True;
+  } while (0);
+
+  // An error occurred:
+  envir().setResultMsg("Bad \"GET_PARAMETER\" response");
+  return False;
+}
+
+// React to a "401 Unauthorized" response by parsing its "WWW-Authenticate:"
+// header parameters ("paramsStr") into "fCurrentAuthenticator".
+// Returns True iff retrying the command with authentication could now succeed
+// (i.e., the header parsed, we have a username+password, and either the realm
+// changed or the server marked our previous nonce as stale).
+Boolean RTSPClient::handleAuthenticationFailure(char const* paramsStr) {
+  if (paramsStr == NULL) return False; // There was no "WWW-Authenticate:" header; we can't proceed.
+
+  // Fill in "fCurrentAuthenticator" with the information from the "WWW-Authenticate:" header:
+  Boolean realmHasChanged = False; // by default
+  Boolean isStale = False; // by default
+  // "strDupSize()" allocates buffers as large as "paramsStr", so the sscanf()s below can't overflow them:
+  char* realm = strDupSize(paramsStr);
+  char* nonce = strDupSize(paramsStr);
+  char* stale = strDupSize(paramsStr);
+  Boolean success = True;
+  // Try the three accepted header forms, most-specific first:
+  if (sscanf(paramsStr, "Digest realm=\"%[^\"]\", nonce=\"%[^\"]\", stale=%[a-zA-Z]", realm, nonce, stale) == 3) {
+    realmHasChanged = fCurrentAuthenticator.realm() == NULL || strcmp(fCurrentAuthenticator.realm(), realm) != 0;
+    isStale = _strncasecmp(stale, "true", 4) == 0;
+    fCurrentAuthenticator.setRealmAndNonce(realm, nonce);
+  } else if (sscanf(paramsStr, "Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"", realm, nonce) == 2) {
+    realmHasChanged = fCurrentAuthenticator.realm() == NULL || strcmp(fCurrentAuthenticator.realm(), realm) != 0;
+    fCurrentAuthenticator.setRealmAndNonce(realm, nonce);
+  } else if (sscanf(paramsStr, "Basic realm=\"%[^\"]\"", realm) == 1 && fAllowBasicAuthentication) {
+    realmHasChanged = fCurrentAuthenticator.realm() == NULL || strcmp(fCurrentAuthenticator.realm(), realm) != 0;
+    fCurrentAuthenticator.setRealmAndNonce(realm, NULL); // Basic authentication
+  } else {
+    success = False; // bad "WWW-Authenticate:" header
+  }
+  delete[] realm; delete[] nonce; delete[] stale;
+
+  if (success) {
+    if ((!realmHasChanged && !isStale) || fCurrentAuthenticator.username() == NULL || fCurrentAuthenticator.password() == NULL) {
+      // We already tried with the same realm (and a non-stale nonce),
+      // or don't have a username and/or password, so the new "WWW-Authenticate:" header
+      // information won't help us. We remain unauthenticated.
+      success = False;
+    }
+  }
+
+  return success;
+}
+
+// Re-send a previously-sent request (e.g., after an authentication failure or a redirect).
+Boolean RTSPClient::resendCommand(RequestRecord* request) {
+  if (fVerbosityLevel >= 1) envir() << "Resending...\n";
+  // HTTP "GET" (tunneling) requests keep their original CSeq; everything else gets a fresh one:
+  Boolean const isHTTPGet = request != NULL && strcmp(request->commandName(), "GET") == 0;
+  if (request != NULL && !isHTTPGet) request->cseq() = ++fCSeq;
+  return sendRequest(request) != 0;
+}
+
+// Return the URL to use when sending aggregate (whole-session) commands:
+// the session's control path, unless it is absent or the special "*",
+// in which case we fall back to our base URL.
+char const* RTSPClient::sessionURL(MediaSession const& session) const {
+  char const* controlPath = session.controlPath();
+  if (controlPath != NULL && strcmp(controlPath, "*") != 0) return controlPath;
+
+  return fBaseURL;
+}
+
+// Static trampoline used as a 'server request alternative byte handler':
+// forwards each received byte to the member-function handler.
+void RTSPClient::handleAlternativeRequestByte(void* rtspClient, u_int8_t requestByte) {
+  ((RTSPClient*)rtspClient)->handleAlternativeRequestByte1(requestByte);
+}
+
+// Process one byte handed back to us while another object was reading our input
+// TCP socket. Two byte values are reserved as in-band signals (see below);
+// anything else is ordinary response data.
+void RTSPClient::handleAlternativeRequestByte1(u_int8_t requestByte) {
+  if (requestByte == 0xFF) {
+    // Hack: The new handler of the input TCP socket encountered an error reading it. Indicate this:
+    handleResponseBytes(-1);
+  } else if (requestByte == 0xFE) {
+    // Another hack: The new handler of the input TCP socket no longer needs it, so take back control:
+    envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
+						  (TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
+  } else {
+    // Normal case: append the byte to our response buffer and process it:
+    fResponseBuffer[fResponseBytesAlreadySeen] = requestByte;
+    handleResponseBytes(1);
+  }
+}
+
+// A URL is treated as absolute if a ':' appears before the first '/'
+// (i.e., it begins with a scheme such as "rtsp:").
+static Boolean isAbsoluteURL(char const* url) {
+  for (char const* p = url; *p != '\0' && *p != '/'; ++p) {
+    if (*p == ':') return True;
+  }
+
+  return False;
+}
+
+// Compute the URL that identifies "subsession" in commands sent to the server.
+// The result is returned in three pieces - "prefix" + "separator" + "suffix" -
+// so that the caller can concatenate them without extra allocation; none of the
+// three is ever returned as NULL.
+void RTSPClient::constructSubsessionURL(MediaSubsession const& subsession,
+					char const*& prefix,
+					char const*& separator,
+					char const*& suffix) {
+  // Figure out what the URL describing "subsession" will look like.
+  // The URL is returned in three parts: prefix; separator; suffix
+  //##### NOTE: This code doesn't really do the right thing if "sessionURL()"
+  // doesn't end with a "/", and "subsession.controlPath()" is relative.
+  // The right thing would have been to truncate "sessionURL()" back to the
+  // rightmost "/", and then add "subsession.controlPath()".
+  // In practice, though, each "DESCRIBE" response typically contains
+  // a "Content-Base:" header that consists of "sessionURL()" followed by
+  // a "/", in which case this code ends up giving the correct result.
+  // However, we should really fix this code to do the right thing, and
+  // also check for and use the "Content-Base:" header appropriately. #####
+  prefix = sessionURL(subsession.parentSession());
+  if (prefix == NULL) prefix = "";
+
+  suffix = subsession.controlPath();
+  if (suffix == NULL) suffix = "";
+
+  if (isAbsoluteURL(suffix)) {
+    // The control path is itself a full URL; use it alone.
+    prefix = separator = "";
+  } else {
+    // Insert a "/" between prefix and suffix only if neither already supplies one:
+    unsigned prefixLen = strlen(prefix);
+    separator = (prefixLen == 0 || prefix[prefixLen-1] == '/' || suffix[0] == '/') ? "" : "/";
+  }
+}
+
+// Begin setting up RTSP-over-HTTP tunneling, as described in
+// http://mirror.informatimago.com/next/developer.apple.com/quicktime/icefloe/dispatch028.html
+// and http://images.apple.com/br/quicktime/pdf/QTSS_Modules.pdf
+// Returns True iff the initial request could be sent.
+Boolean RTSPClient::setupHTTPTunneling1() {
+  if (fVerbosityLevel >= 1) {
+    envir() << "Requesting RTSP-over-HTTP tunneling (on port " << fTunnelOverHTTPPortNum << ")\n\n";
+  }
+
+  // Step 1: a HTTP "GET" establishes the server->client half of the tunnel;
+  // setup continues in the "GET" response handler.
+  return sendRequest(new RequestRecord(1, "GET", responseHandlerForHTTP_GET)) != 0;
+}
+
+// Static trampoline for the HTTP "GET" (tunneling) response: forwards to the member function.
+void RTSPClient::responseHandlerForHTTP_GET(RTSPClient* rtspClient, int responseCode, char* responseString) {
+  if (rtspClient != NULL) rtspClient->responseHandlerForHTTP_GET1(responseCode, responseString);
+}
+
+// Continue RTSP-over-HTTP setup once the HTTP "GET" has been answered:
+// open a second TCP connection (the client->server link), then either defer
+// (connection pending), finish via "setupHTTPTunneling2()", or - on any
+// failure - fail every request that was waiting on the tunnel.
+void RTSPClient::responseHandlerForHTTP_GET1(int responseCode, char* responseString) {
+  RequestRecord* request;
+  do {
+    delete[] responseString; // we don't need it (but are responsible for deleting it)
+    if (responseCode != 0) break; // The HTTP "GET" failed.
+
+    // Having successfully set up (using the HTTP "GET" command) the server->client link, set up a second TCP connection
+    // (to the same server & port as before) for the client->server link. All future output will be to this new socket.
+    fOutputSocketNum = setupStreamSocket(envir(), 0);
+    if (fOutputSocketNum < 0) break;
+    ignoreSigPipeOnSocket(fOutputSocketNum); // so that servers on the same host that killed don't also kill us
+
+    fHTTPTunnelingConnectionIsPending = True;
+    int connectResult = connectToServer(fOutputSocketNum, fTunnelOverHTTPPortNum);
+    if (connectResult < 0) break; // an error occurred
+    else if (connectResult == 0) {
+      // A connection is pending. Continue setting up RTSP-over-HTTP when the connection completes.
+      // First, move the pending requests to the 'awaiting connection' queue:
+      while ((request = fRequestsAwaitingHTTPTunneling.dequeue()) != NULL) {
+	fRequestsAwaitingConnection.enqueue(request);
+      }
+      return;
+    }
+
+    // The connection succeeded. Continue setting up RTSP-over-HTTP:
+    if (!setupHTTPTunneling2()) break;
+
+    // RTSP-over-HTTP tunneling succeeded. Resume the pending request(s):
+    while ((request = fRequestsAwaitingHTTPTunneling.dequeue()) != NULL) {
+      sendRequest(request);
+    }
+    return;
+  } while (0);
+
+  // An error occurred. Dequeue the pending request(s), and tell them about the error:
+  fHTTPTunnelingConnectionIsPending = False;
+  resetTCPSockets(); // do this now, in case an error handler deletes "this"
+  // Drain via a local copy, since a handler may re-enqueue into the member queue:
+  RequestQueue requestQueue(fRequestsAwaitingHTTPTunneling);
+  while ((request = requestQueue.dequeue()) != NULL) {
+    handleRequestError(request);
+    delete request;
+  }
+}
+
+// Step 2 of RTSP-over-HTTP setup: establish the client->server half of the tunnel.
+// Returns True iff the request could be sent.
+Boolean RTSPClient::setupHTTPTunneling2() {
+  fHTTPTunnelingConnectionIsPending = False;
+
+  // Send a HTTP "POST", to set up the client->server link. (Note that we won't see a reply to the "POST".)
+  return sendRequest(new RequestRecord(1, "POST", NULL)) != 0;
+}
+
+// Static trampoline invoked when our pending connect() completes:
+// forward to the member-function handler.
+void RTSPClient::connectionHandler(void* instance, int /*mask*/) {
+  ((RTSPClient*)instance)->connectionHandler1();
+}
+
+// Called when our non-blocking connect() to the server resolves (success or
+// failure). On success, optionally finishes HTTP-tunnel setup and/or a TLS
+// handshake, then resumes any requests queued while the connection was pending;
+// on failure, reports the error to all of those requests.
+void RTSPClient::connectionHandler1() {
+  // Restore normal handling on our sockets:
+  envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum);
+  envir().taskScheduler().setBackgroundHandling(fInputSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION,
+						(TaskScheduler::BackgroundHandlerProc*)&incomingDataHandler, this);
+
+  // Move all requests awaiting connection into a new, temporary queue, to clear "fRequestsAwaitingConnection"
+  // (so that "sendRequest()" doesn't get confused by "fRequestsAwaitingConnection" being nonempty, and enqueue them all over again).
+  RequestQueue tmpRequestQueue(fRequestsAwaitingConnection);
+  RequestRecord* request;
+
+  // Find out whether the connection succeeded or failed:
+  do {
+    // SO_ERROR holds the deferred result of the non-blocking connect():
+    int err = 0;
+    SOCKLEN_T len = sizeof err;
+    if (getsockopt(fInputSocketNum, SOL_SOCKET, SO_ERROR, (char*)&err, &len) < 0 || err != 0) {
+      envir().setResultErrMsg("Connection to server failed: ", err);
+      if (fVerbosityLevel >= 1) envir() << "..." << envir().getResultMsg() << "\n";
+      break;
+    }
+
+    // The connection succeeded. If the connection came about from an attempt to set up RTSP-over-HTTP, finish this now:
+    if (fHTTPTunnelingConnectionIsPending && !setupHTTPTunneling2()) break;
+
+    if (fTLS.isNeeded) {
+      // We need to complete an additional TLS connection:
+      int tlsConnectResult = fTLS.connect(fInputSocketNum);
+      if (tlsConnectResult < 0) break; // error in TLS connection
+      if (tlsConnectResult > 0 && fVerbosityLevel >= 1) envir() << "...TLS connection completed\n";
+      if (tlsConnectResult == 0) {
+	// The connection is still pending. Continue deferring...
+	// (Put the requests back on the member queue for the next callback.)
+	while ((request = tmpRequestQueue.dequeue()) != NULL) {
+	  fRequestsAwaitingConnection.enqueue(request);
+	}
+	return;
+      }
+    }
+
+    // The connection is complete. Resume sending all pending requests:
+    if (fVerbosityLevel >= 1) envir() << "...remote connection opened\n";
+    while ((request = tmpRequestQueue.dequeue()) != NULL) {
+      sendRequest(request);
+    }
+    return;
+  } while (0);
+
+  // An error occurred. Tell all pending requests about the error:
+  resetTCPSockets(); // do this now, in case an error handler deletes "this"
+  while ((request = tmpRequestQueue.dequeue()) != NULL) {
+    handleRequestError(request);
+    delete request;
+  }
+}
+
+// Static trampoline invoked when our input socket becomes readable:
+// forward to the member-function handler.
+void RTSPClient::incomingDataHandler(void* instance, int /*mask*/) {
+  ((RTSPClient*)instance)->incomingDataHandler1();
+}
+
+// Read whatever response data is available into "fResponseBuffer", then process it.
+// (A negative read result is passed through to signal a read error.)
+void RTSPClient::incomingDataHandler1() {
+  int bytesRead = read((u_int8_t*)&fResponseBuffer[fResponseBytesAlreadySeen], fResponseBufferBytesLeft);
+  handleResponseBytes(bytesRead);
+}
+
+// Split off the first line of "startOfLine" by overwriting its terminator with '\0'.
+static char* getLine(char* startOfLine) {
+  // returns the start of the next line, or NULL if none. Note that this modifies the input string to add '\0' characters.
+  for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) {
+    // Check for the end of line: \r\n (but also accept \r or \n by itself):
+    if (*ptr == '\r' || *ptr == '\n') {
+      // We found the end of the line
+      if (*ptr == '\r') {
+	*ptr++ = '\0';
+	if (*ptr == '\n') ++ptr; // consume the '\n' of a "\r\n" pair too
+      } else {
+	*ptr++ = '\0';
+      }
+      return ptr;
+    }
+  }
+
+  // No line terminator before the end of the string:
+  return NULL;
+}
+
+// Process "newBytesRead" bytes that have just been placed in "fResponseBuffer"
+// (a negative value - or one that overflows the buffer - signals an error).
+// Accumulates data until a complete RTSP response (headers ending in
+// <CR><LF><CR><LF>, plus any "Content-Length:" body) has arrived, parses the
+// status line and the headers we care about, matches the response to its
+// pending request via "CSeq:", performs command-specific handling, and finally
+// invokes the request's response handler. The outer do-while loops to consume
+// pipelined responses that arrived in the same buffer.
+void RTSPClient::handleResponseBytes(int newBytesRead) {
+  do {
+    if (newBytesRead >= 0 && (unsigned)newBytesRead < fResponseBufferBytesLeft) break; // data was read OK; process it below
+
+    if (newBytesRead >= (int)fResponseBufferBytesLeft) {
+      // We filled up our response buffer.  Treat this as an error (for the first response handler):
+      envir().setResultMsg("RTSP response was truncated. Increase \"RTSPClient::responseBufferSize\"");
+    }
+
+    // An error occurred while reading our TCP socket.  Call all pending response handlers, indicating this error.
+    // (However, the "RTSP response was truncated" error is applied to the first response handler only.)
+    resetResponseBuffer();
+    RequestRecord* request;
+    if (newBytesRead > 0) { // The "RTSP response was truncated" error
+      if ((request = fRequestsAwaitingResponse.dequeue()) != NULL) {
+	handleRequestError(request);
+	delete request;
+      }
+    } else {
+      RequestQueue requestQueue(fRequestsAwaitingResponse);
+      resetTCPSockets(); // do this now, in case an error handler deletes "this"
+
+      while ((request = requestQueue.dequeue()) != NULL) {
+	handleRequestError(request);
+	delete request;
+      }
+    }
+    return;
+  } while (0);
+
+  // Account for the new data:
+  fResponseBufferBytesLeft -= newBytesRead;
+  fResponseBytesAlreadySeen += newBytesRead;
+  fResponseBuffer[fResponseBytesAlreadySeen] = '\0';
+  if (fVerbosityLevel >= 1 && newBytesRead > 1) envir() << "Received " << newBytesRead << " new bytes of response data.\n";
+
+  unsigned numExtraBytesAfterResponse = 0;
+  Boolean responseSuccess = False; // by default
+  do {
+    // Data was read OK.  Look through the data that we've read so far, to see if it contains <CR><LF><CR><LF>.
+    // (If not, wait for more data to arrive.)
+    Boolean endOfHeaders = False;
+    char const* ptr = fResponseBuffer;
+    if (fResponseBytesAlreadySeen > 3) {
+      char const* const ptrEnd = &fResponseBuffer[fResponseBytesAlreadySeen-3];
+      while (ptr < ptrEnd) {
+	if (*ptr++ == '\r' && *ptr++ == '\n' && *ptr++ == '\r' && *ptr++ == '\n') {
+	  // This is it
+	  endOfHeaders = True;
+	  break;
+	}
+      }
+    }
+
+    if (!endOfHeaders) return; // subsequent reads will be needed to get the complete response
+
+    // Now that we have the complete response headers (ending with <CR><LF><CR><LF>), parse them to get the response code, CSeq,
+    // and various other header parameters.  To do this, we first make a copy of the received header data, because we'll be
+    // modifying it by adding '\0' bytes.
+    char* headerDataCopy;
+    unsigned responseCode = 200;
+    char const* responseStr = NULL;
+    RequestRecord* foundRequest = NULL;
+    char const* sessionParamsStr = NULL;
+    char const* transportParamsStr = NULL;
+    char const* scaleParamsStr = NULL;
+    char const* speedParamsStr = NULL;
+    char const* rangeParamsStr = NULL;
+    char const* rtpInfoParamsStr = NULL;
+    char const* wwwAuthenticateParamsStr = NULL;
+    char const* publicParamsStr = NULL;
+    char* bodyStart = NULL;
+    unsigned numBodyBytes = 0;
+    responseSuccess = False;
+    do {
+      headerDataCopy = new char[responseBufferSize];
+      strncpy(headerDataCopy, fResponseBuffer, fResponseBytesAlreadySeen);
+      headerDataCopy[fResponseBytesAlreadySeen] = '\0';
+
+      char* lineStart;
+      char* nextLineStart = headerDataCopy;
+      do {
+	lineStart = nextLineStart;
+	nextLineStart = getLine(lineStart);
+      } while (lineStart[0] == '\0' && nextLineStart != NULL); // skip over any blank lines at the start
+      if (!parseResponseCode(lineStart, responseCode, responseStr)) {
+	// This does not appear to be a RTSP response; perhaps it's a RTSP request instead?
+	handleIncomingRequest();
+	break; // we're done with this data
+      }
+
+      // Scan through the headers, handling the ones that we're interested in:
+      Boolean reachedEndOfHeaders;
+      unsigned cseq = 0;
+      unsigned contentLength = 0;
+
+      while (1) {
+	reachedEndOfHeaders = True; // by default; may get changed below
+	lineStart = nextLineStart;
+	if (lineStart == NULL) break;
+
+	nextLineStart = getLine(lineStart);
+	if (lineStart[0] == '\0') break; // this is a blank line
+	reachedEndOfHeaders = False;
+
+	char const* headerParamsStr;
+	if (checkForHeader(lineStart, "CSeq:", 5, headerParamsStr)) {
+	  if (sscanf(headerParamsStr, "%u", &cseq) != 1 || cseq <= 0) {
+	    envir().setResultMsg("Bad \"CSeq:\" header: \"", lineStart, "\"");
+	    break;
+	  }
+	  // Find the handler function for "cseq":
+	  RequestRecord* request;
+	  while ((request = fRequestsAwaitingResponse.dequeue()) != NULL) {
+	    if (request->cseq() < cseq) { // assumes that the CSeq counter will never wrap around
+	      // We never received (and will never receive) a response for this handler, so delete it:
+	      if (fVerbosityLevel >= 1 && strcmp(request->commandName(), "POST") != 0) {
+		envir() << "WARNING: The server did not respond to our \"" << request->commandName() << "\" request (CSeq: "
+			<< request->cseq() << ").  The server appears to be buggy (perhaps not handling pipelined requests properly).\n";
+	      }
+	      delete request;
+	    } else if (request->cseq() == cseq) {
+	      // This is the handler that we want. Remove its record, but remember it, so that we can later call its handler:
+	      foundRequest = request;
+	      break;
+	    } else { // request->cseq() > cseq
+	      // No handler was registered for this response, so ignore it.
+	      break;
+	    }
+	  }
+	} else if (checkForHeader(lineStart, "Content-Length:", 15, headerParamsStr)) {
+	  if (sscanf(headerParamsStr, "%u", &contentLength) != 1) {
+	    envir().setResultMsg("Bad \"Content-Length:\" header: \"", lineStart, "\"");
+	    break;
+	  }
+	} else if (checkForHeader(lineStart, "Content-Base:", 13, headerParamsStr)) {
+	  setBaseURL(headerParamsStr);
+	} else if (checkForHeader(lineStart, "Session:", 8, sessionParamsStr)) {
+	} else if (checkForHeader(lineStart, "Transport:", 10, transportParamsStr)) {
+	} else if (checkForHeader(lineStart, "Scale:", 6, scaleParamsStr)) {
+	} else if (checkForHeader(lineStart, "Speed:",
+// NOTE: Should you feel the need to modify this code,
+				  6,
+// please first email the "live-devel" mailing list
+				  speedParamsStr
+// (see http://live555.com/liveMedia/faq.html#mailing-list-address for details),
+				  )) {
+// to check whether your proposed modification is appropriate/correct,
+	} else if (checkForHeader(lineStart, "Range:",
+// and, if so, whether instead it could be included in
+				  6,
+// a future release of the "LIVE555 Streaming Media" software,
+				  rangeParamsStr
+// so that other projects that use the code could benefit (not just your own project).
+				  )) {
+	} else if (checkForHeader(lineStart, "RTP-Info:", 9, rtpInfoParamsStr)) {
+	} else if (checkForHeader(lineStart, "WWW-Authenticate:", 17, headerParamsStr)) {
+	  // If we've already seen a "WWW-Authenticate:" header, then we replace it with this new one only if
+	  // the new one specifies "Digest" authentication:
+	  if (wwwAuthenticateParamsStr == NULL || _strncasecmp(headerParamsStr, "Digest", 6) == 0) {
+	    wwwAuthenticateParamsStr = headerParamsStr;
+	  }
+	} else if (checkForHeader(lineStart, "Public:", 7, publicParamsStr)) {
+	} else if (checkForHeader(lineStart, "Allow:", 6, publicParamsStr)) {
+	  // Note: we accept "Allow:" instead of "Public:", so that "OPTIONS" requests made to HTTP servers will work.
+	} else if (checkForHeader(lineStart, "Location:", 9, headerParamsStr)) {
+	  setBaseURL(headerParamsStr);
+	} else if (checkForHeader(lineStart, "com.ses.streamID:", 17, headerParamsStr)) {
+	  // Replace the tail of the 'base URL' with the value of this header parameter:
+	  char* oldBaseURLTail = strrchr(fBaseURL, '/');
+	  if (oldBaseURLTail != NULL) {
+	    unsigned newBaseURLLen
+	      = (oldBaseURLTail - fBaseURL) + 8/* for "/stream=" */ + strlen(headerParamsStr);
+	    char* newBaseURL = new char[newBaseURLLen + 1];
+	    // Note: We couldn't use "asprintf()", because some compilers don't support it
+	    sprintf(newBaseURL, "%.*s/stream=%s",
+		    (int)(oldBaseURLTail - fBaseURL), fBaseURL, headerParamsStr);
+	    setBaseURL(newBaseURL);
+	    delete[] newBaseURL;
+	  }
+	} else if (checkForHeader(lineStart, "Connection:", 11, headerParamsStr)) {
+	  if (fTunnelOverHTTPPortNum == 0 && _strncasecmp(headerParamsStr, "Close", 5) == 0) {
+	    resetTCPSockets();
+	  }
+	}
+      }
+      if (!reachedEndOfHeaders) break; // an error occurred
+
+      if (foundRequest == NULL) {
+	// Hack: The response didn't have a "CSeq:" header; assume it's for our most recent request:
+	foundRequest = fRequestsAwaitingResponse.dequeue();
+      }
+
+      // If we saw a "Content-Length:" header, then make sure that we have the amount of data that it specified:
+      unsigned bodyOffset = nextLineStart == NULL ? fResponseBytesAlreadySeen : nextLineStart - headerDataCopy;
+      bodyStart = &fResponseBuffer[bodyOffset];
+      numBodyBytes = fResponseBytesAlreadySeen - bodyOffset;
+      if (contentLength > numBodyBytes) {
+	// We need to read more data.  First, make sure we have enough space for it:
+	unsigned numExtraBytesNeeded = contentLength - numBodyBytes;
+	unsigned remainingBufferSize = responseBufferSize - fResponseBytesAlreadySeen;
+	if (numExtraBytesNeeded > remainingBufferSize) {
+	  char tmpBuf[200];
+	  sprintf(tmpBuf, "Response buffer size (%d) is too small for \"Content-Length:\" %d (need a buffer size of >= %d bytes\n",
+		  responseBufferSize, contentLength, fResponseBytesAlreadySeen + numExtraBytesNeeded);
+	  envir().setResultMsg(tmpBuf);
+	  break;
+	}
+
+	if (fVerbosityLevel >= 1) {
+	  envir() << "Have received " << fResponseBytesAlreadySeen << " total bytes of a "
+		  << (foundRequest != NULL ? foundRequest->commandName() : "(unknown)")
+		  << " RTSP response; awaiting " << numExtraBytesNeeded << " bytes more.\n";
+	}
+	delete[] headerDataCopy;
+	if (foundRequest != NULL) fRequestsAwaitingResponse.putAtHead(foundRequest);// put our request record back; we need it again
+	return; // We need to read more data
+      }
+
+      // We now have a complete response (including all bytes specified by the "Content-Length:" header, if any).
+      char* responseEnd = bodyStart + contentLength;
+      numExtraBytesAfterResponse = &fResponseBuffer[fResponseBytesAlreadySeen] - responseEnd;
+
+      if (fVerbosityLevel >= 1) {
+	char saved = *responseEnd;
+	*responseEnd = '\0';
+	envir() << "Received a complete "
+		<< (foundRequest != NULL ? foundRequest->commandName() : "(unknown)")
+		<< " response:\n" << fResponseBuffer << "\n";
+	if (numExtraBytesAfterResponse > 0) envir() << "\t(plus " << numExtraBytesAfterResponse << " additional bytes)\n";
+	*responseEnd = saved;
+      }
+
+      if (foundRequest != NULL) {
+	Boolean needToResendCommand = False; // by default...
+	if (responseCode == 200) {
+	  // Do special-case response handling for some commands:
+	  if (strcmp(foundRequest->commandName(), "SETUP") == 0) {
+	    if (!handleSETUPResponse(*foundRequest->subsession(), sessionParamsStr, transportParamsStr, foundRequest->booleanFlags()&0x1)) break;
+	  } else if (strcmp(foundRequest->commandName(), "PLAY") == 0) {
+	    if (!handlePLAYResponse(foundRequest->session(), foundRequest->subsession(), scaleParamsStr, speedParamsStr, rangeParamsStr, rtpInfoParamsStr)) break;
+	  } else if (strcmp(foundRequest->commandName(), "TEARDOWN") == 0) {
+	    if (!handleTEARDOWNResponse(*foundRequest->session(), *foundRequest->subsession())) break;
+	  } else if (strcmp(foundRequest->commandName(), "GET_PARAMETER") == 0) {
+	    if (!handleGET_PARAMETERResponse(foundRequest->contentStr(), bodyStart, responseEnd)) break;
+	  }
+	} else if (responseCode == 401 && handleAuthenticationFailure(wwwAuthenticateParamsStr)) {
+	  // We need to resend the command, with an "Authorization:" header:
+	  needToResendCommand = True;
+
+	  if (strcmp(foundRequest->commandName(), "GET") == 0) {
+	    // Note: If a HTTP "GET" command (for RTSP-over-HTTP tunneling) returns "401 Unauthorized", then we resend it
+	    // (with an "Authorization:" header), just as we would for a RTSP command.  However, we do so using a new TCP connection,
+	    // because some servers close the original connection after returning the "401 Unauthorized".
+	    resetTCPSockets(); // forces the opening of a new connection for the resent command
+	  }
+	} else if (responseCode == 301 || responseCode == 302) { // redirection
+	  resetTCPSockets(); // because we need to connect somewhere else next
+	  needToResendCommand = True;
+	}
+
+	if (needToResendCommand) {
+	  resetResponseBuffer();
+	  (void)resendCommand(foundRequest);
+	  delete[] headerDataCopy;
+	  return; // without calling our response handler; the response to the resent command will do that
+	}
+      }
+
+      responseSuccess = True;
+    } while (0);
+
+    // If we have a handler function for this response, call it.
+    // But first, reset our response buffer, in case the handler goes to the event loop, and we end up getting called recursively:
+    if (numExtraBytesAfterResponse > 0) {
+      // An unusual case; usually due to having received pipelined responses.  Move the extra bytes to the front of the buffer:
+      char* responseEnd = &fResponseBuffer[fResponseBytesAlreadySeen - numExtraBytesAfterResponse];
+
+      // But first: A hack to save a copy of the response 'body', in case it's needed below for "resultString":
+      numBodyBytes -= numExtraBytesAfterResponse;
+      if (numBodyBytes > 0) {
+	char saved = *responseEnd;
+	*responseEnd = '\0';
+	bodyStart = strDup(bodyStart);
+	*responseEnd = saved;
+      }
+
+      memmove(fResponseBuffer, responseEnd, numExtraBytesAfterResponse);
+      fResponseBytesAlreadySeen = numExtraBytesAfterResponse;
+      fResponseBufferBytesLeft = responseBufferSize - numExtraBytesAfterResponse;
+      fResponseBuffer[numExtraBytesAfterResponse] = '\0';
+    } else {
+      resetResponseBuffer();
+    }
+    if (foundRequest != NULL && foundRequest->handler() != NULL) {
+      int resultCode;
+      char* resultString;
+      if (responseSuccess) {
+	if (responseCode == 200) {
+	  resultCode = 0;
+	  resultString = numBodyBytes > 0 ? strDup(bodyStart) : strDup(publicParamsStr);
+	  // Note: The "strDup(bodyStart)" call assumes that the body is encoded without interior '\0' bytes
+	} else {
+	  resultCode = responseCode;
+	  resultString = strDup(responseStr);
+	  envir().setResultMsg(responseStr);
+	}
+	(*foundRequest->handler())(this, resultCode, resultString);
+      } else {
+	// An error occurred parsing the response, so call the handler, indicating an error:
+	handleRequestError(foundRequest);
+      }
+    }
+    delete foundRequest;
+    delete[] headerDataCopy;
+    if (numExtraBytesAfterResponse > 0 && numBodyBytes > 0) delete[] bodyStart;
+    // Loop again iff a (successfully-parsed) pipelined response remains in the buffer:
+  } while (numExtraBytesAfterResponse > 0 && responseSuccess);
+}
+
+// Send "count" bytes to the server, routing through TLS when in use;
+// otherwise write directly to the output socket. Returns send()'s/TLS's result.
+int RTSPClient::write(const char* data, unsigned count) {
+  return fTLS.isNeeded
+    ? fTLS.write(data, count)
+    : send(fOutputSocketNum, data, count, 0);
+}
+
+// Read up to "bufferSize" bytes from the server, routing through TLS when in use;
+// otherwise read directly from the input socket.
+int RTSPClient::read(u_int8_t* buffer, unsigned bufferSize) {
+  if (fTLS.isNeeded) return fTLS.read(buffer, bufferSize);
+
+  struct sockaddr_in dummy; // 'from' address - not used
+  return readSocket(envir(), fInputSocketNum, buffer, bufferSize, dummy);
+}
+
+
+////////// RTSPClient::RequestRecord implementation //////////
+
+// General-purpose constructor: record a pending command ("commandName") with its
+// CSeq, target session/subsession, flags, numeric play range, scale, optional
+// content string (copied), and completion handler.
+RTSPClient::RequestRecord::RequestRecord(unsigned cseq, char const* commandName, responseHandler* handler,
+					 MediaSession* session, MediaSubsession* subsession, u_int32_t booleanFlags,
+					 double start, double end, float scale, char const* contentStr)
+  : fNext(NULL), fCSeq(cseq), fCommandName(commandName), fSession(session), fSubsession(subsession), fBooleanFlags(booleanFlags),
+    fStart(start), fEnd(end), fAbsStartTime(NULL), fAbsEndTime(NULL), fScale(scale), fContentStr(strDup(contentStr)), fHandler(handler) {
+}
+
+// Alternative constructor, used only for "PLAY" commands that specify an
+// absolute clock range: the time strings are copied; the numeric range is unset.
+RTSPClient::RequestRecord::RequestRecord(unsigned cseq, responseHandler* handler,
+					 char const* absStartTime, char const* absEndTime, float scale,
+					 MediaSession* session, MediaSubsession* subsession)
+  : fNext(NULL), fCSeq(cseq), fCommandName("PLAY"), fSession(session), fSubsession(subsession), fBooleanFlags(0),
+    fStart(0.0f), fEnd(-1.0f), fAbsStartTime(strDup(absStartTime)), fAbsEndTime(strDup(absEndTime)), fScale(scale),
+    fContentStr(NULL), fHandler(handler) {
+}
+
+// Destructor. Note that deleting "fNext" recursively destroys the entire
+// remainder of the linked list that this record heads.
+RTSPClient::RequestRecord::~RequestRecord() {
+  // Delete the rest of the list first:
+  delete fNext;
+
+  delete[] fAbsStartTime; delete[] fAbsEndTime;
+  delete[] fContentStr;
+}
+
+
+////////// RTSPClient::RequestQueue implementation //////////
+
+// Construct an empty queue (a singly-linked list with head and tail pointers).
+RTSPClient::RequestQueue::RequestQueue()
+  : fHead(NULL), fTail(NULL) {
+}
+
+// 'Move' constructor: drain "origQueue" into this new queue, leaving
+// "origQueue" empty. (Used to snapshot a member queue before iterating,
+// since handlers may re-enqueue into the member queue.)
+RTSPClient::RequestQueue::RequestQueue(RequestQueue& origQueue)
+  : fHead(NULL), fTail(NULL) {
+  RequestRecord* request;
+  while ((request = origQueue.dequeue()) != NULL) {
+    enqueue(request);
+  }
+}
+
+// Destructor: deletes any records still in the queue (via "reset()").
+RTSPClient::RequestQueue::~RequestQueue() {
+  reset();
+}
+
+// Append "request" to the tail of the queue.
+void RTSPClient::RequestQueue::enqueue(RequestRecord* request) {
+  if (fTail != NULL) {
+    fTail->next() = request;
+  } else {
+    fHead = request; // the queue was empty; the new record is also the head
+  }
+  fTail = request;
+}
+
+// Remove and return the record at the head of the queue (NULL if empty).
+// The returned record's "next" link is cleared, detaching it from the list.
+RTSPClient::RequestRecord* RTSPClient::RequestQueue::dequeue() {
+  RequestRecord* request = fHead;
+  if (fHead == fTail) {
+    // Zero or one record: the queue becomes empty either way.
+    fHead = NULL;
+    fTail = NULL;
+  } else {
+    fHead = fHead->next();
+  }
+  if (request != NULL) request->next() = NULL;
+  return request;
+}
+
+// Push "request" back onto the front of the queue (e.g., a request record that
+// was dequeued but turned out to still be needed).
+void RTSPClient::RequestQueue::putAtHead(RequestRecord* request) {
+  request->next() = fHead;
+  fHead = request;
+  if (fTail == NULL) fTail = request; // the queue was empty; it's also the tail
+}
+
+// Linear-scan the queue for the record whose CSeq matches "cseq";
+// return it (still in the queue), or NULL if not found.
+RTSPClient::RequestRecord* RTSPClient::RequestQueue::findByCSeq(unsigned cseq) {
+  for (RequestRecord* r = fHead; r != NULL; r = r->next()) {
+    if (r->cseq() == cseq) return r;
+  }
+  return NULL;
+}
+
+// Delete every record in the queue and return it to the empty state.
+// (Deleting "fHead" cascades down the list via ~RequestRecord.)
+void RTSPClient::RequestQueue::reset() {
+  delete fHead;
+  fHead = fTail = NULL;
+}
+
+
+#ifndef OMIT_REGISTER_HANDLING
+////////// HandlerServerForREGISTERCommand implementation /////////
+
+// Factory function: create the server socket first, and return NULL if that fails;
+// otherwise construct and return the server object.
+HandlerServerForREGISTERCommand* HandlerServerForREGISTERCommand
+::createNew(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, Port ourPort,
+	    UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName) {
+  int ourSocket = setUpOurSocket(env, ourPort);
+  return ourSocket == -1
+    ? NULL
+    : new HandlerServerForREGISTERCommand(env, creationFunc, ourSocket, ourPort, authDatabase, verbosityLevel, applicationName);
+}
+
+// Constructor (called only via "createNew()"): a RTSP server specialized for
+// handling "REGISTER" commands. The application name is copied.
+HandlerServerForREGISTERCommand
+::HandlerServerForREGISTERCommand(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, int ourSocket, Port ourPort,
+				  UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName)
+  : RTSPServer(env, ourSocket, ourPort, authDatabase, 30/*small reclamationTestSeconds*/),
+    fCreationFunc(creationFunc), fVerbosityLevel(verbosityLevel), fApplicationName(strDup(applicationName)) {
+}
+
+// Destructor: frees our copy of the application name.
+HandlerServerForREGISTERCommand::~HandlerServerForREGISTERCommand() {
+  delete[] fApplicationName;
+}
+
+// Create the "RTSPClient" object that will handle a registered stream.
+// Subclasses may override this to create a specialized client.
+RTSPClient* HandlerServerForREGISTERCommand
+::createNewRTSPClient(char const* rtspURL, int verbosityLevel, char const* applicationName, int socketNumToServer) {
+  // Default implementation: create a basic "RTSPClient":
+  return RTSPClient::createNew(envir(), rtspURL, verbosityLevel, applicationName, 0, socketNumToServer);
+}
+
+// The commands this server advertises (e.g., in "OPTIONS" responses).
+char const* HandlerServerForREGISTERCommand::allowedCommandNames() {
+  return "OPTIONS, REGISTER";
+}
+
+// Report whether we implement the given command; "responseStr" (an optional
+// explanatory message, NULL here) is an output parameter.
+Boolean HandlerServerForREGISTERCommand
+::weImplementREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+		      char const* /*proxyURLSuffix*/, char*& responseStr) {
+  responseStr = NULL;
+  // By default, we implement only "REGISTER"; not "DEREGISTER".  Subclass to implement "DEREGISTER"
+  return strcmp(cmd, "REGISTER") == 0;
+}
+
+// Act on an incoming "REGISTER" command: create a "RTSPClient" for the
+// registered URL (reusing "socketToRemoteServer"), then hand it to the
+// application's creation callback (if one was supplied).
+void HandlerServerForREGISTERCommand
+::implementCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+			char const* url, char const* urlSuffix, int socketToRemoteServer,
+			Boolean deliverViaTCP, char const* /*proxyURLSuffix*/) {
+  if (strcmp(cmd, "REGISTER") == 0) { // By default, we don't implement "DEREGISTER"
+    // Create a new "RTSPClient" object, and call our 'creation function' with it:
+    RTSPClient* newRTSPClient = createNewRTSPClient(url, fVerbosityLevel, fApplicationName, socketToRemoteServer);
+
+    if (fCreationFunc != NULL) (*fCreationFunc)(newRTSPClient, deliverViaTCP);
+  }
+}
+#endif
diff --git a/liveMedia/RTSPCommon.cpp b/liveMedia/RTSPCommon.cpp
new file mode 100644
index 0000000..c7dde82
--- /dev/null
+++ b/liveMedia/RTSPCommon.cpp
@@ -0,0 +1,374 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Common routines used by both RTSP clients and servers
+// Implementation
+
#include "RTSPCommon.hh"
#include "Locale.hh"
#include <string.h>
#include <stdio.h>
#include <stdlib.h> // for "strtol()"
#include <ctype.h> // for "isxdigit()"
#include <time.h> // for "strftime()" and "gmtime()"
+
// Replace (in place) any %<hex><hex> escape sequences in "url" with the
// corresponding 8-bit character.  A '%' that is not followed by two hex
// digits is copied through unchanged.  The decoded string is never longer
// than the input, so decoding in place is safe.
static void decodeURL(char* url) {
  char* cursor = url;
  while (*cursor) {
    // Note: "isxdigit()" has undefined behavior for negative "char" values,
    // so cast through "unsigned char" (URLs may contain 8-bit bytes):
    if ((cursor[0] == '%') &&
	cursor[1] && isxdigit((unsigned char)cursor[1]) &&
	cursor[2] && isxdigit((unsigned char)cursor[2])) {
      // We saw a % followed by 2 hex digits, so we copy the literal hex value into the URL, then advance the cursor past it:
      char hex[3];
      hex[0] = cursor[1];
      hex[1] = cursor[2];
      hex[2] = '\0';
      *url++ = (char)strtol(hex, NULL, 16);
      cursor += 3;
    } else {
      // Common case: This is a normal character or a bogus % expression, so just copy it
      *url++ = *cursor++;
    }
  }

  *url = '\0'; // NUL-terminate the (possibly shortened) result
}
+
// Parse the RTSP request held in "reqStr" (length "reqStrSize"; need not be
// NUL-terminated) into its component parts:
//   - the command name (e.g., "DESCRIBE"),
//   - the URL 'pre-suffix' and 'suffix' (the last two components of the URL),
//   - the "CSeq:" header value (mandatory; parse fails without it),
//   - the "Session:" header value (optional; "" if absent),
//   - the "Content-Length:" header value (optional; 0 if absent).
// Each "result..." buffer must hold at least "result...MaxSize" bytes
// (the "MaxSize" values are assumed to be > 0).
// Returns True iff the parse succeeded.
Boolean parseRTSPRequestString(char const* reqStr,
			       unsigned reqStrSize,
			       char* resultCmdName,
			       unsigned resultCmdNameMaxSize,
			       char* resultURLPreSuffix,
			       unsigned resultURLPreSuffixMaxSize,
			       char* resultURLSuffix,
			       unsigned resultURLSuffixMaxSize,
			       char* resultCSeq,
			       unsigned resultCSeqMaxSize,
			       char* resultSessionIdStr,
			       unsigned resultSessionIdStrMaxSize,
			       unsigned& contentLength) {
  // This parser is currently rather dumb; it should be made smarter #####

  // "Be liberal in what you accept": Skip over any whitespace at the start of the request:
  unsigned i;
  for (i = 0; i < reqStrSize; ++i) {
    char c = reqStr[i];
    if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '\0')) break;
  }
  if (i == reqStrSize) return False; // The request consisted of nothing but whitespace!

  // Then read everything up to the next space (or tab) as the command name:
  Boolean parseSucceeded = False;
  unsigned i1 = 0;
  for (; i1 < resultCmdNameMaxSize-1 && i < reqStrSize; ++i,++i1) {
    char c = reqStr[i];
    if (c == ' ' || c == '\t') {
      parseSucceeded = True;
      break;
    }

    resultCmdName[i1] = c;
  }
  resultCmdName[i1] = '\0';
  if (!parseSucceeded) return False;

  // Skip over the prefix of any "rtsp://" or "rtsp:/" URL that follows:
  unsigned j = i+1;
  while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j; // skip over any additional white space
  // (The "(int)" casts below guard against unsigned wraparound when
  //  "reqStrSize" is smaller than the constant being subtracted.)
  for (; (int)j < (int)(reqStrSize-8); ++j) {
    // case-insensitive match of "rtsp:/":
    if ((reqStr[j] == 'r' || reqStr[j] == 'R')
	&& (reqStr[j+1] == 't' || reqStr[j+1] == 'T')
	&& (reqStr[j+2] == 's' || reqStr[j+2] == 'S')
	&& (reqStr[j+3] == 'p' || reqStr[j+3] == 'P')
	&& reqStr[j+4] == ':' && reqStr[j+5] == '/') {
      j += 6;
      if (reqStr[j] == '/') {
	// This is a "rtsp://" URL; skip over the host:port part that follows:
	++j;
	while (j < reqStrSize && reqStr[j] != '/' && reqStr[j] != ' ') ++j;
      } else {
	// This is a "rtsp:/" URL; back up to the "/":
	--j;
      }
      i = j;
      break;
    }
  }

  // Look for the URL suffix (before the following "RTSP/"):
  parseSucceeded = False;
  for (unsigned k = i+1; (int)k < (int)(reqStrSize-5); ++k) {
    if (reqStr[k] == 'R' && reqStr[k+1] == 'T' &&
	reqStr[k+2] == 'S' && reqStr[k+3] == 'P' && reqStr[k+4] == '/') {
      while (--k >= i && reqStr[k] == ' ') {} // go back over all spaces before "RTSP/"
      unsigned k1 = k;
      while (k1 > i && reqStr[k1] != '/') --k1;

      // ASSERT: At this point
      //   i: first space or slash after "host" or "host:port"
      //   k: last non-space before "RTSP/"
      //   k1: last slash in the range [i,k]

      // The URL suffix comes from [k1+1,k]
      // Copy "resultURLSuffix":
      unsigned n = 0, k2 = k1+1;
      if (k2 <= k) {
	if (k - k1 + 1 > resultURLSuffixMaxSize) return False; // there's no room
	while (k2 <= k) resultURLSuffix[n++] = reqStr[k2++];
      }
      resultURLSuffix[n] = '\0';

      // The URL 'pre-suffix' comes from [i+1,k1-1]
      // Copy "resultURLPreSuffix":
      n = 0; k2 = i+1;
      if (k2+1 <= k1) {
	if (k1 - i > resultURLPreSuffixMaxSize) return False; // there's no room
	while (k2 <= k1-1) resultURLPreSuffix[n++] = reqStr[k2++];
      }
      resultURLPreSuffix[n] = '\0';
      // Note: only the pre-suffix is %-decoded here (the suffix is left as-is):
      decodeURL(resultURLPreSuffix);

      i = k + 7; // to go past " RTSP/"
      parseSucceeded = True;
      break;
    }
  }
  if (!parseSucceeded) return False;

  // Look for "CSeq:" (mandatory, case insensitive), skip whitespace,
  // then read everything up to the next \r or \n as 'CSeq':
  parseSucceeded = False;
  for (j = i; (int)j < (int)(reqStrSize-5); ++j) {
    if (_strncasecmp("CSeq:", &reqStr[j], 5) == 0) {
      j += 5;
      while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
      unsigned n;
      for (n = 0; n < resultCSeqMaxSize-1 && j < reqStrSize; ++n,++j) {
	char c = reqStr[j];
	if (c == '\r' || c == '\n') {
	  parseSucceeded = True;
	  break;
	}

	resultCSeq[n] = c;
      }
      resultCSeq[n] = '\0';
      break;
    }
  }
  if (!parseSucceeded) return False;

  // Look for "Session:" (optional, case insensitive), skip whitespace,
  // then read everything up to the next \r or \n as 'Session':
  resultSessionIdStr[0] = '\0'; // default value (empty string)
  for (j = i; (int)j < (int)(reqStrSize-8); ++j) {
    if (_strncasecmp("Session:", &reqStr[j], 8) == 0) {
      j += 8;
      while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
      unsigned n;
      for (n = 0; n < resultSessionIdStrMaxSize-1 && j < reqStrSize; ++n,++j) {
	char c = reqStr[j];
	if (c == '\r' || c == '\n') {
	  break;
	}

	resultSessionIdStr[n] = c;
      }
      resultSessionIdStr[n] = '\0';
      break;
    }
  }

  // Also: Look for "Content-Length:" (optional, case insensitive)
  contentLength = 0; // default value
  for (j = i; (int)j < (int)(reqStrSize-15); ++j) {
    if (_strncasecmp("Content-Length:", &(reqStr[j]), 15) == 0) {
      j += 15;
      while (j < reqStrSize && (reqStr[j] == ' ' || reqStr[j] == '\t')) ++j;
      unsigned num;
      if (sscanf(&reqStr[j], "%u", &num) == 1) {
	contentLength = num;
      }
    }
  }
  return True;
}
+
+Boolean parseRangeParam(char const* paramStr,
+ double& rangeStart, double& rangeEnd,
+ char*& absStartTime, char*& absEndTime,
+ Boolean& startTimeIsNow) {
+ delete[] absStartTime; delete[] absEndTime;
+ absStartTime = absEndTime = NULL; // by default, unless "paramStr" is a "clock=..." string
+ startTimeIsNow = False; // by default
+ double start, end;
+ int numCharsMatched1 = 0, numCharsMatched2 = 0, numCharsMatched3 = 0, numCharsMatched4 = 0;
+ int startHour = 0, startMin = 0, endHour = 0, endMin = 0;
+ double startSec = 0.0, endSec = 0.0;
+ Locale l("C", Numeric);
+ if (sscanf(paramStr, "npt = %d:%d:%lf - %d:%d:%lf", &startHour, &startMin, &startSec, &endHour, &endMin, &endSec) == 6) {
+ rangeStart = startHour*3600 + startMin*60 + startSec;
+ rangeEnd = endHour*3600 + endMin*60 + endSec;
+ } else if (sscanf(paramStr, "npt =%lf - %d:%d:%lf", &start, &endHour, &endMin, &endSec) == 4) {
+ rangeStart = start;
+ rangeEnd = endHour*3600 + endMin*60 + endSec;
+ } else if (sscanf(paramStr, "npt = %d:%d:%lf -", &startHour, &startMin, &startSec) == 3) {
+ rangeStart = startHour*3600 + startMin*60 + startSec;
+ rangeEnd = 0.0;
+ } else if (sscanf(paramStr, "npt = %lf - %lf", &start, &end) == 2) {
+ rangeStart = start;
+ rangeEnd = end;
+ } else if (sscanf(paramStr, "npt = %n%lf -", &numCharsMatched1, &start) == 1) {
+ if (paramStr[numCharsMatched1] == '-') {
+ // special case for "npt = -<endtime>", which matches here:
+ rangeStart = 0.0; startTimeIsNow = True;
+ rangeEnd = -start;
+ } else {
+ rangeStart = start;
+ rangeEnd = 0.0;
+ }
+ } else if (sscanf(paramStr, "npt = now - %lf", &end) == 1) {
+ rangeStart = 0.0; startTimeIsNow = True;
+ rangeEnd = end;
+ } else if (sscanf(paramStr, "npt = now -%n", &numCharsMatched2) == 0 && numCharsMatched2 > 0) {
+ rangeStart = 0.0; startTimeIsNow = True;
+ rangeEnd = 0.0;
+ } else if (sscanf(paramStr, "clock = %n", &numCharsMatched3) == 0 && numCharsMatched3 > 0) {
+ rangeStart = rangeEnd = 0.0;
+
+ char const* utcTimes = ¶mStr[numCharsMatched3];
+ size_t len = strlen(utcTimes) + 1;
+ char* as = new char[len];
+ char* ae = new char[len];
+ int sscanfResult = sscanf(utcTimes, "%[^-]-%[^\r\n]", as, ae);
+ if (sscanfResult == 2) {
+ absStartTime = as;
+ absEndTime = ae;
+ } else if (sscanfResult == 1) {
+ absStartTime = as;
+ delete[] ae;
+ } else {
+ delete[] as; delete[] ae;
+ return False;
+ }
+ } else if (sscanf(paramStr, "smtpe = %n", &numCharsMatched4) == 0 && numCharsMatched4 > 0) {
+ // We accept "smtpe=" parameters, but currently do not interpret them.
+ } else {
+ return False; // The header is malformed
+ }
+
+ return True;
+}
+
+Boolean parseRangeHeader(char const* buf,
+ double& rangeStart, double& rangeEnd,
+ char*& absStartTime, char*& absEndTime,
+ Boolean& startTimeIsNow) {
+ // First, find "Range:"
+ while (1) {
+ if (*buf == '\0') return False; // not found
+ if (_strncasecmp(buf, "Range: ", 7) == 0) break;
+ ++buf;
+ }
+
+ char const* fields = buf + 7;
+ while (*fields == ' ') ++fields;
+ return parseRangeParam(fields, rangeStart, rangeEnd, absStartTime, absEndTime, startTimeIsNow);
+}
+
+Boolean parseScaleHeader(char const* buf, float& scale) {
+ // Initialize the result parameter to a default value:
+ scale = 1.0;
+
+ // First, find "Scale:"
+ while (1) {
+ if (*buf == '\0') return False; // not found
+ if (_strncasecmp(buf, "Scale:", 6) == 0) break;
+ ++buf;
+ }
+
+ char const* fields = buf + 6;
+ while (*fields == ' ') ++fields;
+ float sc;
+ if (sscanf(fields, "%f", &sc) == 1) {
+ scale = sc;
+ } else {
+ return False; // The header is malformed
+ }
+
+ return True;
+}
+
+// Used to implement "RTSPOptionIsSupported()":
+static Boolean isSeparator(char c) { return c == ' ' || c == ',' || c == ';' || c == ':'; }
+
// Returns True iff "commandName" appears - as a complete, separator-delimited
// token - in "optionsResponseString" (typically the body of an "OPTIONS"
// response's "Public:" header).  Either argument may be NULL (=> False).
Boolean RTSPOptionIsSupported(char const* commandName, char const* optionsResponseString) {
  do {
    if (commandName == NULL || optionsResponseString == NULL) break;

    unsigned const commandNameLen = strlen(commandName);
    if (commandNameLen == 0) break;

    // "optionsResponseString" is assumed to be a list of command names, separated by " " and/or ",", ";", or ":"
    // Scan through these, looking for "commandName".
    while (1) {
      // Skip over separators:
      while (*optionsResponseString != '\0' && isSeparator(*optionsResponseString)) ++optionsResponseString;
      if (*optionsResponseString == '\0') break;

      // At this point, "optionsResponseString" begins with a command name (with perhaps a separator afterwards).
      if (strncmp(commandName, optionsResponseString, commandNameLen) == 0) {
	// We have at least a partial match here.
	optionsResponseString += commandNameLen;
	// Require a full-token match: the next char must end the token.
	if (*optionsResponseString == '\0' || isSeparator(*optionsResponseString)) return True;
      }

      // No match.  Skip over the rest of the command name:
      while (*optionsResponseString != '\0' && !isSeparator(*optionsResponseString)) ++optionsResponseString;
    }
  } while (0);

  return False;
}
+
// Returns a complete "Date:" RTSP/HTTP header line (including the trailing
// "\r\n"), with the current time in GMT.
// NOTE: the result points into a function-local static buffer, so this
// function is not reentrant, and each call overwrites the previous result.
char const* dateHeader() {
  static char buf[200];
#if !defined(_WIN32_WCE)
  time_t tt = time(NULL);
  strftime(buf, sizeof buf, "Date: %a, %b %d %Y %H:%M:%S GMT\r\n", gmtime(&tt));
#else
  // WinCE apparently doesn't have "time()", "strftime()", or "gmtime()",
  // so generate the "Date:" header a different, WinCE-specific way.
  // (Thanks to Pierre l'Hussiez for this code)
  // RSF: But where is the "Date: " string?  This code doesn't look quite right...
  SYSTEMTIME SystemTime;
  GetSystemTime(&SystemTime);
  WCHAR dateFormat[] = L"ddd, MMM dd yyyy";
  WCHAR timeFormat[] = L"HH:mm:ss GMT\r\n";
  WCHAR inBuf[200];
  DWORD locale = LOCALE_NEUTRAL;

  int ret = GetDateFormat(locale, 0, &SystemTime,
			  (LPTSTR)dateFormat, (LPTSTR)inBuf, sizeof inBuf);
  inBuf[ret - 1] = ' ';
  ret = GetTimeFormat(locale, 0, &SystemTime,
		      (LPTSTR)timeFormat,
		      (LPTSTR)inBuf + ret, (sizeof inBuf) - ret);
  wcstombs(buf, inBuf, wcslen(inBuf));
#endif
  return buf;
}
diff --git a/liveMedia/RTSPRegisterSender.cpp b/liveMedia/RTSPRegisterSender.cpp
new file mode 100644
index 0000000..ec66e6a
--- /dev/null
+++ b/liveMedia/RTSPRegisterSender.cpp
@@ -0,0 +1,228 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Special objects which, when created, sends a custom RTSP "REGISTER" (or "DEREGISTER") command
+// to a specified client.
+// Implementation
+
+#include "RTSPRegisterSender.hh"
+#include <GroupsockHelper.hh> // for MAKE_SOCKADDR_IN
+
+////////// RTSPRegisterOrDeregisterSender implementation /////////
+
// Constructs the sender and points it at the remote client by synthesizing a
// fake "rtsp://<name-or-address>:<port>/" URL for the base "RTSPClient".
RTSPRegisterOrDeregisterSender
::RTSPRegisterOrDeregisterSender(UsageEnvironment& env,
				 char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum,
				 Authenticator* authenticator,
				 int verbosityLevel, char const* applicationName)
  : RTSPClient(env, NULL, verbosityLevel, applicationName, 0, -1),
    fRemoteClientPortNum(remoteClientPortNum) {
  // Set up a connection to the remote client.  To do this, we create a fake "rtsp://" URL for it:
  char const* fakeRTSPURLFmt = "rtsp://%s:%u/";
  // Buffer size: the format string (its "%s"/"%u" get replaced) plus the name
  // plus up to 5 digits of port number; this leaves room for the trailing NUL.
  unsigned fakeRTSPURLSize = strlen(fakeRTSPURLFmt) + strlen(remoteClientNameOrAddress) + 5/* max port num len */;
  char* fakeRTSPURL = new char[fakeRTSPURLSize];
  sprintf(fakeRTSPURL, fakeRTSPURLFmt, remoteClientNameOrAddress, remoteClientPortNum);
  setBaseURL(fakeRTSPURL);
  delete[] fakeRTSPURL; // "setBaseURL()" has taken what it needs

  if (authenticator != NULL) fCurrentAuthenticator = *authenticator;
}
+
RTSPRegisterOrDeregisterSender::~RTSPRegisterOrDeregisterSender() {
  // No-op; cleanup is handled by the "RTSPClient" base class.
}
+
// Request record for a "REGISTER"/"DEREGISTER" command; stores private copies
// (via strDup()) of the target URL and the optional proxy URL suffix.
RTSPRegisterOrDeregisterSender::RequestRecord_REGISTER_or_DEREGISTER
::RequestRecord_REGISTER_or_DEREGISTER(unsigned cseq, char const* cmdName,
				       RTSPClient::responseHandler* rtspResponseHandler,
				       char const* rtspURLToRegisterOrDeregister,
				       char const* proxyURLSuffix)
  : RTSPClient::RequestRecord(cseq, cmdName, rtspResponseHandler),
    fRTSPURLToRegisterOrDeregister(strDup(rtspURLToRegisterOrDeregister)),
    fProxyURLSuffix(strDup(proxyURLSuffix)) {
}
+
RTSPRegisterOrDeregisterSender::RequestRecord_REGISTER_or_DEREGISTER
::~RequestRecord_REGISTER_or_DEREGISTER() {
  // Free the strDup()ed copies made in our constructor:
  delete[] fRTSPURLToRegisterOrDeregister;
  delete[] fProxyURLSuffix;
}
+
+
+////////// RTSPRegisterSender implementation /////////
+
// Public factory; simply forwards all parameters to our (protected) constructor.
RTSPRegisterSender* RTSPRegisterSender
::createNew(UsageEnvironment& env,
	    char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister,
	    RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator,
	    Boolean requestStreamingViaTCP, char const* proxyURLSuffix, Boolean reuseConnection,
	    int verbosityLevel, char const* applicationName) {
  return new RTSPRegisterSender(env, remoteClientNameOrAddress, remoteClientPortNum, rtspURLToRegister,
				rtspResponseHandler, authenticator,
				requestStreamingViaTCP, proxyURLSuffix, reuseConnection,
				verbosityLevel, applicationName);
}
+
// Hands over ownership of our connection socket to the caller, along with the
// remote client's address (built from "fServerAddress" and the port that was
// given to our constructor).
void RTSPRegisterSender::grabConnection(int& sock, struct sockaddr_in& remoteAddress) {
  sock = grabSocket();

  MAKE_SOCKADDR_IN(remoteAddr, fServerAddress, htons(fRemoteClientPortNum));
  remoteAddress = remoteAddr;
}
+
// Protected constructor; immediately queues a "REGISTER" request for sending.
RTSPRegisterSender
::RTSPRegisterSender(UsageEnvironment& env,
		     char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister,
		     RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator,
		     Boolean requestStreamingViaTCP, char const* proxyURLSuffix, Boolean reuseConnection,
		     int verbosityLevel, char const* applicationName)
  : RTSPRegisterOrDeregisterSender(env, remoteClientNameOrAddress, remoteClientPortNum, authenticator, verbosityLevel, applicationName) {
  // Send the "REGISTER" request:
  (void)sendRequest(new RequestRecord_REGISTER(++fCSeq, rtspResponseHandler,
					       rtspURLToRegister, reuseConnection, requestStreamingViaTCP, proxyURLSuffix));
}
+
RTSPRegisterSender::~RTSPRegisterSender() {
  // No-op; cleanup is handled by our base classes.
}
+
// Customizes the outgoing request for "REGISTER" commands: sets the command
// URL to the URL being registered, and builds a "Transport:" header carrying
// the REGISTER-specific parameters ("reuse_connection",
// "preferred_delivery_protocol", "proxy_url_suffix").
// All other commands are handled by the "RTSPClient" base implementation.
Boolean RTSPRegisterSender::setRequestFields(RequestRecord* request,
					     char*& cmdURL, Boolean& cmdURLWasAllocated,
					     char const*& protocolStr,
					     char*& extraHeaders, Boolean& extraHeadersWereAllocated) {
  if (strcmp(request->commandName(), "REGISTER") == 0) {
    RequestRecord_REGISTER* request_REGISTER = (RequestRecord_REGISTER*)request;

    setBaseURL(request_REGISTER->rtspURLToRegister());
    cmdURL = (char*)url();
    cmdURLWasAllocated = False; // "url()" owns the string

    // Generate the "Transport:" header that will contain our REGISTER-specific parameters.  This will be "extraHeaders".
    // First, generate the "proxy_url_suffix" parameter string, if any:
    char* proxyURLSuffixParameterStr;
    if (request_REGISTER->proxyURLSuffix() == NULL) {
      proxyURLSuffixParameterStr = strDup("");
    } else {
      char const* proxyURLSuffixParameterFmt = "; proxy_url_suffix=%s";
      unsigned proxyURLSuffixParameterSize = strlen(proxyURLSuffixParameterFmt)
	+ strlen(request_REGISTER->proxyURLSuffix());
      proxyURLSuffixParameterStr = new char[proxyURLSuffixParameterSize];
      sprintf(proxyURLSuffixParameterStr, proxyURLSuffixParameterFmt, request_REGISTER->proxyURLSuffix());
    }

    char const* transportHeaderFmt = "Transport: %spreferred_delivery_protocol=%s%s\r\n";
    unsigned transportHeaderSize = strlen(transportHeaderFmt) + 100/*conservative*/ + strlen(proxyURLSuffixParameterStr);
    char* transportHeaderStr = new char[transportHeaderSize];
    sprintf(transportHeaderStr, transportHeaderFmt,
	    request_REGISTER->reuseConnection() ? "reuse_connection; " : "",
	    request_REGISTER->requestStreamingViaTCP() ? "interleaved" : "udp",
	    proxyURLSuffixParameterStr);
    delete[] proxyURLSuffixParameterStr;

    extraHeaders = transportHeaderStr;
    extraHeadersWereAllocated = True; // caller takes ownership and will delete[]

    return True;
  } else {
    return RTSPClient::setRequestFields(request, cmdURL, cmdURLWasAllocated, protocolStr, extraHeaders, extraHeadersWereAllocated);
  }
}
+
// Request record for a "REGISTER" command; additionally remembers the
// connection-reuse and TCP-streaming preferences for the "Transport:" header.
RTSPRegisterSender::RequestRecord_REGISTER
::RequestRecord_REGISTER(unsigned cseq, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToRegister,
			 Boolean reuseConnection, Boolean requestStreamingViaTCP, char const* proxyURLSuffix)
  : RTSPRegisterOrDeregisterSender::RequestRecord_REGISTER_or_DEREGISTER(cseq, "REGISTER", rtspResponseHandler, rtspURLToRegister, proxyURLSuffix),
    fReuseConnection(reuseConnection), fRequestStreamingViaTCP(requestStreamingViaTCP) {
}
+
RTSPRegisterSender::RequestRecord_REGISTER::~RequestRecord_REGISTER() {
  // No-op; the base class frees the URL and proxy-suffix strings.
}
+
+
+////////// RTSPDeregisterSender implementation /////////
+
// Public factory; simply forwards all parameters to our (protected) constructor.
RTSPDeregisterSender* RTSPDeregisterSender
::createNew(UsageEnvironment& env,
	    char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToDeregister,
	    RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator,
	    char const* proxyURLSuffix, int verbosityLevel, char const* applicationName) {
  return new RTSPDeregisterSender(env, remoteClientNameOrAddress, remoteClientPortNum, rtspURLToDeregister,
				  rtspResponseHandler, authenticator,
				  proxyURLSuffix, verbosityLevel, applicationName);
}
+
// Protected constructor; immediately queues a "DEREGISTER" request for sending.
RTSPDeregisterSender
::RTSPDeregisterSender(UsageEnvironment& env,
		       char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToDeregister,
		       RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator,
		       char const* proxyURLSuffix,
		       int verbosityLevel, char const* applicationName)
  : RTSPRegisterOrDeregisterSender(env, remoteClientNameOrAddress, remoteClientPortNum, authenticator, verbosityLevel, applicationName) {
  // Send the "DEREGISTER" request:
  (void)sendRequest(new RequestRecord_DEREGISTER(++fCSeq, rtspResponseHandler,
						 rtspURLToDeregister, proxyURLSuffix));
}
+
RTSPDeregisterSender::~RTSPDeregisterSender() {
  // No-op; cleanup is handled by our base classes.
}
+
// Customizes the outgoing request for "DEREGISTER" commands: sets the command
// URL to the URL being deregistered, and builds a "Transport:" header
// carrying the optional "proxy_url_suffix" parameter.
// All other commands are handled by the "RTSPClient" base implementation.
Boolean RTSPDeregisterSender::setRequestFields(RequestRecord* request,
					       char*& cmdURL, Boolean& cmdURLWasAllocated,
					       char const*& protocolStr,
					       char*& extraHeaders, Boolean& extraHeadersWereAllocated) {
  if (strcmp(request->commandName(), "DEREGISTER") == 0) {
    RequestRecord_DEREGISTER* request_DEREGISTER = (RequestRecord_DEREGISTER*)request;

    setBaseURL(request_DEREGISTER->rtspURLToDeregister());
    cmdURL = (char*)url();
    cmdURLWasAllocated = False; // "url()" owns the string

    // Generate the "Transport:" header that will contain our DEREGISTER-specific parameters.  This will be "extraHeaders".
    // First, generate the "proxy_url_suffix" parameter string, if any:
    char* proxyURLSuffixParameterStr;
    if (request_DEREGISTER->proxyURLSuffix() == NULL) {
      proxyURLSuffixParameterStr = strDup("");
    } else {
      char const* proxyURLSuffixParameterFmt = "proxy_url_suffix=%s";
      unsigned proxyURLSuffixParameterSize = strlen(proxyURLSuffixParameterFmt)
	+ strlen(request_DEREGISTER->proxyURLSuffix());
      proxyURLSuffixParameterStr = new char[proxyURLSuffixParameterSize];
      sprintf(proxyURLSuffixParameterStr, proxyURLSuffixParameterFmt, request_DEREGISTER->proxyURLSuffix());
    }

    char const* transportHeaderFmt = "Transport: %s\r\n";
    unsigned transportHeaderSize = strlen(transportHeaderFmt) + strlen(proxyURLSuffixParameterStr);
    char* transportHeaderStr = new char[transportHeaderSize];
    sprintf(transportHeaderStr, transportHeaderFmt,
	    proxyURLSuffixParameterStr);
    delete[] proxyURLSuffixParameterStr;

    extraHeaders = transportHeaderStr;
    extraHeadersWereAllocated = True; // caller takes ownership and will delete[]

    return True;
  } else {
    return RTSPClient::setRequestFields(request, cmdURL, cmdURLWasAllocated, protocolStr, extraHeaders, extraHeadersWereAllocated);
  }
}
+
// Request record for a "DEREGISTER" command; all state lives in the base class.
RTSPDeregisterSender::RequestRecord_DEREGISTER
::RequestRecord_DEREGISTER(unsigned cseq, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToDeregister,
			   char const* proxyURLSuffix)
  : RTSPRegisterOrDeregisterSender::RequestRecord_REGISTER_or_DEREGISTER(cseq, "DEREGISTER", rtspResponseHandler, rtspURLToDeregister, proxyURLSuffix) {
}
+
RTSPDeregisterSender::RequestRecord_DEREGISTER::~RequestRecord_DEREGISTER() {
  // No-op; the base class frees the URL and proxy-suffix strings.
}
diff --git a/liveMedia/RTSPServer.cpp b/liveMedia/RTSPServer.cpp
new file mode 100644
index 0000000..a6b1ca4
--- /dev/null
+++ b/liveMedia/RTSPServer.cpp
@@ -0,0 +1,1874 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A RTSP server
+// Implementation
+
+#include "RTSPServer.hh"
+#include "RTSPCommon.hh"
+#include "RTSPRegisterSender.hh"
+#include "Base64.hh"
+#include <GroupsockHelper.hh>
+
+////////// RTSPServer implementation //////////
+
// Public factory: binds a server socket on "ourPort", then constructs the
// server.  Returns NULL if the socket could not be set up.
RTSPServer*
RTSPServer::createNew(UsageEnvironment& env, Port ourPort,
		      UserAuthenticationDatabase* authDatabase,
		      unsigned reclamationSeconds) {
  int ourSocket = setUpOurSocket(env, ourPort);
  if (ourSocket == -1) return NULL;

  return new RTSPServer(env, ourSocket, ourPort, authDatabase, reclamationSeconds);
}
+
// Looks up a previously-created "RTSPServer" by its medium name.
// On success, sets "resultServer" and returns True; otherwise leaves
// "resultServer" NULL, sets an error message in "env", and returns False.
Boolean RTSPServer::lookupByName(UsageEnvironment& env,
				 char const* name,
				 RTSPServer*& resultServer) {
  resultServer = NULL; // unless we succeed

  Medium* medium;
  if (!Medium::lookupByName(env, name, medium)) return False;

  if (!medium->isRTSPServer()) {
    env.setResultMsg(name, " is not a RTSP server");
    return False;
  }

  resultServer = (RTSPServer*)medium;
  return True;
}
+
// Returns a newly-allocated "rtsp://..." URL for the given session, formed by
// appending the session's stream name to our URL prefix.
// The caller is responsible for delete[]ing the result.
char* RTSPServer
::rtspURL(ServerMediaSession const* serverMediaSession, int clientSocket) const {
  char* urlPrefix = rtspURLPrefix(clientSocket);
  char const* sessionName = serverMediaSession->streamName();

  char* resultURL = new char[strlen(urlPrefix) + strlen(sessionName) + 1];
  sprintf(resultURL, "%s%s", urlPrefix, sessionName);

  delete[] urlPrefix; // was allocated by "rtspURLPrefix()"
  return resultURL;
}
+
// Returns a newly-allocated (strDup()ed) "rtsp://<ip>:<port>/" prefix.
// If "clientSocket" is valid, the IP address is the one on which that client
// reached us (so multi-homed hosts advertise the right address); otherwise
// our default ("receiving interface") address is used.
// The caller is responsible for delete[]ing the result.
char* RTSPServer::rtspURLPrefix(int clientSocket) const {
  struct sockaddr_in ourAddress;
  if (clientSocket < 0) {
    // Use our default IP address in the URL:
    ourAddress.sin_addr.s_addr = ReceivingInterfaceAddr != 0
      ? ReceivingInterfaceAddr
      : ourIPAddress(envir()); // hack
  } else {
    // NOTE(review): the "getsockname()" result is not checked; on failure
    // "ourAddress" would be left uninitialized - confirm this can't happen.
    SOCKLEN_T namelen = sizeof ourAddress;
    getsockname(clientSocket, (struct sockaddr*)&ourAddress, &namelen);
  }

  char urlBuffer[100]; // more than big enough for "rtsp://<ip-address>:<port>/"

  portNumBits portNumHostOrder = ntohs(fServerPort.num());
  if (portNumHostOrder == 554 /* the default port number */) {
    // Omit the port number for the standard RTSP port:
    sprintf(urlBuffer, "rtsp://%s/", AddressString(ourAddress).val());
  } else {
    sprintf(urlBuffer, "rtsp://%s:%hu/",
	    AddressString(ourAddress).val(), portNumHostOrder);
  }

  return strDup(urlBuffer);
}
+
+UserAuthenticationDatabase* RTSPServer::setAuthenticationDatabase(UserAuthenticationDatabase* newDB) {
+ UserAuthenticationDatabase* oldDB = fAuthDB;
+ fAuthDB = newDB;
+
+ return oldDB;
+}
+
// Enables RTSP-over-HTTP tunneling by opening an additional server socket on
// "httpPort" and registering a read handler for incoming HTTP connections.
// Returns True iff the socket was set up successfully.
Boolean RTSPServer::setUpTunnelingOverHTTP(Port httpPort) {
  fHTTPServerSocket = setUpOurSocket(envir(), httpPort);
  if (fHTTPServerSocket >= 0) {
    fHTTPServerPort = httpPort;
    envir().taskScheduler().turnOnBackgroundReadHandling(fHTTPServerSocket,
							 incomingConnectionHandlerHTTP, this);
    return True;
  }

  return False;
}
+
// Returns the HTTP tunneling port number, in host byte order
// (0 if "setUpTunnelingOverHTTP()" has not succeeded).
portNumBits RTSPServer::httpServerPortNum() const {
  return ntohs(fHTTPServerPort.num());
}
+
// The command list advertised in our "OPTIONS" response's "Public:" header.
char const* RTSPServer::allowedCommandNames() {
  return "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE, GET_PARAMETER, SET_PARAMETER";
}
+
// Returns the authentication database to use for the given command.
// Subclasses may override this to vary authentication per command.
UserAuthenticationDatabase* RTSPServer::getAuthenticationDatabaseForCommand(char const* /*cmdName*/) {
  // default implementation
  return fAuthDB;
}
+
// Hook for subclasses to impose extra per-client access restrictions;
// the default implementation allows everything.
Boolean RTSPServer::specialClientAccessCheck(int /*clientSocket*/, struct sockaddr_in& /*clientAddr*/, char const* /*urlSuffix*/) {
  // default implementation
  return True;
}
+
// Hook for subclasses to impose extra per-user access restrictions (called
// after a successful authentication); the default allows everything.
Boolean RTSPServer::specialClientUserAccessCheck(int /*clientSocket*/, struct sockaddr_in& /*clientAddr*/,
						 char const* /*urlSuffix*/, char const * /*username*/) {
  // default implementation; no further access restrictions:
  return True;
}
+
+
// Protected constructor (use "createNew()").  The HTTP-tunneling members stay
// disabled until "setUpTunnelingOverHTTP()" is called.  Both hash tables are
// keyed by one-word keys (socket numbers / request counters).
RTSPServer::RTSPServer(UsageEnvironment& env,
		       int ourSocket, Port ourPort,
		       UserAuthenticationDatabase* authDatabase,
		       unsigned reclamationSeconds)
  : GenericMediaServer(env, ourSocket, ourPort, reclamationSeconds),
    fHTTPServerSocket(-1), fHTTPServerPort(0),
    fClientConnectionsForHTTPTunneling(NULL), // will get created if needed
    fTCPStreamingDatabase(HashTable::create(ONE_WORD_HASH_KEYS)),
    fPendingRegisterOrDeregisterRequests(HashTable::create(ONE_WORD_HASH_KEYS)),
    fRegisterOrDeregisterRequestCounter(0), fAuthDB(authDatabase), fAllowStreamingRTPOverTCP(True) {
}
+
// A data structure that is used to implement "fTCPStreamingDatabase"
// (and the "noteTCPStreamingOnSocket()" and "stopTCPStreamingOnSocket()" member functions):
// Each record notes one (session, track) streaming over a TCP socket; records
// for the same socket are chained via "fNext", and deleting the head record
// deletes the whole chain.
class streamingOverTCPRecord {
public:
  streamingOverTCPRecord(u_int32_t sessionId, unsigned trackNum, streamingOverTCPRecord* next)
    : fNext(next), fSessionId(sessionId), fTrackNum(trackNum) {
  }
  virtual ~streamingOverTCPRecord() {
    // Delete the remainder of the chain iteratively, rather than via a
    // recursive "delete fNext" - one destructor frame per node could
    // overflow the stack for a very long chain:
    streamingOverTCPRecord* p = fNext;
    fNext = NULL;
    while (p != NULL) {
      streamingOverTCPRecord* pNext = p->fNext;
      p->fNext = NULL; // so deleting "p" doesn't touch the rest of the chain
      delete p;
      p = pNext;
    }
  }

  streamingOverTCPRecord* fNext; // next record for the same socket (or NULL)
  u_int32_t fSessionId;
  unsigned fTrackNum;
};
+
RTSPServer::~RTSPServer() {
  // Turn off background HTTP read handling (if any):
  envir().taskScheduler().turnOffBackgroundReadHandling(fHTTPServerSocket);
  ::closeSocket(fHTTPServerSocket);

  cleanup(); // Removes all "ClientSession" and "ClientConnection" objects, and their tables.
  delete fClientConnectionsForHTTPTunneling;

  // Delete any pending REGISTER requests:
  RTSPRegisterOrDeregisterSender* r;
  while ((r = (RTSPRegisterOrDeregisterSender*)fPendingRegisterOrDeregisterRequests->getFirst()) != NULL) {
    delete r;
  }
  delete fPendingRegisterOrDeregisterRequests;

  // Empty out and close "fTCPStreamingDatabase":
  // (Deleting each head record also deletes its whole chain.)
  streamingOverTCPRecord* sotcp;
  while ((sotcp = (streamingOverTCPRecord*)fTCPStreamingDatabase->getFirst()) != NULL) {
    delete sotcp;
  }
  delete fTCPStreamingDatabase;
}
+
// Run-time type check used by "lookupByName()":
Boolean RTSPServer::isRTSPServer() const {
  return True;
}
+
// Static trampoline registered with the task scheduler; forwards to the
// member function on the "RTSPServer" instance.
void RTSPServer::incomingConnectionHandlerHTTP(void* instance, int /*mask*/) {
  RTSPServer* server = (RTSPServer*)instance;
  server->incomingConnectionHandlerHTTP();
}
// Accepts a new RTSP-over-HTTP tunneling connection on our HTTP server socket.
void RTSPServer::incomingConnectionHandlerHTTP() {
  incomingConnectionHandlerOnSocket(fHTTPServerSocket);
}
+
// Records that (clientSession, trackNum) is now streaming RTP/RTCP-over-TCP
// on "socketNum", by prepending a record to that socket's chain in
// "fTCPStreamingDatabase".  (The socket number itself - cast to a pointer -
// is the one-word hash key.)
void RTSPServer
::noteTCPStreamingOnSocket(int socketNum, RTSPClientSession* clientSession, unsigned trackNum) {
  streamingOverTCPRecord* sotcpCur
    = (streamingOverTCPRecord*)fTCPStreamingDatabase->Lookup((char const*)socketNum);
  streamingOverTCPRecord* sotcpNew
    = new streamingOverTCPRecord(clientSession->fOurSessionId, trackNum, sotcpCur);
  fTCPStreamingDatabase->Add((char const*)socketNum, sotcpNew); // replaces the old head
}
+
// Removes the record (if any) of (clientSession, trackNum) streaming over
// "socketNum", unlinking it from the socket's chain and updating the hash
// table if the chain's head changed.  A no-op if no matching record exists.
void RTSPServer
::unnoteTCPStreamingOnSocket(int socketNum, RTSPClientSession* clientSession, unsigned trackNum) {
  if (socketNum < 0) return;
  streamingOverTCPRecord* sotcpHead
    = (streamingOverTCPRecord*)fTCPStreamingDatabase->Lookup((char const*)socketNum);
  if (sotcpHead == NULL) return;

  // Look for a record of the (session,track); remove it if found:
  streamingOverTCPRecord* sotcp = sotcpHead;
  streamingOverTCPRecord* sotcpPrev = sotcpHead;
  do {
    if (sotcp->fSessionId == clientSession->fOurSessionId && sotcp->fTrackNum == trackNum) break;
    sotcpPrev = sotcp;
    sotcp = sotcp->fNext;
  } while (sotcp != NULL);
  if (sotcp == NULL) return; // not found

  if (sotcp == sotcpHead) {
    // We found it at the head of the list.  Remove it and reinsert the tail into the hash table:
    sotcpHead = sotcp->fNext;
    sotcp->fNext = NULL; // detach, so deleting "sotcp" doesn't delete the tail
    delete sotcp;

    if (sotcpHead == NULL) {
      // There were no more entries on the list.  Remove the original entry from the hash table:
      fTCPStreamingDatabase->Remove((char const*)socketNum);
    } else {
      // Add the rest of the list into the hash table (replacing the original):
      fTCPStreamingDatabase->Add((char const*)socketNum, sotcpHead);
    }
  } else {
    // We found it on the list, but not at the head.  Unlink it:
    sotcpPrev->fNext = sotcp->fNext;
    sotcp->fNext = NULL; // detach before deleting, as above
    delete sotcp;
  }
}
+
// Tears down every (session, track) stream that was using "socketNum" for
// RTP/RTCP-over-TCP delivery (e.g., because the TCP connection closed),
// then removes the socket's chain from "fTCPStreamingDatabase".
void RTSPServer::stopTCPStreamingOnSocket(int socketNum) {
  // Close any stream that is streaming over "socketNum" (using RTP/RTCP-over-TCP streaming):
  streamingOverTCPRecord* sotcp
    = (streamingOverTCPRecord*)fTCPStreamingDatabase->Lookup((char const*)socketNum);
  if (sotcp != NULL) {
    do {
      RTSPClientSession* clientSession
	= (RTSPServer::RTSPClientSession*)lookupClientSession(sotcp->fSessionId);
      if (clientSession != NULL) {
	clientSession->deleteStreamByTrack(sotcp->fTrackNum);
      }

      // Delete this record (detached from the chain first), then move on:
      streamingOverTCPRecord* sotcpNext = sotcp->fNext;
      sotcp->fNext = NULL;
      delete sotcp;
      sotcp = sotcpNext;
    } while (sotcp != NULL);
    fTCPStreamingDatabase->Remove((char const*)socketNum);
  }
}
+
+
+////////// RTSPServer::RTSPClientConnection implementation //////////
+
// Construct a connection object for a newly-accepted client socket.
// Initially the input and output sockets are the same socket; they diverge only if
// RTSP-over-HTTP tunneling gets set up later (see "handleHTTPCmd_TunnelingPOST()").
RTSPServer::RTSPClientConnection
::RTSPClientConnection(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
  : GenericMediaServer::ClientConnection(ourServer, clientSocket, clientAddr),
    fOurRTSPServer(ourServer), fClientInputSocket(fOurSocket), fClientOutputSocket(fOurSocket),
    fIsActive(True), fRecursionCount(0), fOurSessionCookie(NULL) {
  resetRequestBuffer();
}
+
// Destructor.  If we had been used for RTSP-over-HTTP tunneling, first remove our
// 'session cookie' entry (so that a later "POST" can no longer find us); then close
// our socket(s).
RTSPServer::RTSPClientConnection::~RTSPClientConnection() {
  if (fOurSessionCookie != NULL) {
    // We were being used for RTSP-over-HTTP tunneling. Also remove ourselves from the 'session cookie' hash table before we go:
    fOurRTSPServer.fClientConnectionsForHTTPTunneling->Remove(fOurSessionCookie);
    delete[] fOurSessionCookie;
  }

  closeSocketsRTSP();
}
+
+// Handler routines for specific RTSP commands:
+
// Handle an "OPTIONS" request, by replying with the set of commands that we allow:
void RTSPServer::RTSPClientConnection::handleCmd_OPTIONS() {
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "RTSP/1.0 200 OK\r\nCSeq: %s\r\n%sPublic: %s\r\n\r\n",
	   fCurrentCSeq, dateHeader(), fOurRTSPServer.allowedCommandNames());
}
+
// Handle a "GET_PARAMETER" request that was addressed to the server as a whole
// (i.e., with the special "*" URL), rather than to a specific session.
void RTSPServer::RTSPClientConnection
::handleCmd_GET_PARAMETER(char const* /*fullRequestStr*/) {
  // By default, we implement "GET_PARAMETER" (on the entire server) just as a 'no op', and send back a dummy response.
  // (If you want to handle this type of "GET_PARAMETER" differently, you can do so by defining a subclass of "RTSPServer"
  // and "RTSPServer::RTSPClientConnection", and then reimplement this virtual function in your subclass.)
  setRTSPResponse("200 OK", LIVEMEDIA_LIBRARY_VERSION_STRING);
}
+
// Handle a "SET_PARAMETER" request that was addressed to the server as a whole
// (i.e., with the special "*" URL), rather than to a specific session.
void RTSPServer::RTSPClientConnection
::handleCmd_SET_PARAMETER(char const* /*fullRequestStr*/) {
  // By default, we implement "SET_PARAMETER" (on the entire server) just as a 'no op', and send back an empty response.
  // (If you want to handle this type of "SET_PARAMETER" differently, you can do so by defining a subclass of "RTSPServer"
  // and "RTSPServer::RTSPClientConnection", and then reimplement this virtual function in your subclass.)
  setRTSPResponse("200 OK");
}
+
// Handle a "DESCRIBE" request: look up the requested "ServerMediaSession", generate its
// SDP description, and return it (along with a "Content-Base:" header giving our RTSP URL,
// which clients use to form subsequent "SETUP" URLs).
void RTSPServer::RTSPClientConnection
::handleCmd_DESCRIBE(char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr) {
  ServerMediaSession* session = NULL;
  char* sdpDescription = NULL;
  char* rtspURL = NULL;
  do {
    // Build "urlPreSuffix/urlSuffix" (or just "urlSuffix" if there's no prefix):
    char urlTotalSuffix[2*RTSP_PARAM_STRING_MAX];
    // enough space for urlPreSuffix/urlSuffix'\0'
    urlTotalSuffix[0] = '\0';
    if (urlPreSuffix[0] != '\0') {
      strcat(urlTotalSuffix, urlPreSuffix);
      strcat(urlTotalSuffix, "/");
    }
    strcat(urlTotalSuffix, urlSuffix);

    if (!authenticationOK("DESCRIBE", urlTotalSuffix, fullRequestStr)) break;

    // We should really check that the request contains an "Accept:" #####
    // for "application/sdp", because that's what we're sending back #####

    // Begin by looking up the "ServerMediaSession" object for the specified "urlTotalSuffix":
    session = fOurServer.lookupServerMediaSession(urlTotalSuffix);
    if (session == NULL) {
      handleCmd_notFound();
      break;
    }

    // Increment the "ServerMediaSession" object's reference count, in case someone removes it
    // while we're using it:
    session->incrementReferenceCount();

    // Then, assemble a SDP description for this session:
    sdpDescription = session->generateSDPDescription();
    if (sdpDescription == NULL) {
      // This usually means that a file name that was specified for a
      // "ServerMediaSubsession" does not exist.
      setRTSPResponse("404 File Not Found, Or In Incorrect Format");
      break;
    }
    unsigned sdpDescriptionSize = strlen(sdpDescription);

    // Also, generate our RTSP URL, for the "Content-Base:" header
    // (which is necessary to ensure that the correct URL gets used in subsequent "SETUP" requests).
    rtspURL = fOurRTSPServer.rtspURL(session, fClientInputSocket);

    // NOTE(review): if the SDP description is larger than "fResponseBuffer", "snprintf()"
    // truncates the output, and the "Content-Length:" value would then overstate the
    // number of bytes actually sent - confirm that buffer sizing upstream covers this.
    snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	     "RTSP/1.0 200 OK\r\nCSeq: %s\r\n"
	     "%s"
	     "Content-Base: %s/\r\n"
	     "Content-Type: application/sdp\r\n"
	     "Content-Length: %d\r\n\r\n"
	     "%s",
	     fCurrentCSeq,
	     dateHeader(),
	     rtspURL,
	     sdpDescriptionSize,
	     sdpDescription);
  } while (0);

  // Undo the reference count that we added above (deleting the session if it's now unused
  // and marked for deletion-when-unreferenced):
  if (session != NULL) {
    // Decrement its reference count, now that we're done using it:
    session->decrementReferenceCount();
    if (session->referenceCount() == 0 && session->deleteWhenUnreferenced()) {
      fOurServer.removeServerMediaSession(session);
    }
  }

  delete[] sdpDescription;
  delete[] rtspURL;
}
+
// Search "source" (of length "sourceLen"; not necessarily NUL-terminated) for a header
// named "headerName", and copy the header's value - minus any leading whitespace, up to
// (but not including) the end of its line - into "resultStr" (of size "resultMaxSize").
// If the header is absent, or its value (plus the terminating NUL) would not fit, then
// "resultStr" is left as the empty string.
static void lookForHeader(char const* headerName, char const* source, unsigned sourceLen, char* resultStr, unsigned resultMaxSize) {
  resultStr[0] = '\0'; // by default, return an empty string
  unsigned headerNameLen = strlen(headerName);
  for (int i = 0; i < (int)(sourceLen-headerNameLen); ++i) {
    // Require the candidate match to begin at the start of a line.  (Without this check,
    // a header whose name merely *ends* with "headerName" - e.g. "X-Accept:" when we're
    // looking for "Accept:" - would be matched by mistake.)
    if (i > 0 && source[i-1] != '\n') continue;

    if (strncmp(&source[i], headerName, headerNameLen) == 0 && source[i+headerNameLen] == ':') {
      // We found the header. Skip over any whitespace, then copy the rest of the line to "resultStr":
      for (i += headerNameLen+1; i < (int)sourceLen && (source[i] == ' ' || source[i] == '\t'); ++i) {}
      for (unsigned j = i; j < sourceLen; ++j) {
	if (source[j] == '\r' || source[j] == '\n') {
	  // We've found the end of the line. Copy it to the result (if it will fit):
	  if (j-i+1 > resultMaxSize) return; // it wouldn't fit
	  char const* resultSource = &source[i];
	  char const* resultSourceEnd = &source[j];
	  while (resultSource < resultSourceEnd) *resultStr++ = *resultSource++;
	  *resultStr = '\0';
	  return;
	}
      }
    }
  }
}
+
// Reply to a syntactically-bad request with "400 Bad Request":
void RTSPServer::RTSPClientConnection::handleCmd_bad() {
  // Don't do anything with "fCurrentCSeq", because it might be nonsense
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "RTSP/1.0 400 Bad Request\r\n%sAllow: %s\r\n\r\n",
	   dateHeader(), fOurRTSPServer.allowedCommandNames());
}
+
// Reply to a well-formed - but unsupported - command with "405 Method Not Allowed":
void RTSPServer::RTSPClientConnection::handleCmd_notSupported() {
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "RTSP/1.0 405 Method Not Allowed\r\nCSeq: %s\r\n%sAllow: %s\r\n\r\n",
	   fCurrentCSeq, dateHeader(), fOurRTSPServer.allowedCommandNames());
}
+
// Reply "404" when the requested stream doesn't exist:
void RTSPServer::RTSPClientConnection::handleCmd_notFound() {
  setRTSPResponse("404 Stream Not Found");
}
+
// Reply "454" when the request named a session id that we don't know about:
void RTSPServer::RTSPClientConnection::handleCmd_sessionNotFound() {
  setRTSPResponse("454 Session Not Found");
}
+
// Reply "461" when a "SETUP" requested a transport that we can't provide:
void RTSPServer::RTSPClientConnection::handleCmd_unsupportedTransport() {
  setRTSPResponse("461 Unsupported Transport");
}
+
// Attempt to parse the contents of "fRequestBuffer" as one of the (limited) HTTP requests
// used to set up RTSP-over-HTTP tunneling.  On success, returns True and fills in:
//   "resultCmdName": the HTTP method (e.g., "GET", "POST", "OPTIONS"),
//   "urlSuffix": the final component of the request URL (the text between the last
//                '/' (or ' ') and the "HTTP/" token),
//   "sessionCookie": the value of any "x-sessioncookie:" header (empty if absent),
//   "acceptStr": the value of any "Accept:" header (empty if absent).
Boolean RTSPServer::RTSPClientConnection::parseHTTPRequestString(char* resultCmdName, unsigned resultCmdNameMaxSize,
								 char* urlSuffix, unsigned urlSuffixMaxSize,
								 char* sessionCookie, unsigned sessionCookieMaxSize,
								 char* acceptStr, unsigned acceptStrMaxSize) {
  // Check for the limited HTTP requests that we expect for specifying RTSP-over-HTTP tunneling.
  // This parser is currently rather dumb; it should be made smarter #####
  char const* reqStr = (char const*)fRequestBuffer;
  unsigned const reqStrSize = fRequestBytesAlreadySeen;

  // Read everything up to the first space as the command name:
  Boolean parseSucceeded = False;
  unsigned i;
  for (i = 0; i < resultCmdNameMaxSize-1 && i < reqStrSize; ++i) {
    char c = reqStr[i];
    if (c == ' ' || c == '\t') {
      parseSucceeded = True;
      break;
    }

    resultCmdName[i] = c;
  }
  resultCmdName[i] = '\0';
  if (!parseSucceeded) return False;

  // Look for the string "HTTP/", before the first \r or \n:
  parseSucceeded = False;
  for (; i < reqStrSize-5 && reqStr[i] != '\r' && reqStr[i] != '\n'; ++i) {
    if (reqStr[i] == 'H' && reqStr[i+1] == 'T' && reqStr[i+2]== 'T' && reqStr[i+3]== 'P' && reqStr[i+4]== '/') {
      i += 5; // to advance past the "HTTP/"
      parseSucceeded = True;
      break;
    }
  }
  if (!parseSucceeded) return False;

  // Get the 'URL suffix' that occurred before this:
  // ("i" is now just past "HTTP/", so "i-6" indexes the character preceding the 'H'.)
  unsigned k = i-6;
  while (k > 0 && reqStr[k] == ' ') --k; // back up over white space
  unsigned j = k;
  while (j > 0 && reqStr[j] != ' ' && reqStr[j] != '/') --j;
  // The URL suffix is in position (j,k]:
  if (k - j + 1 > urlSuffixMaxSize) return False; // there's no room>
  unsigned n = 0;
  while (++j <= k) urlSuffix[n++] = reqStr[j];
  urlSuffix[n] = '\0';

  // Look for various headers that we're interested in:
  // (Only the portion of the request following "HTTP/" is scanned, so the headers
  //  must appear after the request line - which they always do.)
  lookForHeader("x-sessioncookie", &reqStr[i], reqStrSize-i, sessionCookie, sessionCookieMaxSize);
  lookForHeader("Accept", &reqStr[i], reqStrSize-i, acceptStr, acceptStrMaxSize);

  return True;
}
+
// Reply (over HTTP) "405 Method Not Allowed", for HTTP requests that we don't handle:
void RTSPServer::RTSPClientConnection::handleHTTPCmd_notSupported() {
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "HTTP/1.1 405 Method Not Allowed\r\n%s\r\n\r\n",
	   dateHeader());
}
+
// Reply (over HTTP) "404 Not Found":
void RTSPServer::RTSPClientConnection::handleHTTPCmd_notFound() {
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "HTTP/1.1 404 Not Found\r\n%s\r\n\r\n",
	   dateHeader());
}
+
// Handle an HTTP "OPTIONS" request (e.g., a CORS preflight from a browser-based client):
void RTSPServer::RTSPClientConnection::handleHTTPCmd_OPTIONS() {
#ifdef DEBUG
  fprintf(stderr, "Handled HTTP \"OPTIONS\" request\n");
#endif
  // Construct a response to the "OPTIONS" command that notes that our special headers (for RTSP-over-HTTP tunneling) are allowed:
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "HTTP/1.1 200 OK\r\n"
	   "%s"
	   "Access-Control-Allow-Origin: *\r\n"
	   "Access-Control-Allow-Methods: POST, GET, OPTIONS\r\n"
	   "Access-Control-Allow-Headers: x-sessioncookie, Pragma, Cache-Control\r\n"
	   "Access-Control-Max-Age: 1728000\r\n"
	   "\r\n",
	   dateHeader());
}
+
// Handle the HTTP "GET" half of RTSP-over-HTTP tunneling: register our 'session cookie'
// (so the matching "POST" connection can find us later), and reply with a header that
// tells the client the tunnel's response channel is open.
void RTSPServer::RTSPClientConnection::handleHTTPCmd_TunnelingGET(char const* sessionCookie) {
  // Record ourself as having this 'session cookie', so that a subsequent HTTP "POST" command (with the same 'session cookie')
  // can find us:
  if (fOurRTSPServer.fClientConnectionsForHTTPTunneling == NULL) {
    // Lazily create the cookie->connection hash table on first use:
    fOurRTSPServer.fClientConnectionsForHTTPTunneling = HashTable::create(STRING_HASH_KEYS);
  }
  // Replace any cookie that we'd previously recorded:
  delete[] fOurSessionCookie; fOurSessionCookie = strDup(sessionCookie);
  fOurRTSPServer.fClientConnectionsForHTTPTunneling->Add(sessionCookie, (void*)this);
#ifdef DEBUG
  fprintf(stderr, "Handled HTTP \"GET\" request (client output socket: %d)\n", fClientOutputSocket);
#endif

  // Construct our response:
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "HTTP/1.1 200 OK\r\n"
	   "%s"
	   "Cache-Control: no-cache\r\n"
	   "Pragma: no-cache\r\n"
	   "Content-Type: application/x-rtsp-tunnelled\r\n"
	   "\r\n",
	   dateHeader());
}
+
// Handle the HTTP "POST" half of RTSP-over-HTTP tunneling.  On success, hand our input
// socket (plus any already-read "extraData") over to the connection that handled the
// earlier "GET" with the same 'session cookie', and return True; this connection object
// then goes away.  Returns False (after replying 405) if no matching "GET" was found.
Boolean RTSPServer::RTSPClientConnection
::handleHTTPCmd_TunnelingPOST(char const* sessionCookie, unsigned char const* extraData, unsigned extraDataSize) {
  // Use the "sessionCookie" string to look up the separate "RTSPClientConnection" object that should have been used to handle
  // an earlier HTTP "GET" request:
  if (fOurRTSPServer.fClientConnectionsForHTTPTunneling == NULL) {
    fOurRTSPServer.fClientConnectionsForHTTPTunneling = HashTable::create(STRING_HASH_KEYS);
  }
  RTSPServer::RTSPClientConnection* prevClientConnection
    = (RTSPServer::RTSPClientConnection*)(fOurRTSPServer.fClientConnectionsForHTTPTunneling->Lookup(sessionCookie));
  if (prevClientConnection == NULL || prevClientConnection == this) {
    // Either there was no previous HTTP "GET" request, or it was on the same connection; treat this "POST" request as bad:
    handleHTTPCmd_notSupported();
    fIsActive = False; // triggers deletion of ourself
    return False;
  }
#ifdef DEBUG
  fprintf(stderr, "Handled HTTP \"POST\" request (client input socket: %d)\n", fClientInputSocket);
#endif

  // Change the previous "RTSPClientSession" object's input socket to ours. It will be used for subsequent requests:
  prevClientConnection->changeClientInputSocket(fClientInputSocket, extraData, extraDataSize);
  fClientInputSocket = fClientOutputSocket = -1; // so the socket doesn't get closed when we get deleted
  return True;
}
+
// Handle a plain HTTP "GET" for a stream (i.e., one with no tunneling 'session cookie').
// Subclasses may reimplement this to serve streams over HTTP.
void RTSPServer::RTSPClientConnection::handleHTTPCmd_StreamingGET(char const* /*urlSuffix*/, char const* /*fullRequestStr*/) {
  // By default, we don't support requests to access streams via HTTP:
  handleHTTPCmd_notSupported();
}
+
// Reset our request-parsing state, in preparation for reading a (new) request:
void RTSPServer::RTSPClientConnection::resetRequestBuffer() {
  ClientConnection::resetRequestBuffer();

  fLastCRLF = &fRequestBuffer[-3]; // hack: Ensures that we don't think we have end-of-msg if the data starts with <CR><LF>
  fBase64RemainderCount = 0; // no partial (not-yet-decodable) Base64 input is pending
}
+
// Close this connection's socket(s).  Any RTP/RTCP-over-TCP streaming on the output
// socket must be stopped first, before the socket is closed.
void RTSPServer::RTSPClientConnection::closeSocketsRTSP() {
  // First, tell our server to stop any streaming that it might be doing over our output socket:
  fOurRTSPServer.stopTCPStreamingOnSocket(fClientOutputSocket);

  // Turn off background handling on our input socket (and output socket, if different); then close it (or them):
  if (fClientOutputSocket != fClientInputSocket) {
    // (The sockets differ only when RTSP-over-HTTP tunneling is in use.)
    envir().taskScheduler().disableBackgroundHandling(fClientOutputSocket);
    ::closeSocket(fClientOutputSocket);
  }
  fClientOutputSocket = -1;

  closeSockets(); // closes fClientInputSocket
}
+
+void RTSPServer::RTSPClientConnection::handleAlternativeRequestByte(void* instance, u_int8_t requestByte) {
+ RTSPClientConnection* connection = (RTSPClientConnection*)instance;
+ connection->handleAlternativeRequestByte1(requestByte);
+}
+
// Handle one byte of request data delivered by an 'alternative' source (e.g., a handler
// that took over our input socket).  Two byte values are reserved as in-band signals:
// 0xFF means "a read error occurred"; 0xFE means "return control of the input socket to us".
void RTSPServer::RTSPClientConnection::handleAlternativeRequestByte1(u_int8_t requestByte) {
  if (requestByte == 0xFF) {
    // Hack: The new handler of the input TCP socket encountered an error reading it. Indicate this:
    handleRequestBytes(-1);
  } else if (requestByte == 0xFE) {
    // Another hack: The new handler of the input TCP socket no longer needs it, so take back control of it:
    envir().taskScheduler().setBackgroundHandling(fClientInputSocket, SOCKET_READABLE|SOCKET_EXCEPTION,
						  incomingRequestHandler, this);
  } else {
    // Normal case: Add this character to our buffer; then try to handle the data that we have buffered so far:
    // (Silently drop the byte if the request buffer is already full.)
    if (fRequestBufferBytesLeft == 0 || fRequestBytesAlreadySeen >= REQUEST_BUFFER_SIZE) return;
    fRequestBuffer[fRequestBytesAlreadySeen] = requestByte;
    handleRequestBytes(1);
  }
}
+
+void RTSPServer::RTSPClientConnection::handleRequestBytes(int newBytesRead) {
+ int numBytesRemaining = 0;
+ ++fRecursionCount;
+
+ do {
+ RTSPServer::RTSPClientSession* clientSession = NULL;
+
+ if (newBytesRead < 0 || (unsigned)newBytesRead >= fRequestBufferBytesLeft) {
+ // Either the client socket has died, or the request was too big for us.
+ // Terminate this connection:
+#ifdef DEBUG
+ fprintf(stderr, "RTSPClientConnection[%p]::handleRequestBytes() read %d new bytes (of %d); terminating connection!\n", this, newBytesRead, fRequestBufferBytesLeft);
+#endif
+ fIsActive = False;
+ break;
+ }
+
+ Boolean endOfMsg = False;
+ unsigned char* ptr = &fRequestBuffer[fRequestBytesAlreadySeen];
+#ifdef DEBUG
+ ptr[newBytesRead] = '\0';
+ fprintf(stderr, "RTSPClientConnection[%p]::handleRequestBytes() %s %d new bytes:%s\n",
+ this, numBytesRemaining > 0 ? "processing" : "read", newBytesRead, ptr);
+#endif
+
+ if (fClientOutputSocket != fClientInputSocket && numBytesRemaining == 0) {
+ // We're doing RTSP-over-HTTP tunneling, and input commands are assumed to have been Base64-encoded.
+ // We therefore Base64-decode as much of this new data as we can (i.e., up to a multiple of 4 bytes).
+
+ // But first, we remove any whitespace that may be in the input data:
+ unsigned toIndex = 0;
+ for (int fromIndex = 0; fromIndex < newBytesRead; ++fromIndex) {
+ char c = ptr[fromIndex];
+ if (!(c == ' ' || c == '\t' || c == '\r' || c == '\n')) { // not 'whitespace': space,tab,CR,NL
+ ptr[toIndex++] = c;
+ }
+ }
+ newBytesRead = toIndex;
+
+ unsigned numBytesToDecode = fBase64RemainderCount + newBytesRead;
+ unsigned newBase64RemainderCount = numBytesToDecode%4;
+ numBytesToDecode -= newBase64RemainderCount;
+ if (numBytesToDecode > 0) {
+ ptr[newBytesRead] = '\0';
+ unsigned decodedSize;
+ unsigned char* decodedBytes = base64Decode((char const*)(ptr-fBase64RemainderCount), numBytesToDecode, decodedSize);
+#ifdef DEBUG
+ fprintf(stderr, "Base64-decoded %d input bytes into %d new bytes:", numBytesToDecode, decodedSize);
+ for (unsigned k = 0; k < decodedSize; ++k) fprintf(stderr, "%c", decodedBytes[k]);
+ fprintf(stderr, "\n");
+#endif
+
+ // Copy the new decoded bytes in place of the old ones (we can do this because there are fewer decoded bytes than original):
+ unsigned char* to = ptr-fBase64RemainderCount;
+ for (unsigned i = 0; i < decodedSize; ++i) *to++ = decodedBytes[i];
+
+ // Then copy any remaining (undecoded) bytes to the end:
+ for (unsigned j = 0; j < newBase64RemainderCount; ++j) *to++ = (ptr-fBase64RemainderCount+numBytesToDecode)[j];
+
+ newBytesRead = decodedSize - fBase64RemainderCount + newBase64RemainderCount;
+ // adjust to allow for the size of the new decoded data (+ remainder)
+ delete[] decodedBytes;
+ }
+ fBase64RemainderCount = newBase64RemainderCount;
+ }
+
+ unsigned char* tmpPtr = fLastCRLF + 2;
+ if (fBase64RemainderCount == 0) { // no more Base-64 bytes remain to be read/decoded
+ // Look for the end of the message: <CR><LF><CR><LF>
+ if (tmpPtr < fRequestBuffer) tmpPtr = fRequestBuffer;
+ while (tmpPtr < &ptr[newBytesRead-1]) {
+ if (*tmpPtr == '\r' && *(tmpPtr+1) == '\n') {
+ if (tmpPtr - fLastCRLF == 2) { // This is it:
+ endOfMsg = True;
+ break;
+ }
+ fLastCRLF = tmpPtr;
+ }
+ ++tmpPtr;
+ }
+ }
+
+ fRequestBufferBytesLeft -= newBytesRead;
+ fRequestBytesAlreadySeen += newBytesRead;
+
+ if (!endOfMsg) break; // subsequent reads will be needed to complete the request
+
+ // Parse the request string into command name and 'CSeq', then handle the command:
+ fRequestBuffer[fRequestBytesAlreadySeen] = '\0';
+ char cmdName[RTSP_PARAM_STRING_MAX];
+ char urlPreSuffix[RTSP_PARAM_STRING_MAX];
+ char urlSuffix[RTSP_PARAM_STRING_MAX];
+ char cseq[RTSP_PARAM_STRING_MAX];
+ char sessionIdStr[RTSP_PARAM_STRING_MAX];
+ unsigned contentLength = 0;
+ Boolean playAfterSetup = False;
+ fLastCRLF[2] = '\0'; // temporarily, for parsing
+ Boolean parseSucceeded = parseRTSPRequestString((char*)fRequestBuffer, fLastCRLF+2 - fRequestBuffer,
+ cmdName, sizeof cmdName,
+ urlPreSuffix, sizeof urlPreSuffix,
+ urlSuffix, sizeof urlSuffix,
+ cseq, sizeof cseq,
+ sessionIdStr, sizeof sessionIdStr,
+ contentLength);
+ fLastCRLF[2] = '\r'; // restore its value
+ // Check first for a bogus "Content-Length" value that would cause a pointer wraparound:
+ if (tmpPtr + 2 + contentLength < tmpPtr + 2) {
+#ifdef DEBUG
+ fprintf(stderr, "parseRTSPRequestString() returned a bogus \"Content-Length:\" value: 0x%x (%d)\n", contentLength, (int)contentLength);
+#endif
+ contentLength = 0;
+ parseSucceeded = False;
+ }
+ if (parseSucceeded) {
+#ifdef DEBUG
+ fprintf(stderr, "parseRTSPRequestString() succeeded, returning cmdName \"%s\", urlPreSuffix \"%s\", urlSuffix \"%s\", CSeq \"%s\", Content-Length %u, with %d bytes following the message.\n", cmdName, urlPreSuffix, urlSuffix, cseq, contentLength, ptr + newBytesRead - (tmpPtr + 2));
+#endif
+ // If there was a "Content-Length:" header, then make sure we've received all of the data that it specified:
+ if (ptr + newBytesRead < tmpPtr + 2 + contentLength) break; // we still need more data; subsequent reads will give it to us
+
+ // If the request included a "Session:" id, and it refers to a client session that's
+ // current ongoing, then use this command to indicate 'liveness' on that client session:
+ Boolean const requestIncludedSessionId = sessionIdStr[0] != '\0';
+ if (requestIncludedSessionId) {
+ clientSession
+ = (RTSPServer::RTSPClientSession*)(fOurRTSPServer.lookupClientSession(sessionIdStr));
+ if (clientSession != NULL) clientSession->noteLiveness();
+ }
+
+ // We now have a complete RTSP request.
+ // Handle the specified command (beginning with commands that are session-independent):
+ fCurrentCSeq = cseq;
+ if (strcmp(cmdName, "OPTIONS") == 0) {
+ // If the "OPTIONS" command included a "Session:" id for a session that doesn't exist,
+ // then treat this as an error:
+ if (requestIncludedSessionId && clientSession == NULL) {
+#ifdef DEBUG
+ fprintf(stderr, "Calling handleCmd_sessionNotFound() (case 1)\n");
+#endif
+ handleCmd_sessionNotFound();
+ } else {
+ // Normal case:
+ handleCmd_OPTIONS();
+ }
+ } else if (urlPreSuffix[0] == '\0' && urlSuffix[0] == '*' && urlSuffix[1] == '\0') {
+ // The special "*" URL means: an operation on the entire server. This works only for GET_PARAMETER and SET_PARAMETER:
+ if (strcmp(cmdName, "GET_PARAMETER") == 0) {
+ handleCmd_GET_PARAMETER((char const*)fRequestBuffer);
+ } else if (strcmp(cmdName, "SET_PARAMETER") == 0) {
+ handleCmd_SET_PARAMETER((char const*)fRequestBuffer);
+ } else {
+ handleCmd_notSupported();
+ }
+ } else if (strcmp(cmdName, "DESCRIBE") == 0) {
+ handleCmd_DESCRIBE(urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
+ } else if (strcmp(cmdName, "SETUP") == 0) {
+ Boolean areAuthenticated = True;
+
+ if (!requestIncludedSessionId) {
+ // No session id was present in the request.
+ // So create a new "RTSPClientSession" object for this request.
+
+ // But first, make sure that we're authenticated to perform this command:
+ char urlTotalSuffix[2*RTSP_PARAM_STRING_MAX];
+ // enough space for urlPreSuffix/urlSuffix'\0'
+ urlTotalSuffix[0] = '\0';
+ if (urlPreSuffix[0] != '\0') {
+ strcat(urlTotalSuffix, urlPreSuffix);
+ strcat(urlTotalSuffix, "/");
+ }
+ strcat(urlTotalSuffix, urlSuffix);
+ if (authenticationOK("SETUP", urlTotalSuffix, (char const*)fRequestBuffer)) {
+ clientSession
+ = (RTSPServer::RTSPClientSession*)fOurRTSPServer.createNewClientSessionWithId();
+ } else {
+ areAuthenticated = False;
+ }
+ }
+ if (clientSession != NULL) {
+ clientSession->handleCmd_SETUP(this, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
+ playAfterSetup = clientSession->fStreamAfterSETUP;
+ } else if (areAuthenticated) {
+#ifdef DEBUG
+ fprintf(stderr, "Calling handleCmd_sessionNotFound() (case 2)\n");
+#endif
+ handleCmd_sessionNotFound();
+ }
+ } else if (strcmp(cmdName, "TEARDOWN") == 0
+ || strcmp(cmdName, "PLAY") == 0
+ || strcmp(cmdName, "PAUSE") == 0
+ || strcmp(cmdName, "GET_PARAMETER") == 0
+ || strcmp(cmdName, "SET_PARAMETER") == 0) {
+ if (clientSession != NULL) {
+ clientSession->handleCmd_withinSession(this, cmdName, urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
+ } else {
+#ifdef DEBUG
+ fprintf(stderr, "Calling handleCmd_sessionNotFound() (case 3)\n");
+#endif
+ handleCmd_sessionNotFound();
+ }
+ } else if (strcmp(cmdName, "REGISTER") == 0 || strcmp(cmdName, "DEREGISTER") == 0) {
+ // Because - unlike other commands - an implementation of this command needs
+ // the entire URL, we re-parse the command to get it:
+ char* url = strDupSize((char*)fRequestBuffer);
+ if (sscanf((char*)fRequestBuffer, "%*s %s", url) == 1) {
+ // Check for special command-specific parameters in a "Transport:" header:
+ Boolean reuseConnection, deliverViaTCP;
+ char* proxyURLSuffix;
+ parseTransportHeaderForREGISTER((const char*)fRequestBuffer, reuseConnection, deliverViaTCP, proxyURLSuffix);
+
+ handleCmd_REGISTER(cmdName, url, urlSuffix, (char const*)fRequestBuffer, reuseConnection, deliverViaTCP, proxyURLSuffix);
+ delete[] proxyURLSuffix;
+ } else {
+ handleCmd_bad();
+ }
+ delete[] url;
+ } else {
+ // The command is one that we don't handle:
+ handleCmd_notSupported();
+ }
+ } else {
+#ifdef DEBUG
+ fprintf(stderr, "parseRTSPRequestString() failed; checking now for HTTP commands (for RTSP-over-HTTP tunneling)...\n");
+#endif
+ // The request was not (valid) RTSP, but check for a special case: HTTP commands (for setting up RTSP-over-HTTP tunneling):
+ char sessionCookie[RTSP_PARAM_STRING_MAX];
+ char acceptStr[RTSP_PARAM_STRING_MAX];
+ *fLastCRLF = '\0'; // temporarily, for parsing
+ parseSucceeded = parseHTTPRequestString(cmdName, sizeof cmdName,
+ urlSuffix, sizeof urlPreSuffix,
+ sessionCookie, sizeof sessionCookie,
+ acceptStr, sizeof acceptStr);
+ *fLastCRLF = '\r';
+ if (parseSucceeded) {
+#ifdef DEBUG
+ fprintf(stderr, "parseHTTPRequestString() succeeded, returning cmdName \"%s\", urlSuffix \"%s\", sessionCookie \"%s\", acceptStr \"%s\"\n", cmdName, urlSuffix, sessionCookie, acceptStr);
+#endif
+ // Check that the HTTP command is valid for RTSP-over-HTTP tunneling: There must be a 'session cookie'.
+ Boolean isValidHTTPCmd = True;
+ if (strcmp(cmdName, "OPTIONS") == 0) {
+ handleHTTPCmd_OPTIONS();
+ } else if (sessionCookie[0] == '\0') {
+ // There was no "x-sessioncookie:" header. If there was an "Accept: application/x-rtsp-tunnelled" header,
+ // then this is a bad tunneling request. Otherwise, assume that it's an attempt to access the stream via HTTP.
+ if (strcmp(acceptStr, "application/x-rtsp-tunnelled") == 0) {
+ isValidHTTPCmd = False;
+ } else {
+ handleHTTPCmd_StreamingGET(urlSuffix, (char const*)fRequestBuffer);
+ }
+ } else if (strcmp(cmdName, "GET") == 0) {
+ handleHTTPCmd_TunnelingGET(sessionCookie);
+ } else if (strcmp(cmdName, "POST") == 0) {
+ // We might have received additional data following the HTTP "POST" command - i.e., the first Base64-encoded RTSP command.
+ // Check for this, and handle it if it exists:
+ unsigned char const* extraData = fLastCRLF+4;
+ unsigned extraDataSize = &fRequestBuffer[fRequestBytesAlreadySeen] - extraData;
+ if (handleHTTPCmd_TunnelingPOST(sessionCookie, extraData, extraDataSize)) {
+ // We don't respond to the "POST" command, and we go away:
+ fIsActive = False;
+ break;
+ }
+ } else {
+ isValidHTTPCmd = False;
+ }
+ if (!isValidHTTPCmd) {
+ handleHTTPCmd_notSupported();
+ }
+ } else {
+#ifdef DEBUG
+ fprintf(stderr, "parseHTTPRequestString() failed!\n");
+#endif
+ handleCmd_bad();
+ }
+ }
+
+#ifdef DEBUG
+ fprintf(stderr, "sending response: %s", fResponseBuffer);
+#endif
+ send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0);
+
+ if (playAfterSetup) {
+ // The client has asked for streaming to commence now, rather than after a
+ // subsequent "PLAY" command. So, simulate the effect of a "PLAY" command:
+ clientSession->handleCmd_withinSession(this, "PLAY", urlPreSuffix, urlSuffix, (char const*)fRequestBuffer);
+ }
+
+ // Check whether there are extra bytes remaining in the buffer, after the end of the request (a rare case).
+ // If so, move them to the front of our buffer, and keep processing it, because it might be a following, pipelined request.
+ unsigned requestSize = (fLastCRLF+4-fRequestBuffer) + contentLength;
+ numBytesRemaining = fRequestBytesAlreadySeen - requestSize;
+ resetRequestBuffer(); // to prepare for any subsequent request
+
+ if (numBytesRemaining > 0) {
+ memmove(fRequestBuffer, &fRequestBuffer[requestSize], numBytesRemaining);
+ newBytesRead = numBytesRemaining;
+ }
+ } while (numBytesRemaining > 0);
+
+ --fRecursionCount;
+ if (!fIsActive) {
+ if (fRecursionCount > 0) closeSockets(); else delete this;
+ // Note: The "fRecursionCount" test is for a pathological situation where we reenter the event loop and get called recursively
+ // while handling a command (e.g., while handling a "DESCRIBE", to get a SDP description).
+ // In such a case we don't want to actually delete ourself until we leave the outermost call.
+ }
+}
+
+#define SKIP_WHITESPACE while (*fields != '\0' && (*fields == ' ' || *fields == '\t')) ++fields
+
+static Boolean parseAuthorizationHeader(char const* buf,
+ char const*& username,
+ char const*& realm,
+ char const*& nonce, char const*& uri,
+ char const*& response) {
+ // Initialize the result parameters to default values:
+ username = realm = nonce = uri = response = NULL;
+
+ // First, find "Authorization:"
+ while (1) {
+ if (*buf == '\0') return False; // not found
+ if (_strncasecmp(buf, "Authorization: Digest ", 22) == 0) break;
+ ++buf;
+ }
+
+ // Then, run through each of the fields, looking for ones we handle:
+ char const* fields = buf + 22;
+ char* parameter = strDupSize(fields);
+ char* value = strDupSize(fields);
+ char* p;
+ Boolean success;
+ do {
+ // Parse: <parameter>="<value>"
+ success = False;
+ parameter[0] = value[0] = '\0';
+ SKIP_WHITESPACE;
+ for (p = parameter; *fields != '\0' && *fields != ' ' && *fields != '\t' && *fields != '='; ) *p++ = *fields++;
+ SKIP_WHITESPACE;
+ if (*fields++ != '=') break; // parsing failed
+ *p = '\0'; // complete parsing <parameter>
+ SKIP_WHITESPACE;
+ if (*fields++ != '"') break; // parsing failed
+ for (p = value; *fields != '\0' && *fields != '"'; ) *p++ = *fields++;
+ if (*fields++ != '"') break; // parsing failed
+ *p = '\0'; // complete parsing <value>
+ SKIP_WHITESPACE;
+ success = True;
+
+ // Copy values for parameters that we understand:
+ if (strcmp(parameter, "username") == 0) {
+ username = strDup(value);
+ } else if (strcmp(parameter, "realm") == 0) {
+ realm = strDup(value);
+ } else if (strcmp(parameter, "nonce") == 0) {
+ nonce = strDup(value);
+ } else if (strcmp(parameter, "uri") == 0) {
+ uri = strDup(value);
+ } else if (strcmp(parameter, "response") == 0) {
+ response = strDup(value);
+ }
+
+ // Check for a ',', indicating that more <parameter>="<value>" pairs follow:
+ } while (*fields++ == ',');
+
+ delete[] parameter; delete[] value;
+ return success;
+}
+
// Check whether the request ("fullRequestStr") is authorized to perform "cmdName" on
// "urlSuffix".  Returns True iff (a) the server's client-access check passes, and
// (b) either no authentication database applies to this command, or the request carries
// a valid digest "Authorization:" header (with our realm and nonce, a known username,
// and a correct digest response).  On failure, a response - normally "401 Unauthorized",
// with a freshly-generated nonce - has been written into "fResponseBuffer".
Boolean RTSPServer::RTSPClientConnection
::authenticationOK(char const* cmdName, char const* urlSuffix, char const* fullRequestStr) {
  if (!fOurRTSPServer.specialClientAccessCheck(fClientInputSocket, fClientAddr, urlSuffix)) {
    setRTSPResponse("401 Unauthorized");
    return False;
  }

  // If we weren't set up with an authentication database, we're OK:
  UserAuthenticationDatabase* authDB = fOurRTSPServer.getAuthenticationDatabaseForCommand(cmdName);
  if (authDB == NULL) return True;

  // Strings filled in (heap-allocated) by "parseAuthorizationHeader()"; we delete[] them below:
  char const* username = NULL; char const* realm = NULL; char const* nonce = NULL;
  char const* uri = NULL; char const* response = NULL;
  Boolean success = False;

  do {
    // To authenticate, we first need to have a nonce set up
    // from a previous attempt:
    if (fCurrentAuthenticator.nonce() == NULL) break;

    // Next, the request needs to contain an "Authorization:" header,
    // containing a username, (our) realm, (our) nonce, uri,
    // and response string:
    if (!parseAuthorizationHeader(fullRequestStr,
				  username, realm, nonce, uri, response)
	|| username == NULL
	|| realm == NULL || strcmp(realm, fCurrentAuthenticator.realm()) != 0
	|| nonce == NULL || strcmp(nonce, fCurrentAuthenticator.nonce()) != 0
	|| uri == NULL || response == NULL) {
      break;
    }

    // Next, the username has to be known to us:
    char const* password = authDB->lookupPassword(username);
#ifdef DEBUG
    fprintf(stderr, "lookupPassword(%s) returned password %s\n", username, password);
#endif
    if (password == NULL) break;
    fCurrentAuthenticator.setUsernameAndPassword(username, password, authDB->passwordsAreMD5());

    // Finally, compute a digest response from the information that we have,
    // and compare it to the one that we were given:
    char const* ourResponse
      = fCurrentAuthenticator.computeDigestResponse(cmdName, uri);
    success = (strcmp(ourResponse, response) == 0);
    fCurrentAuthenticator.reclaimDigestResponse(ourResponse);
  } while (0);

  // Free the parsed header fields ("username" is freed later, after the final check):
  delete[] (char*)realm; delete[] (char*)nonce;
  delete[] (char*)uri; delete[] (char*)response;

  if (success) {
    // The user has been authenticated.
    // Now allow subclasses a chance to validate the user against the IP address and/or URL suffix.
    if (!fOurRTSPServer.specialClientUserAccessCheck(fClientInputSocket, fClientAddr, urlSuffix, username)) {
      // Note: We don't return a "WWW-Authenticate" header here, because the user is valid,
      // even though the server has decided that they should not have access.
      setRTSPResponse("401 Unauthorized");
      delete[] (char*)username;
      return False;
    }
  }
  delete[] (char*)username;
  if (success) return True;

  // If we get here, we failed to authenticate the user.
  // Send back a "401 Unauthorized" response, with a new random nonce:
  fCurrentAuthenticator.setRealmAndRandomNonce(authDB->realm());
  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
	   "RTSP/1.0 401 Unauthorized\r\n"
	   "CSeq: %s\r\n"
	   "%s"
	   "WWW-Authenticate: Digest realm=\"%s\", nonce=\"%s\"\r\n\r\n",
	   fCurrentCSeq,
	   dateHeader(),
	   fCurrentAuthenticator.realm(), fCurrentAuthenticator.nonce());
  return False;
}
+
+// Format a minimal RTSP response into "fResponseBuffer": the status line
+// ("responseStr" is e.g. "200 OK"), the echoed "CSeq:" header, and the date
+// header returned by "dateHeader()".  The buffer is sent back to the client
+// later, by our caller.
+void RTSPServer::RTSPClientConnection
+::setRTSPResponse(char const* responseStr) {
+  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
+	   "RTSP/1.0 %s\r\n"
+	   "CSeq: %s\r\n"
+	   "%s\r\n",
+	   responseStr,
+	   fCurrentCSeq,
+	   dateHeader());
+}
+
+// Like "setRTSPResponse(responseStr)" above, but also includes a "Session:"
+// header carrying "sessionId" (formatted as 8 uppercase hex digits).
+void RTSPServer::RTSPClientConnection
+::setRTSPResponse(char const* responseStr, u_int32_t sessionId) {
+  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
+	   "RTSP/1.0 %s\r\n"
+	   "CSeq: %s\r\n"
+	   "%s"
+	   "Session: %08X\r\n\r\n",
+	   responseStr,
+	   fCurrentCSeq,
+	   dateHeader(),
+	   sessionId);
+}
+
+// Format an RTSP response that carries a message body: status line, echoed
+// "CSeq:", date header, "Content-Length:", and then "contentStr" itself.
+// A NULL "contentStr" is treated the same as an empty body.
+void RTSPServer::RTSPClientConnection
+::setRTSPResponse(char const* responseStr, char const* contentStr) {
+  char const* body = contentStr == NULL ? "" : contentStr;
+
+  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
+	   "RTSP/1.0 %s\r\n"
+	   "CSeq: %s\r\n"
+	   "%s"
+	   "Content-Length: %d\r\n\r\n"
+	   "%s",
+	   responseStr,
+	   fCurrentCSeq,
+	   dateHeader(),
+	   (unsigned)strlen(body),
+	   body);
+}
+
+// Format an RTSP response that carries both a "Session:" header (for
+// "sessionId") and a message body ("contentStr"; NULL means an empty body).
+void RTSPServer::RTSPClientConnection
+::setRTSPResponse(char const* responseStr, u_int32_t sessionId, char const* contentStr) {
+  if (contentStr == NULL) contentStr = "";
+  unsigned const contentLen = strlen(contentStr);
+
+  snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
+	   "RTSP/1.0 %s\r\n"
+	   "CSeq: %s\r\n"
+	   "%s"
+	   "Session: %08X\r\n"
+	   "Content-Length: %d\r\n\r\n"
+	   "%s",
+	   responseStr,
+	   fCurrentCSeq,
+	   dateHeader(),
+	   sessionId,
+	   contentLen,
+	   contentStr);
+}
+
+// Switch this connection over to reading from a new socket ("newSocketNum"),
+// re-registering our read handler with the task scheduler.  Any bytes that
+// were already read from the new socket ("extraData", "extraDataSize") are
+// appended to our request buffer and handled immediately.
+void RTSPServer::RTSPClientConnection
+::changeClientInputSocket(int newSocketNum, unsigned char const* extraData, unsigned extraDataSize) {
+  TaskScheduler& scheduler = envir().taskScheduler();
+  scheduler.disableBackgroundHandling(fClientInputSocket);
+  fClientInputSocket = newSocketNum;
+  scheduler.setBackgroundHandling(fClientInputSocket, SOCKET_READABLE|SOCKET_EXCEPTION,
+				  incomingRequestHandler, this);
+
+  // Append any already-read data to our request buffer, and handle it:
+  Boolean const fitsInBuffer
+    = extraDataSize > 0 && extraDataSize <= fRequestBufferBytesLeft; // sanity check; should always be true
+  if (fitsInBuffer) {
+    unsigned char* dest = &fRequestBuffer[fRequestBytesAlreadySeen];
+    unsigned n = 0;
+    while (n < extraDataSize) {
+      dest[n] = extraData[n];
+      ++n;
+    }
+    handleRequestBytes(extraDataSize);
+  }
+}
+
+
+////////// RTSPServer::RTSPClientSession implementation //////////
+
+// Create a client-session object for session id "sessionId".  The per-track
+// stream-state array is allocated lazily, on the first "SETUP" for this
+// session (see "handleCmd_SETUP()").
+RTSPServer::RTSPClientSession
+::RTSPClientSession(RTSPServer& ourServer, u_int32_t sessionId)
+  : GenericMediaServer::ClientSession(ourServer, sessionId),
+    fOurRTSPServer(ourServer), fIsMulticast(False), fStreamAfterSETUP(False),
+    fTCPStreamIdCount(0), fNumStreamStates(0), fStreamStates(NULL) {
+}
+
+// Tear down any active streams and release the per-track state array before
+// the base-class destructor runs.
+RTSPServer::RTSPClientSession::~RTSPClientSession() {
+  reclaimStreamStates();
+}
+
+// Tear down the stream for a single track ("trackNum") within this session.
+// If that leaves no active tracks, the whole session object is reclaimed
+// ("delete this"), so callers must not touch this object afterwards.
+void RTSPServer::RTSPClientSession::deleteStreamByTrack(unsigned trackNum) {
+  if (trackNum >= fNumStreamStates) return; // sanity check; shouldn't happen
+  struct streamState& state = fStreamStates[trackNum];
+  if (state.subsession != NULL) {
+    state.subsession->deleteStream(fOurSessionId, state.streamToken);
+    state.subsession = NULL;
+  }
+
+  // Optimization: If no subsessions remain active, then we can delete ourself now:
+  unsigned numActive = 0;
+  for (unsigned i = 0; i < fNumStreamStates; ++i) {
+    if (fStreamStates[i].subsession != NULL) ++numActive;
+  }
+  if (numActive == 0) delete this;
+}
+
+// Delete every subsession's stream - first unregistering any RTP-over-TCP
+// bookkeeping with the server - then release the "fStreamStates[]" array.
+void RTSPServer::RTSPClientSession::reclaimStreamStates() {
+  for (unsigned i = 0; i < fNumStreamStates; ++i) {
+    if (fStreamStates[i].subsession != NULL) {
+      fOurRTSPServer.unnoteTCPStreamingOnSocket(fStreamStates[i].tcpSocketNum, this, i);
+      fStreamStates[i].subsession->deleteStream(fOurSessionId, fStreamStates[i].streamToken);
+    }
+  }
+  delete[] fStreamStates; fStreamStates = NULL;
+  fNumStreamStates = 0;
+}
+
+// The transport variants that a client can request in a "Transport:" header:
+typedef enum StreamingMode {
+  RTP_UDP, // normal RTP-over-UDP (the default)
+  RTP_TCP, // RTP interleaved over the RTSP TCP connection
+  RAW_UDP  // raw UDP, without RTP (e.g., "RAW/RAW/UDP", "MP2T/H2221/UDP")
+} StreamingMode;
+
+// Scan "buf" (the full request string) for a "Transport:" header, and parse
+// out the client's requested transport parameters into the reference
+// parameters.  All results are first set to defaults, so the outputs are
+// well-defined even if no "Transport:" header is present.
+// "streamingModeString" and "destinationAddressStr" are heap-allocated
+// (via "strDup()") when set; the caller owns (and must delete[]) them.
+static void parseTransportHeader(char const* buf,
+				 StreamingMode& streamingMode,
+				 char*& streamingModeString,
+				 char*& destinationAddressStr,
+				 u_int8_t& destinationTTL,
+				 portNumBits& clientRTPPortNum, // if UDP
+				 portNumBits& clientRTCPPortNum, // if UDP
+				 unsigned char& rtpChannelId, // if TCP
+				 unsigned char& rtcpChannelId // if TCP
+				 ) {
+  // Initialize the result parameters to default values:
+  streamingMode = RTP_UDP;
+  streamingModeString = NULL;
+  destinationAddressStr = NULL;
+  destinationTTL = 255;
+  clientRTPPortNum = 0;
+  clientRTCPPortNum = 1;
+  rtpChannelId = rtcpChannelId = 0xFF;
+
+  portNumBits p1, p2;
+  unsigned ttl, rtpCid, rtcpCid;
+
+  // First, find "Transport:" (scanning byte-by-byte, case-insensitively):
+  while (1) {
+    if (*buf == '\0') return; // not found
+    if (*buf == '\r' && *(buf+1) == '\n' && *(buf+2) == '\r') return; // end of the headers => not found
+    if (_strncasecmp(buf, "Transport:", 10) == 0) break;
+    ++buf;
+  }
+
+  // Then, run through each of the (';'-separated) fields, looking for ones we handle:
+  char const* fields = buf + 10;
+  while (*fields == ' ') ++fields;
+  char* field = strDupSize(fields); // scratch buffer, big enough for any single field
+  while (sscanf(fields, "%[^;\r\n]", field) == 1) {
+    if (strcmp(field, "RTP/AVP/TCP") == 0) {
+      streamingMode = RTP_TCP;
+    } else if (strcmp(field, "RAW/RAW/UDP") == 0 ||
+	       strcmp(field, "MP2T/H2221/UDP") == 0) {
+      streamingMode = RAW_UDP;
+      streamingModeString = strDup(field);
+    } else if (_strncasecmp(field, "destination=", 12) == 0) {
+      delete[] destinationAddressStr;
+      destinationAddressStr = strDup(field+12);
+    } else if (sscanf(field, "ttl%u", &ttl) == 1) {
+      // NOTE(review): this format matches "ttl<digits>" but not "ttl=<digits>"
+      // (the '=' would stop the match) - confirm which form clients send.
+      destinationTTL = (u_int8_t)ttl;
+    } else if (sscanf(field, "client_port=%hu-%hu", &p1, &p2) == 2) {
+      clientRTPPortNum = p1;
+      clientRTCPPortNum = streamingMode == RAW_UDP ? 0 : p2; // ignore the second port number if the client asked for raw UDP
+    } else if (sscanf(field, "client_port=%hu", &p1) == 1) {
+      clientRTPPortNum = p1;
+      clientRTCPPortNum = streamingMode == RAW_UDP ? 0 : p1 + 1;
+    } else if (sscanf(field, "interleaved=%u-%u", &rtpCid, &rtcpCid) == 2) {
+      rtpChannelId = (unsigned char)rtpCid;
+      rtcpChannelId = (unsigned char)rtcpCid;
+    }
+
+    fields += strlen(field);
+    while (*fields == ';' || *fields == ' ' || *fields == '\t') ++fields; // skip over separating ';' chars or whitespace
+    if (*fields == '\0' || *fields == '\r' || *fields == '\n') break;
+  }
+  delete[] field;
+}
+
+// Return True iff an "x-playNow:" header appears anywhere in "buf"
+// (case-insensitive).  Some clients use this nonstandard header to ask that
+// "SETUP" also begin playing immediately.
+static Boolean parsePlayNowHeader(char const* buf) {
+  for (char const* p = buf; *p != '\0'; ++p) {
+    if (_strncasecmp(p, "x-playNow:", 10) == 0) return True;
+  }
+  return False;
+}
+
+// Handle a "SETUP" request: resolve the stream and track names, allocate the
+// per-track state array on the first SETUP, parse the client's "Transport:"
+// header, set up the stream via the subsession, and format the response
+// (including the negotiated transport parameters) into the connection's
+// response buffer.
+void RTSPServer::RTSPClientSession
+::handleCmd_SETUP(RTSPServer::RTSPClientConnection* ourClientConnection,
+		  char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr) {
+  // Normally, "urlPreSuffix" should be the session (stream) name, and "urlSuffix" should be the subsession (track) name.
+  // However (being "liberal in what we accept"), we also handle 'aggregate' SETUP requests (i.e., without a track name),
+  // in the special case where we have only a single track.  I.e., in this case, we also handle:
+  //    "urlPreSuffix" is empty and "urlSuffix" is the session (stream) name, or
+  //    "urlPreSuffix" concatenated with "urlSuffix" (with "/" inbetween) is the session (stream) name.
+  char const* streamName = urlPreSuffix; // in the normal case
+  char const* trackId = urlSuffix; // in the normal case
+  char* concatenatedStreamName = NULL; // in the normal case
+
+  do {
+    // First, make sure the specified stream name exists:
+    ServerMediaSession* sms
+      = fOurServer.lookupServerMediaSession(streamName, fOurServerMediaSession == NULL);
+    if (sms == NULL) {
+      // Check for the special case (noted above), before we give up:
+      if (urlPreSuffix[0] == '\0') {
+	streamName = urlSuffix;
+      } else {
+	concatenatedStreamName = new char[strlen(urlPreSuffix) + strlen(urlSuffix) + 2]; // allow for the "/" and the trailing '\0'
+	sprintf(concatenatedStreamName, "%s/%s", urlPreSuffix, urlSuffix);
+	streamName = concatenatedStreamName;
+      }
+      trackId = NULL;
+
+      // Check again:
+      sms = fOurServer.lookupServerMediaSession(streamName, fOurServerMediaSession == NULL);
+    }
+    if (sms == NULL) {
+      if (fOurServerMediaSession == NULL) {
+	// The client asked for a stream that doesn't exist (and this session descriptor has not been used before):
+	ourClientConnection->handleCmd_notFound();
+      } else {
+	// The client asked for a stream that doesn't exist, but using a stream id for a stream that does exist. Bad request:
+	ourClientConnection->handleCmd_bad();
+      }
+      break;
+    } else {
+      if (fOurServerMediaSession == NULL) {
+	// We're accessing the "ServerMediaSession" for the first time.
+	fOurServerMediaSession = sms;
+	fOurServerMediaSession->incrementReferenceCount();
+      } else if (sms != fOurServerMediaSession) {
+	// The client asked for a stream that's different from the one originally requested for this stream id.  Bad request:
+	ourClientConnection->handleCmd_bad();
+	break;
+      }
+    }
+
+    if (fStreamStates == NULL) {
+      // This is the first "SETUP" for this session.  Set up our array of states for all of this session's subsessions (tracks):
+      fNumStreamStates = fOurServerMediaSession->numSubsessions();
+      fStreamStates = new struct streamState[fNumStreamStates];
+
+      ServerMediaSubsessionIterator iter(*fOurServerMediaSession);
+      ServerMediaSubsession* subsession;
+      for (unsigned i = 0; i < fNumStreamStates; ++i) {
+	subsession = iter.next();
+	fStreamStates[i].subsession = subsession;
+	fStreamStates[i].tcpSocketNum = -1; // for now; may get set for RTP-over-TCP streaming
+	fStreamStates[i].streamToken = NULL; // for now; it may be changed by the "getStreamParameters()" call that comes later
+      }
+    }
+
+    // Look up information for the specified subsession (track):
+    ServerMediaSubsession* subsession = NULL;
+    unsigned trackNum;
+    if (trackId != NULL && trackId[0] != '\0') { // normal case
+      for (trackNum = 0; trackNum < fNumStreamStates; ++trackNum) {
+	subsession = fStreamStates[trackNum].subsession;
+	if (subsession != NULL && strcmp(trackId, subsession->trackId()) == 0) break;
+      }
+      if (trackNum >= fNumStreamStates) {
+	// The specified track id doesn't exist, so this request fails:
+	ourClientConnection->handleCmd_notFound();
+	break;
+      }
+    } else {
+      // Weird case: there was no track id in the URL.
+      // This works only if we have only one subsession:
+      if (fNumStreamStates != 1 || fStreamStates[0].subsession == NULL) {
+	ourClientConnection->handleCmd_bad();
+	break;
+      }
+      trackNum = 0;
+      subsession = fStreamStates[trackNum].subsession;
+    }
+    // ASSERT: subsession != NULL
+
+    void*& token = fStreamStates[trackNum].streamToken; // alias
+    if (token != NULL) {
+      // We already handled a "SETUP" for this track (to the same client),
+      // so stop any existing streaming of it, before we set it up again:
+      subsession->pauseStream(fOurSessionId, token);
+      fOurRTSPServer.unnoteTCPStreamingOnSocket(fStreamStates[trackNum].tcpSocketNum, this, trackNum);
+      subsession->deleteStream(fOurSessionId, token);
+    }
+
+    // Look for a "Transport:" header in the request string, to extract client parameters:
+    StreamingMode streamingMode;
+    char* streamingModeString = NULL; // set when RAW_UDP streaming is specified
+    char* clientsDestinationAddressStr;
+    u_int8_t clientsDestinationTTL;
+    portNumBits clientRTPPortNum, clientRTCPPortNum;
+    unsigned char rtpChannelId, rtcpChannelId;
+    parseTransportHeader(fullRequestStr, streamingMode, streamingModeString,
+			 clientsDestinationAddressStr, clientsDestinationTTL,
+			 clientRTPPortNum, clientRTCPPortNum,
+			 rtpChannelId, rtcpChannelId);
+    if ((streamingMode == RTP_TCP && rtpChannelId == 0xFF) ||
+	(streamingMode != RTP_TCP && ourClientConnection->fClientOutputSocket != ourClientConnection->fClientInputSocket)) {
+      // An anomalous situation, caused by a buggy client.  Either:
+      //     1/ TCP streaming was requested, but with no "interleaving=" fields.  (QuickTime Player sometimes does this.), or
+      //     2/ TCP streaming was not requested, but we're doing RTSP-over-HTTP tunneling (which implies TCP streaming).
+      // In either case, we assume TCP streaming, and set the RTP and RTCP channel ids to proper values:
+      streamingMode = RTP_TCP;
+      rtpChannelId = fTCPStreamIdCount; rtcpChannelId = fTCPStreamIdCount+1;
+    }
+    if (streamingMode == RTP_TCP) fTCPStreamIdCount += 2;
+
+    Port clientRTPPort(clientRTPPortNum);
+    Port clientRTCPPort(clientRTCPPortNum);
+
+    // Next, check whether a "Range:" or "x-playNow:" header is present in the request.
+    // This isn't legal, but some clients do this to combine "SETUP" and "PLAY":
+    double rangeStart = 0.0, rangeEnd = 0.0;
+    char* absStart = NULL; char* absEnd = NULL;
+    Boolean startTimeIsNow;
+    if (parseRangeHeader(fullRequestStr, rangeStart, rangeEnd, absStart, absEnd, startTimeIsNow)) {
+      delete[] absStart; delete[] absEnd;
+      fStreamAfterSETUP = True;
+    } else if (parsePlayNowHeader(fullRequestStr)) {
+      fStreamAfterSETUP = True;
+    } else {
+      fStreamAfterSETUP = False;
+    }
+
+    // Then, get server parameters from the 'subsession':
+    if (streamingMode == RTP_TCP) {
+      // Note that we'll be streaming over the RTSP TCP connection:
+      fStreamStates[trackNum].tcpSocketNum = ourClientConnection->fClientOutputSocket;
+      fOurRTSPServer.noteTCPStreamingOnSocket(fStreamStates[trackNum].tcpSocketNum, this, trackNum);
+    }
+    netAddressBits destinationAddress = 0;
+    u_int8_t destinationTTL = 255;
+#ifdef RTSP_ALLOW_CLIENT_DESTINATION_SETTING
+    if (clientsDestinationAddressStr != NULL) {
+      // Use the client-provided "destination" address.
+      // Note: This potentially allows the server to be used in denial-of-service
+      // attacks, so don't enable this code unless you're sure that clients are
+      // trusted.
+      destinationAddress = our_inet_addr(clientsDestinationAddressStr);
+    }
+    // Also use the client-provided TTL.
+    destinationTTL = clientsDestinationTTL;
+#endif
+    delete[] clientsDestinationAddressStr;
+    Port serverRTPPort(0);
+    Port serverRTCPPort(0);
+
+    // Make sure that we transmit on the same interface that's used by the client (in case we're a multi-homed server):
+    struct sockaddr_in sourceAddr; SOCKLEN_T namelen = sizeof sourceAddr;
+    getsockname(ourClientConnection->fClientInputSocket, (struct sockaddr*)&sourceAddr, &namelen);
+    netAddressBits origSendingInterfaceAddr = SendingInterfaceAddr;
+    netAddressBits origReceivingInterfaceAddr = ReceivingInterfaceAddr;
+    // NOTE: The following might not work properly, so we ifdef it out for now:
+#ifdef HACK_FOR_MULTIHOMED_SERVERS
+    ReceivingInterfaceAddr = SendingInterfaceAddr = sourceAddr.sin_addr.s_addr;
+#endif
+
+    subsession->getStreamParameters(fOurSessionId, ourClientConnection->fClientAddr.sin_addr.s_addr,
+				    clientRTPPort, clientRTCPPort,
+				    fStreamStates[trackNum].tcpSocketNum, rtpChannelId, rtcpChannelId,
+				    destinationAddress, destinationTTL, fIsMulticast,
+				    serverRTPPort, serverRTCPPort,
+				    fStreamStates[trackNum].streamToken);
+    SendingInterfaceAddr = origSendingInterfaceAddr;
+    ReceivingInterfaceAddr = origReceivingInterfaceAddr;
+
+    AddressString destAddrStr(destinationAddress);
+    AddressString sourceAddrStr(sourceAddr);
+    char timeoutParameterString[100];
+    if (fOurRTSPServer.fReclamationSeconds > 0) {
+      sprintf(timeoutParameterString, ";timeout=%u", fOurRTSPServer.fReclamationSeconds);
+    } else {
+      timeoutParameterString[0] = '\0';
+    }
+    if (fIsMulticast) {
+      switch (streamingMode) {
+          case RTP_UDP: {
+	    snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer,
+		     "RTSP/1.0 200 OK\r\n"
+		     "CSeq: %s\r\n"
+		     "%s"
+		     "Transport: RTP/AVP;multicast;destination=%s;source=%s;port=%d-%d;ttl=%d\r\n"
+		     "Session: %08X%s\r\n\r\n",
+		     ourClientConnection->fCurrentCSeq,
+		     dateHeader(),
+		     destAddrStr.val(), sourceAddrStr.val(), ntohs(serverRTPPort.num()), ntohs(serverRTCPPort.num()), destinationTTL,
+		     fOurSessionId, timeoutParameterString);
+	    break;
+	  }
+          case RTP_TCP: {
+	    // multicast streams can't be sent via TCP
+	    ourClientConnection->handleCmd_unsupportedTransport();
+	    break;
+	  }
+          case RAW_UDP: {
+	    snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer,
+		     "RTSP/1.0 200 OK\r\n"
+		     "CSeq: %s\r\n"
+		     "%s"
+		     "Transport: %s;multicast;destination=%s;source=%s;port=%d;ttl=%d\r\n"
+		     "Session: %08X%s\r\n\r\n",
+		     ourClientConnection->fCurrentCSeq,
+		     dateHeader(),
+		     streamingModeString, destAddrStr.val(), sourceAddrStr.val(), ntohs(serverRTPPort.num()), destinationTTL,
+		     fOurSessionId, timeoutParameterString);
+	    break;
+	  }
+      }
+    } else {
+      switch (streamingMode) {
+          case RTP_UDP: {
+	    snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer,
+		     "RTSP/1.0 200 OK\r\n"
+		     "CSeq: %s\r\n"
+		     "%s"
+		     "Transport: RTP/AVP;unicast;destination=%s;source=%s;client_port=%d-%d;server_port=%d-%d\r\n"
+		     "Session: %08X%s\r\n\r\n",
+		     ourClientConnection->fCurrentCSeq,
+		     dateHeader(),
+		     destAddrStr.val(), sourceAddrStr.val(), ntohs(clientRTPPort.num()), ntohs(clientRTCPPort.num()), ntohs(serverRTPPort.num()), ntohs(serverRTCPPort.num()),
+		     fOurSessionId, timeoutParameterString);
+	    break;
+	  }
+          case RTP_TCP: {
+	    if (!fOurRTSPServer.fAllowStreamingRTPOverTCP) {
+	      ourClientConnection->handleCmd_unsupportedTransport();
+	    } else {
+	      snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer,
+		       "RTSP/1.0 200 OK\r\n"
+		       "CSeq: %s\r\n"
+		       "%s"
+		       "Transport: RTP/AVP/TCP;unicast;destination=%s;source=%s;interleaved=%d-%d\r\n"
+		       "Session: %08X%s\r\n\r\n",
+		       ourClientConnection->fCurrentCSeq,
+		       dateHeader(),
+		       destAddrStr.val(), sourceAddrStr.val(), rtpChannelId, rtcpChannelId,
+		       fOurSessionId, timeoutParameterString);
+	    }
+	    break;
+	  }
+          case RAW_UDP: {
+	    snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer,
+		     "RTSP/1.0 200 OK\r\n"
+		     "CSeq: %s\r\n"
+		     "%s"
+		     "Transport: %s;unicast;destination=%s;source=%s;client_port=%d;server_port=%d\r\n"
+		     "Session: %08X%s\r\n\r\n",
+		     ourClientConnection->fCurrentCSeq,
+		     dateHeader(),
+		     streamingModeString, destAddrStr.val(), sourceAddrStr.val(), ntohs(clientRTPPort.num()), ntohs(serverRTPPort.num()),
+		     fOurSessionId, timeoutParameterString);
+	    break;
+	  }
+      }
+    }
+    delete[] streamingModeString;
+  } while (0);
+
+  delete[] concatenatedStreamName;
+}
+
+// Handle any RTSP command that operates within an already-SETUP session
+// (TEARDOWN, PLAY, PAUSE, GET_PARAMETER, SET_PARAMETER): work out whether the
+// request names a single track (non-aggregated) or the whole stream
+// (aggregated, "subsession" == NULL), then dispatch to the per-command
+// handler.
+void RTSPServer::RTSPClientSession
+::handleCmd_withinSession(RTSPServer::RTSPClientConnection* ourClientConnection,
+			  char const* cmdName,
+			  char const* urlPreSuffix, char const* urlSuffix,
+			  char const* fullRequestStr) {
+  // This will either be:
+  // - a non-aggregated operation, if "urlPreSuffix" is the session (stream)
+  //   name and "urlSuffix" is the subsession (track) name, or
+  // - an aggregated operation, if "urlSuffix" is the session (stream) name,
+  //   or "urlPreSuffix" is the session (stream) name, and "urlSuffix" is empty,
+  //   or "urlPreSuffix" and "urlSuffix" are both nonempty, but when concatenated, (with "/") form the session (stream) name.
+  // Begin by figuring out which of these it is:
+  ServerMediaSubsession* subsession;
+
+  if (fOurServerMediaSession == NULL) { // There wasn't a previous SETUP!
+    ourClientConnection->handleCmd_notSupported();
+    return;
+  } else if (urlSuffix[0] != '\0' && strcmp(fOurServerMediaSession->streamName(), urlPreSuffix) == 0) {
+    // Non-aggregated operation.
+    // Look up the media subsession whose track id is "urlSuffix":
+    ServerMediaSubsessionIterator iter(*fOurServerMediaSession);
+    while ((subsession = iter.next()) != NULL) {
+      if (strcmp(subsession->trackId(), urlSuffix) == 0) break; // success
+    }
+    if (subsession == NULL) { // no such track!
+      ourClientConnection->handleCmd_notFound();
+      return;
+    }
+  } else if (strcmp(fOurServerMediaSession->streamName(), urlSuffix) == 0 ||
+	     (urlSuffix[0] == '\0' && strcmp(fOurServerMediaSession->streamName(), urlPreSuffix) == 0)) {
+    // Aggregated operation
+    subsession = NULL;
+  } else if (urlPreSuffix[0] != '\0' && urlSuffix[0] != '\0') {
+    // Aggregated operation, if <urlPreSuffix>/<urlSuffix> is the session (stream) name:
+    unsigned const urlPreSuffixLen = strlen(urlPreSuffix);
+    if (strncmp(fOurServerMediaSession->streamName(), urlPreSuffix, urlPreSuffixLen) == 0 &&
+	fOurServerMediaSession->streamName()[urlPreSuffixLen] == '/' &&
+	strcmp(&(fOurServerMediaSession->streamName())[urlPreSuffixLen+1], urlSuffix) == 0) {
+      subsession = NULL;
+    } else {
+      ourClientConnection->handleCmd_notFound();
+      return;
+    }
+  } else { // the request doesn't match a known stream and/or track at all!
+    ourClientConnection->handleCmd_notFound();
+    return;
+  }
+
+  // Dispatch to the handler for the specific command:
+  if (strcmp(cmdName, "TEARDOWN") == 0) {
+    handleCmd_TEARDOWN(ourClientConnection, subsession);
+  } else if (strcmp(cmdName, "PLAY") == 0) {
+    handleCmd_PLAY(ourClientConnection, subsession, fullRequestStr);
+  } else if (strcmp(cmdName, "PAUSE") == 0) {
+    handleCmd_PAUSE(ourClientConnection, subsession);
+  } else if (strcmp(cmdName, "GET_PARAMETER") == 0) {
+    handleCmd_GET_PARAMETER(ourClientConnection, subsession, fullRequestStr);
+  } else if (strcmp(cmdName, "SET_PARAMETER") == 0) {
+    handleCmd_SET_PARAMETER(ourClientConnection, subsession, fullRequestStr);
+  }
+}
+
+// Handle a "TEARDOWN" request: delete the stream for the specified track, or
+// for all tracks if "subsession" is NULL (an aggregate operation), then reply
+// "200 OK".  If no tracks remain, this object deletes itself, so callers must
+// not touch it after this returns.
+void RTSPServer::RTSPClientSession
+::handleCmd_TEARDOWN(RTSPServer::RTSPClientConnection* ourClientConnection,
+		     ServerMediaSubsession* subsession) {
+  unsigned i;
+  for (i = 0; i < fNumStreamStates; ++i) {
+    if (subsession == NULL /* means: aggregated operation */
+	|| subsession == fStreamStates[i].subsession) {
+      if (fStreamStates[i].subsession != NULL) {
+	fOurRTSPServer.unnoteTCPStreamingOnSocket(fStreamStates[i].tcpSocketNum, this, i);
+	fStreamStates[i].subsession->deleteStream(fOurSessionId, fStreamStates[i].streamToken);
+	fStreamStates[i].subsession = NULL;
+      }
+    }
+  }
+
+  setRTSPResponse(ourClientConnection, "200 OK");
+
+  // Optimization: If all subsessions have now been torn down, then we know that we can reclaim our object now.
+  // (Without this optimization, however, this object would still get reclaimed later, as a result of a 'liveness' timeout.)
+  Boolean noSubsessionsRemain = True;
+  for (i = 0; i < fNumStreamStates; ++i) {
+    if (fStreamStates[i].subsession != NULL) {
+      noSubsessionsRemain = False;
+      break;
+    }
+  }
+  if (noSubsessionsRemain) delete this;
+}
+
+// Handle a "PLAY" request: apply any "Scale:" and "Range:" headers (seeking/
+// scaling each affected subsession), start streaming on the specified track -
+// or on all tracks, if "subsession" is NULL (an aggregate operation) - and
+// format a "200 OK" response containing "Scale:", "Range:", "Session:", and
+// "RTP-Info:" headers.
+// FIX: "Range: clock=..." values ("absStart"/"absEnd") come straight from the
+// client's request and were previously written into the fixed-size "buf[100]"
+// with unbounded "sprintf()" - a stack-buffer overflow on an oversized header.
+// All formatting into "buf" now uses "snprintf(buf, sizeof buf, ...)".
+void RTSPServer::RTSPClientSession
+::handleCmd_PLAY(RTSPServer::RTSPClientConnection* ourClientConnection,
+		 ServerMediaSubsession* subsession, char const* fullRequestStr) {
+  char* rtspURL
+    = fOurRTSPServer.rtspURL(fOurServerMediaSession, ourClientConnection->fClientInputSocket);
+  unsigned rtspURLSize = strlen(rtspURL);
+
+  // Parse the client's "Scale:" header, if any:
+  float scale;
+  Boolean sawScaleHeader = parseScaleHeader(fullRequestStr, scale);
+
+  // Try to set the stream's scale factor to this value:
+  if (subsession == NULL /*aggregate op*/) {
+    fOurServerMediaSession->testScaleFactor(scale);
+  } else {
+    subsession->testScaleFactor(scale);
+  }
+
+  char buf[100];
+  char* scaleHeader;
+  if (!sawScaleHeader) {
+    buf[0] = '\0'; // Because we didn't see a Scale: header, don't send one back
+  } else {
+    snprintf(buf, sizeof buf, "Scale: %f\r\n", scale);
+  }
+  scaleHeader = strDup(buf);
+
+  // Parse the client's "Range:" header, if any:
+  float duration = 0.0;
+  double rangeStart = 0.0, rangeEnd = 0.0;
+  char* absStart = NULL; char* absEnd = NULL;
+  Boolean startTimeIsNow;
+  Boolean sawRangeHeader
+    = parseRangeHeader(fullRequestStr, rangeStart, rangeEnd, absStart, absEnd, startTimeIsNow);
+
+  if (sawRangeHeader && absStart == NULL/*not seeking by 'absolute' time*/) {
+    // Use this information, plus the stream's duration (if known), to create our own "Range:" header, for the response:
+    duration = subsession == NULL /*aggregate op*/
+      ? fOurServerMediaSession->duration() : subsession->duration();
+    if (duration < 0.0) {
+      // We're an aggregate PLAY, but the subsessions have different durations.
+      // Use the largest of these durations in our header
+      duration = -duration;
+    }
+
+    // Make sure that "rangeStart" and "rangeEnd" (from the client's "Range:" header)
+    // have sane values, before we send back our own "Range:" header in our response:
+    if (rangeStart < 0.0) rangeStart = 0.0;
+    else if (rangeStart > duration) rangeStart = duration;
+    if (rangeEnd < 0.0) rangeEnd = 0.0;
+    else if (rangeEnd > duration) rangeEnd = duration;
+    if ((scale > 0.0 && rangeStart > rangeEnd && rangeEnd > 0.0) ||
+	(scale < 0.0 && rangeStart < rangeEnd)) {
+      // "rangeStart" and "rangeEnd" were the wrong way around; swap them:
+      double tmp = rangeStart;
+      rangeStart = rangeEnd;
+      rangeEnd = tmp;
+    }
+  }
+
+  // Create a "RTP-Info:" line.  It will get filled in from each subsession's state:
+  char const* rtpInfoFmt =
+    "%s" // "RTP-Info:", plus any preceding rtpInfo items
+    "%s" // comma separator, if needed
+    "url=%s/%s"
+    ";seq=%d"
+    ";rtptime=%u"
+    ;
+  unsigned rtpInfoFmtSize = strlen(rtpInfoFmt);
+  char* rtpInfo = strDup("RTP-Info: ");
+  unsigned i, numRTPInfoItems = 0;
+
+  // Do any required seeking/scaling on each subsession, before starting streaming.
+  // (However, we don't do this if the "PLAY" request was for just a single subsession
+  // of a multiple-subsession stream; for such streams, seeking/scaling can be done
+  // only with an aggregate "PLAY".)
+  for (i = 0; i < fNumStreamStates; ++i) {
+    if (subsession == NULL /* means: aggregated operation */ || fNumStreamStates == 1) {
+      if (fStreamStates[i].subsession != NULL) {
+	if (sawScaleHeader) {
+	  fStreamStates[i].subsession->setStreamScale(fOurSessionId, fStreamStates[i].streamToken, scale);
+	}
+	if (absStart != NULL) {
+	  // Special case handling for seeking by 'absolute' time:
+
+	  fStreamStates[i].subsession->seekStream(fOurSessionId, fStreamStates[i].streamToken, absStart, absEnd);
+	} else {
+	  // Seeking by relative (NPT) time:
+
+	  u_int64_t numBytes;
+	  if (!sawRangeHeader || startTimeIsNow) {
+	    // We're resuming streaming without seeking, so we just do a 'null' seek
+	    // (to get our NPT, and to specify when to end streaming):
+	    fStreamStates[i].subsession->nullSeekStream(fOurSessionId, fStreamStates[i].streamToken,
+							rangeEnd, numBytes);
+	  } else {
+	    // We do a real 'seek':
+	    double streamDuration = 0.0; // by default; means: stream until the end of the media
+	    if (rangeEnd > 0.0 && (rangeEnd+0.001) < duration) {
+	      // the 0.001 is because we limited the values to 3 decimal places
+	      // We want the stream to end early.  Set the duration we want:
+	      streamDuration = rangeEnd - rangeStart;
+	      if (streamDuration < 0.0) streamDuration = -streamDuration;
+	          // should happen only if scale < 0.0
+	    }
+	    fStreamStates[i].subsession->seekStream(fOurSessionId, fStreamStates[i].streamToken,
+						    rangeStart, streamDuration, numBytes);
+	  }
+	}
+      }
+    }
+  }
+
+  // Create the "Range:" header that we'll send back in our response.
+  // (Note that we do this after seeking, in case the seeking operation changed the range start time.)
+  if (absStart != NULL) {
+    // We're seeking by 'absolute' time.
+    // Note: "absStart"/"absEnd" are client-supplied, so the formatting must be
+    // bounded by the size of "buf" (hence "snprintf", not "sprintf"):
+    if (absEnd == NULL) {
+      snprintf(buf, sizeof buf, "Range: clock=%s-\r\n", absStart);
+    } else {
+      snprintf(buf, sizeof buf, "Range: clock=%s-%s\r\n", absStart, absEnd);
+    }
+    delete[] absStart; delete[] absEnd;
+  } else {
+    // We're seeking by relative (NPT) time:
+    if (!sawRangeHeader || startTimeIsNow) {
+      // We didn't seek, so in our response, begin the range with the current NPT (normal play time):
+      float curNPT = 0.0;
+      for (i = 0; i < fNumStreamStates; ++i) {
+	if (subsession == NULL /* means: aggregated operation */
+	    || subsession == fStreamStates[i].subsession) {
+	  if (fStreamStates[i].subsession == NULL) continue;
+	  float npt = fStreamStates[i].subsession->getCurrentNPT(fStreamStates[i].streamToken);
+	  if (npt > curNPT) curNPT = npt;
+	  // Note: If this is an aggregate "PLAY" on a multi-subsession stream,
+	  // then it's conceivable that the NPTs of each subsession may differ
+	  // (if there has been a previous seek on just one subsession).
+	  // In this (unusual) case, we just return the largest NPT; I hope that turns out OK...
+	}
+      }
+      rangeStart = curNPT;
+    }
+
+    if (rangeEnd == 0.0 && scale >= 0.0) {
+      snprintf(buf, sizeof buf, "Range: npt=%.3f-\r\n", rangeStart);
+    } else {
+      snprintf(buf, sizeof buf, "Range: npt=%.3f-%.3f\r\n", rangeStart, rangeEnd);
+    }
+  }
+  char* rangeHeader = strDup(buf);
+
+  // Now, start streaming:
+  for (i = 0; i < fNumStreamStates; ++i) {
+    if (subsession == NULL /* means: aggregated operation */
+	|| subsession == fStreamStates[i].subsession) {
+      unsigned short rtpSeqNum = 0;
+      unsigned rtpTimestamp = 0;
+      if (fStreamStates[i].subsession == NULL) continue;
+      fStreamStates[i].subsession->startStream(fOurSessionId,
+					       fStreamStates[i].streamToken,
+					       (TaskFunc*)noteClientLiveness, this,
+					       rtpSeqNum, rtpTimestamp,
+					       RTSPServer::RTSPClientConnection::handleAlternativeRequestByte, ourClientConnection);
+      // Append this track's "RTP-Info:" item (the buffer is grown to fit):
+      const char *urlSuffix = fStreamStates[i].subsession->trackId();
+      char* prevRTPInfo = rtpInfo;
+      unsigned rtpInfoSize = rtpInfoFmtSize
+	+ strlen(prevRTPInfo)
+	+ 1
+	+ rtspURLSize + strlen(urlSuffix)
+	+ 5 /*max unsigned short len*/
+	+ 10 /*max unsigned (32-bit) len*/
+	+ 2 /*allows for trailing \r\n at final end of string*/;
+      rtpInfo = new char[rtpInfoSize];
+      sprintf(rtpInfo, rtpInfoFmt,
+	      prevRTPInfo,
+	      numRTPInfoItems++ == 0 ? "" : ",",
+	      rtspURL, urlSuffix,
+	      rtpSeqNum,
+	      rtpTimestamp
+	      );
+      delete[] prevRTPInfo;
+    }
+  }
+  if (numRTPInfoItems == 0) {
+    rtpInfo[0] = '\0';
+  } else {
+    unsigned rtpInfoLen = strlen(rtpInfo);
+    rtpInfo[rtpInfoLen] = '\r';
+    rtpInfo[rtpInfoLen+1] = '\n';
+    rtpInfo[rtpInfoLen+2] = '\0';
+  }
+
+  // Fill in the response:
+  snprintf((char*)ourClientConnection->fResponseBuffer, sizeof ourClientConnection->fResponseBuffer,
+	   "RTSP/1.0 200 OK\r\n"
+	   "CSeq: %s\r\n"
+	   "%s"
+	   "%s"
+	   "%s"
+	   "Session: %08X\r\n"
+	   "%s\r\n",
+	   ourClientConnection->fCurrentCSeq,
+	   dateHeader(),
+	   scaleHeader,
+	   rangeHeader,
+	   fOurSessionId,
+	   rtpInfo);
+  delete[] rtpInfo; delete[] rangeHeader;
+  delete[] scaleHeader; delete[] rtspURL;
+}
+
+// Handle a "PAUSE" request: pause streaming on the specified subsession, or -
+// if "subsession" is NULL (an aggregate operation) - on every subsession in
+// the stream, then reply "200 OK" (including our session id).
+void RTSPServer::RTSPClientSession
+::handleCmd_PAUSE(RTSPServer::RTSPClientConnection* ourClientConnection,
+		  ServerMediaSubsession* subsession) {
+  Boolean const aggregateOp = subsession == NULL;
+  for (unsigned i = 0; i < fNumStreamStates; ++i) {
+    ServerMediaSubsession* trackSubsession = fStreamStates[i].subsession;
+    if (trackSubsession == NULL) continue;
+    if (aggregateOp || trackSubsession == subsession) {
+      trackSubsession->pauseStream(fOurSessionId, fStreamStates[i].streamToken);
+    }
+  }
+
+  setRTSPResponse(ourClientConnection, "200 OK", fOurSessionId);
+}
+
+// Handle a "GET_PARAMETER" request (default implementation).
+void RTSPServer::RTSPClientSession
+::handleCmd_GET_PARAMETER(RTSPServer::RTSPClientConnection* ourClientConnection,
+			  ServerMediaSubsession* /*subsession*/, char const* /*fullRequestStr*/) {
+  // By default, we implement "GET_PARAMETER" just as a 'keep alive', and send back a dummy response.
+  // (If you want to handle "GET_PARAMETER" properly, you can do so by defining a subclass of "RTSPServer"
+  // and "RTSPServer::RTSPClientSession", and then reimplement this virtual function in your subclass.)
+  // The response body is simply the library's version string:
+  setRTSPResponse(ourClientConnection, "200 OK", fOurSessionId, LIVEMEDIA_LIBRARY_VERSION_STRING);
+}
+
+// Handle a "SET_PARAMETER" request (default implementation).
+void RTSPServer::RTSPClientSession
+::handleCmd_SET_PARAMETER(RTSPServer::RTSPClientConnection* ourClientConnection,
+			  ServerMediaSubsession* /*subsession*/, char const* /*fullRequestStr*/) {
+  // By default, we implement "SET_PARAMETER" just as a 'keep alive', and send back an empty response.
+  // (If you want to handle "SET_PARAMETER" properly, you can do so by defining a subclass of "RTSPServer"
+  // and "RTSPServer::RTSPClientSession", and then reimplement this virtual function in your subclass.)
+  setRTSPResponse(ourClientConnection, "200 OK", fOurSessionId);
+}
+
+// Factory override (from "GenericMediaServer"): create an RTSP-specific
+// connection object for a newly-accepted client socket.
+GenericMediaServer::ClientConnection*
+RTSPServer::createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) {
+  return new RTSPClientConnection(*this, clientSocket, clientAddr);
+}
+
+// Factory override (from "GenericMediaServer"): create an RTSP-specific
+// session object for the given (server-chosen) session id.
+GenericMediaServer::ClientSession*
+RTSPServer::createNewClientSession(u_int32_t sessionId) {
+  return new RTSPClientSession(*this, sessionId);
+}
diff --git a/liveMedia/RTSPServerRegister.cpp b/liveMedia/RTSPServerRegister.cpp
new file mode 100644
index 0000000..77fd852
--- /dev/null
+++ b/liveMedia/RTSPServerRegister.cpp
@@ -0,0 +1,431 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A RTSP server
+// Implementation of functionality related to the "REGISTER" and "DEREGISTER" commands
+
+#include "RTSPServer.hh"
+#include "RTSPCommon.hh"
+#include "RTSPRegisterSender.hh"
+#include "ProxyServerMediaSession.hh"
+#include "GroupsockHelper.hh"
+
+////////// Implementation of "RTSPServer::registerStream()": //////////
+
+static void rtspRegisterResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString); // forward
+
+// A class that represents the state of a "REGISTER" request in progress:
+class RegisterRequestRecord: public RTSPRegisterSender {
+public:
+ RegisterRequestRecord(RTSPServer& ourServer, unsigned requestId,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister,
+ RTSPServer::responseHandlerForREGISTER* responseHandler, Authenticator* authenticator,
+ Boolean requestStreamingViaTCP, char const* proxyURLSuffix)
+ : RTSPRegisterSender(ourServer.envir(), remoteClientNameOrAddress, remoteClientPortNum, rtspURLToRegister,
+ rtspRegisterResponseHandler, authenticator,
+ requestStreamingViaTCP, proxyURLSuffix, True/*reuseConnection*/,
+#ifdef DEBUG
+ 1/*verbosityLevel*/,
+#else
+ 0/*verbosityLevel*/,
+#endif
+ NULL),
+ fOurServer(ourServer), fRequestId(requestId), fResponseHandler(responseHandler) {
+ // Add ourself to our server's 'pending REGISTER or DEREGISTER requests' table:
+ ourServer.fPendingRegisterOrDeregisterRequests->Add((char const*)this, this);
+ }
+
+ virtual ~RegisterRequestRecord() {
+ // Remove ourself from the server's 'pending REGISTER or DEREGISTER requests' hash table before we go:
+ fOurServer.fPendingRegisterOrDeregisterRequests->Remove((char const*)this);
+ }
+
+ void handleResponse(int resultCode, char* resultString) {
+ if (resultCode == 0) {
+ // The "REGISTER" request succeeded, so use the still-open RTSP socket to await incoming commands from the remote endpoint:
+ int sock;
+ struct sockaddr_in remoteAddress;
+
+ grabConnection(sock, remoteAddress);
+ if (sock >= 0) {
+ increaseSendBufferTo(envir(), sock, 50*1024); // in anticipation of streaming over it
+ (void)fOurServer.createNewClientConnection(sock, remoteAddress);
+ }
+ }
+
+ if (fResponseHandler != NULL) {
+ // Call our (REGISTER-specific) response handler now:
+ (*fResponseHandler)(&fOurServer, fRequestId, resultCode, resultString);
+ } else {
+ // We need to delete[] "resultString" before we leave:
+ delete[] resultString;
+ }
+
+ // We're completely done with the REGISTER command now, so delete ourself now:
+ Medium::close(this);
+ }
+
+private:
+ RTSPServer& fOurServer;
+ unsigned fRequestId;
+ RTSPServer::responseHandlerForREGISTER* fResponseHandler;
+};
+
+static void rtspRegisterResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ RegisterRequestRecord* registerRequestRecord = (RegisterRequestRecord*)rtspClient;
+
+ registerRequestRecord->handleResponse(resultCode, resultString);
+}
+
+unsigned RTSPServer::registerStream(ServerMediaSession* serverMediaSession,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum,
+ responseHandlerForREGISTER* responseHandler,
+ char const* username, char const* password,
+ Boolean receiveOurStreamViaTCP, char const* proxyURLSuffix) {
+ // Create a new "RegisterRequestRecord" that will send the "REGISTER" command.
+ // (This object will automatically get deleted after we get a response to the "REGISTER" command, or if we're deleted.)
+ Authenticator* authenticator = NULL;
+ if (username != NULL) {
+ if (password == NULL) password = "";
+ authenticator = new Authenticator(username, password);
+ }
+ unsigned requestId = ++fRegisterOrDeregisterRequestCounter;
+ char const* url = rtspURL(serverMediaSession);
+ new RegisterRequestRecord(*this, requestId,
+ remoteClientNameOrAddress, remoteClientPortNum, url,
+ responseHandler, authenticator,
+ receiveOurStreamViaTCP, proxyURLSuffix);
+
+ delete[] (char*)url; // we can do this here because it was copied to the "RegisterRequestRecord"
+ delete authenticator; // ditto
+ return requestId;
+}
+
+////////// Implementation of "RTSPServer::deregisterStream()": //////////
+
+static void rtspDeregisterResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString); // forward
+
+// A class that represents the state of a "DEREGISTER" request in progress:
+class DeregisterRequestRecord: public RTSPDeregisterSender {
+public:
+ DeregisterRequestRecord(RTSPServer& ourServer, unsigned requestId,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToDeregister,
+ RTSPServer::responseHandlerForDEREGISTER* responseHandler, Authenticator* authenticator,
+ char const* proxyURLSuffix)
+ : RTSPDeregisterSender(ourServer.envir(), remoteClientNameOrAddress, remoteClientPortNum, rtspURLToDeregister,
+ rtspDeregisterResponseHandler, authenticator, proxyURLSuffix,
+#ifdef DEBUG
+ 1/*verbosityLevel*/,
+#else
+ 0/*verbosityLevel*/,
+#endif
+ NULL),
+ fOurServer(ourServer), fRequestId(requestId), fResponseHandler(responseHandler) {
+ // Add ourself to our server's 'pending REGISTER or DEREGISTER requests' table:
+ ourServer.fPendingRegisterOrDeregisterRequests->Add((char const*)this, this);
+ }
+
+ virtual ~DeregisterRequestRecord() {
+ // Remove ourself from the server's 'pending REGISTER or DEREGISTER requests' hash table before we go:
+ fOurServer.fPendingRegisterOrDeregisterRequests->Remove((char const*)this);
+ }
+
+ void handleResponse(int resultCode, char* resultString) {
+ if (fResponseHandler != NULL) {
+ // Call our (DEREGISTER-specific) response handler now:
+ (*fResponseHandler)(&fOurServer, fRequestId, resultCode, resultString);
+ } else {
+ // We need to delete[] "resultString" before we leave:
+ delete[] resultString;
+ }
+
+ // We're completely done with the DEREGISTER command now, so delete ourself now:
+ Medium::close(this);
+ }
+
+private:
+ RTSPServer& fOurServer;
+ unsigned fRequestId;
+ RTSPServer::responseHandlerForDEREGISTER* fResponseHandler;
+};
+
+static void rtspDeregisterResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ DeregisterRequestRecord* deregisterRequestRecord = (DeregisterRequestRecord*)rtspClient;
+
+ deregisterRequestRecord->handleResponse(resultCode, resultString);
+}
+
+unsigned RTSPServer::deregisterStream(ServerMediaSession* serverMediaSession,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum,
+ responseHandlerForDEREGISTER* responseHandler,
+ char const* username, char const* password,
+ char const* proxyURLSuffix) {
+ // Create a new "DeregisterRequestRecord" that will send the "DEREGISTER" command.
+ // (This object will automatically get deleted after we get a response to the "DEREGISTER" command, or if we're deleted.)
+ Authenticator* authenticator = NULL;
+ if (username != NULL) {
+ if (password == NULL) password = "";
+ authenticator = new Authenticator(username, password);
+ }
+ unsigned requestId = ++fRegisterOrDeregisterRequestCounter;
+ char const* url = rtspURL(serverMediaSession);
+ new DeregisterRequestRecord(*this, requestId,
+ remoteClientNameOrAddress, remoteClientPortNum, url,
+ responseHandler, authenticator,
+ proxyURLSuffix);
+
+ delete[] (char*)url; // we can do this here because it was copied to the "DeregisterRequestRecord"
+ delete authenticator; // ditto
+ return requestId;
+}
+
+Boolean RTSPServer::weImplementREGISTER(char const* /*cmd*//*"REGISTER" or "DEREGISTER"*/,
+ char const* /*proxyURLSuffix*/, char*& responseStr) {
+ // By default, servers do not implement our custom "REGISTER"/"DEREGISTER" commands:
+ responseStr = NULL;
+ return False;
+}
+
+void RTSPServer::implementCmd_REGISTER(char const* /*cmd*//*"REGISTER" or "DEREGISTER"*/,
+ char const* /*url*/, char const* /*urlSuffix*/, int /*socketToRemoteServer*/,
+ Boolean /*deliverViaTCP*/, char const* /*proxyURLSuffix*/) {
+ // By default, this function is a 'noop'
+}
+
+// Special mechanism for handling our custom "REGISTER" command:
+
+RTSPServer::RTSPClientConnection::ParamsForREGISTER
+::ParamsForREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+ RTSPServer::RTSPClientConnection* ourConnection, char const* url, char const* urlSuffix,
+ Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix)
+ : fCmd(strDup(cmd)), fOurConnection(ourConnection), fURL(strDup(url)), fURLSuffix(strDup(urlSuffix)),
+ fReuseConnection(reuseConnection), fDeliverViaTCP(deliverViaTCP), fProxyURLSuffix(strDup(proxyURLSuffix)) {
+}
+
+RTSPServer::RTSPClientConnection::ParamsForREGISTER::~ParamsForREGISTER() {
+ delete[] (char*)fCmd; delete[] fURL; delete[] fURLSuffix; delete[] fProxyURLSuffix;
+}
+
+#define DELAY_USECS_AFTER_REGISTER_RESPONSE 100000 /*100ms*/
+
+void RTSPServer
+::RTSPClientConnection::handleCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+ char const* url, char const* urlSuffix, char const* fullRequestStr,
+ Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix) {
+ char* responseStr;
+ if (fOurRTSPServer.weImplementREGISTER(cmd, proxyURLSuffix, responseStr)) {
+ // The "REGISTER"/"DEREGISTER" command - if we implement it - may require access control:
+ if (!authenticationOK(cmd, urlSuffix, fullRequestStr)) return;
+
+ // We implement the "REGISTER"/"DEREGISTER" command by first replying to it, then actually
+ // handling it (in a separate event-loop task, that will get called after the reply has
+ // been done).
+ // Hack: If we're going to reuse the command's connection for subsequent RTSP commands, then we
+ // delay the actual handling of the command slightly, to make it less likely that the first
+ // subsequent RTSP command (e.g., "DESCRIBE") will end up in the client's response buffer before
+ // the socket (at the far end) gets reused for RTSP command handling.
+ setRTSPResponse(responseStr == NULL ? "200 OK" : responseStr);
+ delete[] responseStr;
+
+ ParamsForREGISTER* registerParams = new ParamsForREGISTER(cmd, this, url, urlSuffix, reuseConnection, deliverViaTCP, proxyURLSuffix);
+ envir().taskScheduler().scheduleDelayedTask(reuseConnection ? DELAY_USECS_AFTER_REGISTER_RESPONSE : 0,
+ (TaskFunc*)continueHandlingREGISTER, registerParams);
+ } else if (responseStr != NULL) {
+ setRTSPResponse(responseStr);
+ delete[] responseStr;
+ } else {
+ handleCmd_notSupported();
+ }
+}
+
+// A special version of "parseTransportHeader()", used just for parsing the "Transport:" header in an incoming "REGISTER" command:
+void parseTransportHeaderForREGISTER(char const* buf,
+ Boolean &reuseConnection,
+ Boolean& deliverViaTCP,
+ char*& proxyURLSuffix) {
+ // Initialize the result parameters to default values:
+ reuseConnection = False;
+ deliverViaTCP = False;
+ proxyURLSuffix = NULL;
+
+ // First, find "Transport:"
+ while (1) {
+ if (*buf == '\0') return; // not found
+ if (*buf == '\r' && *(buf+1) == '\n' && *(buf+2) == '\r') return; // end of the headers => not found
+ if (_strncasecmp(buf, "Transport:", 10) == 0) break;
+ ++buf;
+ }
+
+ // Then, run through each of the fields, looking for ones we handle:
+ char const* fields = buf + 10;
+ while (*fields == ' ') ++fields;
+ char* field = strDupSize(fields);
+ while (sscanf(fields, "%[^;\r\n]", field) == 1) {
+ if (strcmp(field, "reuse_connection") == 0) {
+ reuseConnection = True;
+ } else if (_strncasecmp(field, "preferred_delivery_protocol=udp", 31) == 0) {
+ deliverViaTCP = False;
+ } else if (_strncasecmp(field, "preferred_delivery_protocol=interleaved", 39) == 0) {
+ deliverViaTCP = True;
+ } else if (_strncasecmp(field, "proxy_url_suffix=", 17) == 0) {
+ delete[] proxyURLSuffix;
+ proxyURLSuffix = strDup(field+17);
+ }
+
+ fields += strlen(field);
+ while (*fields == ';' || *fields == ' ' || *fields == '\t') ++fields; // skip over separating ';' chars or whitespace
+ if (*fields == '\0' || *fields == '\r' || *fields == '\n') break;
+ }
+ delete[] field;
+}
+
+void RTSPServer::RTSPClientConnection::continueHandlingREGISTER(ParamsForREGISTER* params) {
+ params->fOurConnection->continueHandlingREGISTER1(params);
+}
+
+void RTSPServer::RTSPClientConnection::continueHandlingREGISTER1(ParamsForREGISTER* params) {
+ // Reuse our socket if requested:
+ int socketNumToBackEndServer = params->fReuseConnection ? fClientOutputSocket : -1;
+
+ RTSPServer* ourServer = &fOurRTSPServer; // copy the pointer now, in case we "delete this" below
+
+ if (socketNumToBackEndServer >= 0) {
+ // Because our socket will no longer be used by the server to handle incoming requests, we can now delete this
+ // "RTSPClientConnection" object. We do this now, in case the "implementCmd_REGISTER()" call below would also end up
+ // deleting this.
+ fClientInputSocket = fClientOutputSocket = -1; // so the socket doesn't get closed when we get deleted
+ delete this;
+ }
+
+ ourServer->implementCmd_REGISTER(params->fCmd,
+ params->fURL, params->fURLSuffix, socketNumToBackEndServer,
+ params->fDeliverViaTCP, params->fProxyURLSuffix);
+ delete params;
+}
+
+
+///////// RTSPServerWithREGISTERProxying implementation /////////
+
+RTSPServerWithREGISTERProxying* RTSPServerWithREGISTERProxying
+::createNew(UsageEnvironment& env, Port ourPort,
+ UserAuthenticationDatabase* authDatabase, UserAuthenticationDatabase* authDatabaseForREGISTER,
+ unsigned reclamationSeconds,
+ Boolean streamRTPOverTCP, int verbosityLevelForProxying,
+ char const* backEndUsername, char const* backEndPassword) {
+ int ourSocket = setUpOurSocket(env, ourPort);
+ if (ourSocket == -1) return NULL;
+
+ return new RTSPServerWithREGISTERProxying(env, ourSocket, ourPort,
+ authDatabase, authDatabaseForREGISTER,
+ reclamationSeconds,
+ streamRTPOverTCP, verbosityLevelForProxying,
+ backEndUsername, backEndPassword);
+}
+
+RTSPServerWithREGISTERProxying
+::RTSPServerWithREGISTERProxying(UsageEnvironment& env, int ourSocket, Port ourPort,
+ UserAuthenticationDatabase* authDatabase, UserAuthenticationDatabase* authDatabaseForREGISTER,
+ unsigned reclamationSeconds,
+ Boolean streamRTPOverTCP, int verbosityLevelForProxying,
+ char const* backEndUsername, char const* backEndPassword)
+ : RTSPServer(env, ourSocket, ourPort, authDatabase, reclamationSeconds),
+ fStreamRTPOverTCP(streamRTPOverTCP), fVerbosityLevelForProxying(verbosityLevelForProxying),
+ fRegisteredProxyCounter(0), fAllowedCommandNames(NULL), fAuthDBForREGISTER(authDatabaseForREGISTER),
+ fBackEndUsername(strDup(backEndUsername)), fBackEndPassword(strDup(backEndPassword)) {
+}
+
+RTSPServerWithREGISTERProxying::~RTSPServerWithREGISTERProxying() {
+ delete[] fAllowedCommandNames;
+ delete[] fBackEndUsername; delete[] fBackEndPassword;
+}
+
+char const* RTSPServerWithREGISTERProxying::allowedCommandNames() {
+ if (fAllowedCommandNames == NULL) {
+ char const* baseAllowedCommandNames = RTSPServer::allowedCommandNames();
+ char const* newAllowedCommandName = ", REGISTER, DEREGISTER";
+ fAllowedCommandNames = new char[strlen(baseAllowedCommandNames) + strlen(newAllowedCommandName) + 1/* for '\0' */];
+ sprintf(fAllowedCommandNames, "%s%s", baseAllowedCommandNames, newAllowedCommandName);
+ }
+ return fAllowedCommandNames;
+}
+
+Boolean RTSPServerWithREGISTERProxying
+::weImplementREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+ char const* proxyURLSuffix, char*& responseStr) {
+ // First, check whether we have already proxied a stream as "proxyURLSuffix":
+ if (proxyURLSuffix != NULL) {
+ ServerMediaSession* sms = lookupServerMediaSession(proxyURLSuffix);
+ if ((strcmp(cmd, "REGISTER") == 0 && sms != NULL) ||
+ (strcmp(cmd, "DEREGISTER") == 0 && sms == NULL)) {
+ responseStr = strDup("451 Invalid parameter");
+ return False;
+ }
+ }
+
+ // Otherwise, we will implement it:
+ responseStr = NULL;
+ return True;
+}
+
+void RTSPServerWithREGISTERProxying
+::implementCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+ char const* url, char const* /*urlSuffix*/, int socketToRemoteServer,
+ Boolean deliverViaTCP, char const* proxyURLSuffix) {
+ // Continue setting up proxying for the specified URL.
+ // By default:
+ // - We use "registeredProxyStream-N" as the (front-end) stream name (ignoring the back-end stream's 'urlSuffix'),
+ // unless "proxyURLSuffix" is non-NULL (in which case we use that)
+ // - There is no 'username' and 'password' for the back-end stream. (Thus, access-controlled back-end streams will fail.)
+ // - If "fStreamRTPOverTCP" is True, then we request delivery over TCP, regardless of the value of "deliverViaTCP".
+ // (Otherwise, if "fStreamRTPOverTCP" is False, we use the value of "deliverViaTCP" to decide this.)
+ // To change this default behavior, you will need to subclass "RTSPServerWithREGISTERProxying", and reimplement this function.
+
+ char const* proxyStreamName;
+ char proxyStreamNameBuf[100];
+ if (proxyURLSuffix == NULL) {
+ sprintf(proxyStreamNameBuf, "registeredProxyStream-%u", ++fRegisteredProxyCounter);
+ proxyStreamName = proxyStreamNameBuf;
+ } else {
+ proxyStreamName = proxyURLSuffix;
+ }
+
+ if (strcmp(cmd, "REGISTER") == 0) {
+ if (fStreamRTPOverTCP) deliverViaTCP = True;
+ portNumBits tunnelOverHTTPPortNum = deliverViaTCP ? (portNumBits)(~0) : 0;
+ // We don't support streaming from the back-end via RTSP/RTP/RTCP-over-HTTP; only via RTP/RTCP-over-TCP or RTP/RTCP-over-UDP
+
+ ServerMediaSession* sms
+ = ProxyServerMediaSession::createNew(envir(), this, url, proxyStreamName,
+ fBackEndUsername, fBackEndPassword,
+ tunnelOverHTTPPortNum, fVerbosityLevelForProxying, socketToRemoteServer);
+ addServerMediaSession(sms);
+
+ // (Regardless of the verbosity level) announce the fact that we're proxying this new stream, and the URL to use to access it:
+ char* proxyStreamURL = rtspURL(sms);
+ envir() << "Proxying the registered back-end stream \"" << url << "\".\n";
+ envir() << "\tPlay this stream using the URL: " << proxyStreamURL << "\n";
+ delete[] proxyStreamURL;
+ } else { // "DEREGISTER"
+ deleteServerMediaSession(lookupServerMediaSession(proxyStreamName));
+ }
+}
+
+UserAuthenticationDatabase* RTSPServerWithREGISTERProxying::getAuthenticationDatabaseForCommand(char const* cmdName) {
+ if (strcmp(cmdName, "REGISTER") == 0) return fAuthDBForREGISTER;
+
+ return RTSPServer::getAuthenticationDatabaseForCommand(cmdName);
+}
diff --git a/liveMedia/RTSPServerSupportingHTTPStreaming.cpp b/liveMedia/RTSPServerSupportingHTTPStreaming.cpp
new file mode 100644
index 0000000..691e7c8
--- /dev/null
+++ b/liveMedia/RTSPServerSupportingHTTPStreaming.cpp
@@ -0,0 +1,268 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server that supports both RTSP, and HTTP streaming (using Apple's "HTTP Live Streaming" protocol)
+// Implementation
+
+#include "RTSPServer.hh"
+#include "RTSPServerSupportingHTTPStreaming.hh"
+#include "RTSPCommon.hh"
+#ifndef _WIN32_WCE
+#include <sys/stat.h>
+#endif
+#include <time.h>
+
+RTSPServerSupportingHTTPStreaming*
+RTSPServerSupportingHTTPStreaming::createNew(UsageEnvironment& env, Port rtspPort,
+ UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds) {
+ int ourSocket = setUpOurSocket(env, rtspPort);
+ if (ourSocket == -1) return NULL;
+
+ return new RTSPServerSupportingHTTPStreaming(env, ourSocket, rtspPort, authDatabase, reclamationTestSeconds);
+}
+
+RTSPServerSupportingHTTPStreaming
+::RTSPServerSupportingHTTPStreaming(UsageEnvironment& env, int ourSocket, Port rtspPort,
+ UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds)
+ : RTSPServer(env, ourSocket, rtspPort, authDatabase, reclamationTestSeconds) {
+}
+
+RTSPServerSupportingHTTPStreaming::~RTSPServerSupportingHTTPStreaming() {
+}
+
+GenericMediaServer::ClientConnection*
+RTSPServerSupportingHTTPStreaming::createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) {
+ return new RTSPClientConnectionSupportingHTTPStreaming(*this, clientSocket, clientAddr);
+}
+
+RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming
+::RTSPClientConnectionSupportingHTTPStreaming(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
+ : RTSPClientConnection(ourServer, clientSocket, clientAddr),
+ fClientSessionId(0), fStreamSource(NULL), fPlaylistSource(NULL), fTCPSink(NULL) {
+}
+
+RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming::~RTSPClientConnectionSupportingHTTPStreaming() {
+ Medium::close(fPlaylistSource);
+ Medium::close(fStreamSource);
+ Medium::close(fTCPSink);
+}
+
+static char const* lastModifiedHeader(char const* fileName) {
+ static char buf[200];
+ buf[0] = '\0'; // by default, return an empty string
+
+#ifndef _WIN32_WCE
+ struct stat sb;
+ int statResult = stat(fileName, &sb);
+ if (statResult == 0) {
+ strftime(buf, sizeof buf, "Last-Modified: %a, %b %d %Y %H:%M:%S GMT\r\n", gmtime((const time_t*)&sb.st_mtime));
+ }
+#endif
+
+ return buf;
+}
+
+void RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming
+::handleHTTPCmd_StreamingGET(char const* urlSuffix, char const* /*fullRequestStr*/) {
+ // If "urlSuffix" ends with "?segment=<offset-in-seconds>,<duration-in-seconds>", then strip this off, and send the
+ // specified segment. Otherwise, construct and send a playlist that consists of segments from the specified file.
+ do {
+ char const* questionMarkPos = strrchr(urlSuffix, '?');
+ if (questionMarkPos == NULL) break;
+ unsigned offsetInSeconds, durationInSeconds;
+ if (sscanf(questionMarkPos, "?segment=%u,%u", &offsetInSeconds, &durationInSeconds) != 2) break;
+
+ char* streamName = strDup(urlSuffix);
+ streamName[questionMarkPos-urlSuffix] = '\0';
+
+ do {
+ ServerMediaSession* session = fOurServer.lookupServerMediaSession(streamName);
+ if (session == NULL) {
+ handleHTTPCmd_notFound();
+ break;
+ }
+
+ // We can't send multi-subsession streams over HTTP (because there's no defined way to multiplex more than one subsession).
+ // Therefore, use the first (and presumed only) substream:
+ ServerMediaSubsessionIterator iter(*session);
+ ServerMediaSubsession* subsession = iter.next();
+ if (subsession == NULL) {
+ // Treat an 'empty' ServerMediaSession the same as one that doesn't exist at all:
+ handleHTTPCmd_notFound();
+ break;
+ }
+
+ // Call "getStreamParameters()" to create the stream's source. (Because we're not actually streaming via RTP/RTCP, most
+ // of the parameters to the call are dummy.)
+ ++fClientSessionId;
+ Port clientRTPPort(0), clientRTCPPort(0), serverRTPPort(0), serverRTCPPort(0);
+ netAddressBits destinationAddress = 0;
+ u_int8_t destinationTTL = 0;
+ Boolean isMulticast = False;
+ void* streamToken;
+ subsession->getStreamParameters(fClientSessionId, 0, clientRTPPort,clientRTCPPort, -1,0,0, destinationAddress,destinationTTL, isMulticast, serverRTPPort,serverRTCPPort, streamToken);
+
+ // Seek the stream source to the desired place, with the desired duration, and (as a side effect) get the number of bytes:
+ double dOffsetInSeconds = (double)offsetInSeconds;
+ u_int64_t numBytes;
+ subsession->seekStream(fClientSessionId, streamToken, dOffsetInSeconds, (double)durationInSeconds, numBytes);
+ unsigned numTSBytesToStream = (unsigned)numBytes;
+
+ if (numTSBytesToStream == 0) {
+ // For some reason, we do not know the size of the requested range. We can't handle this request:
+ handleHTTPCmd_notSupported();
+ break;
+ }
+
+ // Construct our response:
+ snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
+ "HTTP/1.1 200 OK\r\n"
+ "%s"
+ "Server: LIVE555 Streaming Media v%s\r\n"
+ "%s"
+ "Content-Length: %d\r\n"
+ "Content-Type: text/plain; charset=ISO-8859-1\r\n"
+ "\r\n",
+ dateHeader(),
+ LIVEMEDIA_LIBRARY_VERSION_STRING,
+ lastModifiedHeader(streamName),
+ numTSBytesToStream);
+ // Send the response now, because we're about to add more data (from the source):
+ send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0);
+ fResponseBuffer[0] = '\0'; // We've already sent the response. This tells the calling code not to send it again.
+
+ // Ask the media source to deliver - to the TCP sink - the desired data:
+ if (fStreamSource != NULL) { // sanity check
+ if (fTCPSink != NULL) fTCPSink->stopPlaying();
+ Medium::close(fStreamSource);
+ }
+ fStreamSource = subsession->getStreamSource(streamToken);
+ if (fStreamSource != NULL) {
+ if (fTCPSink == NULL) fTCPSink = TCPStreamSink::createNew(envir(), fClientOutputSocket);
+ fTCPSink->startPlaying(*fStreamSource, afterStreaming, this);
+ }
+ } while(0);
+
+ delete[] streamName;
+ return;
+ } while (0);
+
+ // "urlSuffix" does not end with "?segment=<offset-in-seconds>,<duration-in-seconds>".
+ // Construct and send a playlist that describes segments from the specified file.
+
+ // First, make sure that the named file exists, and is streamable:
+ ServerMediaSession* session = fOurServer.lookupServerMediaSession(urlSuffix);
+ if (session == NULL) {
+ handleHTTPCmd_notFound();
+ return;
+ }
+
+ // To be able to construct a playlist for the requested file, we need to know its duration:
+ float duration = session->duration();
+ if (duration <= 0.0) {
+ // We can't handle this request:
+ handleHTTPCmd_notSupported();
+ return;
+ }
+
+ // Now, construct the playlist. It will consist of a prefix, one or more media file specifications, and a suffix:
+ unsigned const maxIntLen = 10; // >= the maximum possible strlen() of an integer in the playlist
+ char const* const playlistPrefixFmt =
+ "#EXTM3U\r\n"
+ "#EXT-X-ALLOW-CACHE:YES\r\n"
+ "#EXT-X-MEDIA-SEQUENCE:0\r\n"
+ "#EXT-X-TARGETDURATION:%d\r\n";
+ unsigned const playlistPrefixFmt_maxLen = strlen(playlistPrefixFmt) + maxIntLen;
+
+ char const* const playlistMediaFileSpecFmt =
+ "#EXTINF:%d,\r\n"
+ "%s?segment=%d,%d\r\n";
+ unsigned const playlistMediaFileSpecFmt_maxLen = strlen(playlistMediaFileSpecFmt) + maxIntLen + strlen(urlSuffix) + 2*maxIntLen;
+
+ char const* const playlistSuffixFmt =
+ "#EXT-X-ENDLIST\r\n";
+ unsigned const playlistSuffixFmt_maxLen = strlen(playlistSuffixFmt);
+
+ // Figure out the 'target duration' that will produce a playlist that will fit in our response buffer. (But make it at least 10s.)
+ unsigned const playlistMaxSize = 10000;
+ unsigned const mediaFileSpecsMaxSize = playlistMaxSize - (playlistPrefixFmt_maxLen + playlistSuffixFmt_maxLen);
+ unsigned const maxNumMediaFileSpecs = mediaFileSpecsMaxSize/playlistMediaFileSpecFmt_maxLen;
+
+ unsigned targetDuration = (unsigned)(duration/maxNumMediaFileSpecs + 1);
+ if (targetDuration < 10) targetDuration = 10;
+
+ char* playlist = new char[playlistMaxSize];
+ char* s = playlist;
+ sprintf(s, playlistPrefixFmt, targetDuration);
+ s += strlen(s);
+
+ unsigned durSoFar = 0;
+ while (1) {
+ unsigned dur = targetDuration < duration ? targetDuration : (unsigned)duration;
+ duration -= dur;
+ sprintf(s, playlistMediaFileSpecFmt, dur, urlSuffix, durSoFar, dur);
+ s += strlen(s);
+ if (duration < 1.0) break;
+
+ durSoFar += dur;
+ }
+
+ sprintf(s, playlistSuffixFmt);
+ s += strlen(s);
+ unsigned playlistLen = s - playlist;
+
+ // Construct our response:
+ snprintf((char*)fResponseBuffer, sizeof fResponseBuffer,
+ "HTTP/1.1 200 OK\r\n"
+ "%s"
+ "Server: LIVE555 Streaming Media v%s\r\n"
+ "%s"
+ "Content-Length: %d\r\n"
+ "Content-Type: application/vnd.apple.mpegurl\r\n"
+ "\r\n",
+ dateHeader(),
+ LIVEMEDIA_LIBRARY_VERSION_STRING,
+ lastModifiedHeader(urlSuffix),
+ playlistLen);
+
+ // Send the response header now, because we're about to add more data (the playlist):
+ send(fClientOutputSocket, (char const*)fResponseBuffer, strlen((char*)fResponseBuffer), 0);
+ fResponseBuffer[0] = '\0'; // We've already sent the response. This tells the calling code not to send it again.
+
+ // Then, send the playlist. Because it's large, we don't do so using "send()", because that might not send it all at once.
+ // Instead, we stream the playlist over the TCP socket:
+ if (fPlaylistSource != NULL) { // sanity check
+ if (fTCPSink != NULL) fTCPSink->stopPlaying();
+ Medium::close(fPlaylistSource);
+ }
+ fPlaylistSource = ByteStreamMemoryBufferSource::createNew(envir(), (u_int8_t*)playlist, playlistLen);
+ if (fTCPSink == NULL) fTCPSink = TCPStreamSink::createNew(envir(), fClientOutputSocket);
+ fTCPSink->startPlaying(*fPlaylistSource, afterStreaming, this);
+}
+
+void RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming::afterStreaming(void* clientData) {
+ RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming* clientConnection
+ = (RTSPServerSupportingHTTPStreaming::RTSPClientConnectionSupportingHTTPStreaming*)clientData;
+ // Arrange to delete the 'client connection' object:
+ if (clientConnection->fRecursionCount > 0) {
+ // We're still in the midst of handling a request
+ clientConnection->fIsActive = False; // will cause the object to get deleted at the end of handling the request
+ } else {
+ // We're no longer handling a request; delete the object now:
+ delete clientConnection;
+ }
+}
diff --git a/liveMedia/RawVideoRTPSink.cpp b/liveMedia/RawVideoRTPSink.cpp
new file mode 100644
index 0000000..78f813a
--- /dev/null
+++ b/liveMedia/RawVideoRTPSink.cpp
@@ -0,0 +1,321 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for Raw video
+// Implementation
+
+#include "RawVideoRTPSink.hh"
+
+RawVideoRTPSink* RawVideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ unsigned height, unsigned width, unsigned depth,
+ char const* sampling, char const* colorimetry) {
+ return new RawVideoRTPSink(env, RTPgs,
+ rtpPayloadFormat,
+ height, width, depth,
+ sampling, colorimetry);
+}
+
+RawVideoRTPSink
+::RawVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ unsigned height, unsigned width, unsigned depth,
+ char const* sampling, char const* colorimetry)
+ : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "RAW"),
+ fFmtpSDPLine(NULL), fSampling(NULL), fWidth(width), fHeight(height),
+ fDepth(depth), fColorimetry(NULL), fLineindex(0) {
+
+ // Then use this 'config' string to construct our "a=fmtp:" SDP line:
+ unsigned fmtpSDPLineMaxSize = 200;// 200 => more than enough space
+ fFmtpSDPLine = new char[fmtpSDPLineMaxSize];
+ sprintf(fFmtpSDPLine, "a=fmtp:%d sampling=%s;width=%u;height=%u;depth=%u;colorimetry=%s\r\n",
+ rtpPayloadType(), sampling, width, height, depth, colorimetry);
+
+ // Set parameters
+ fSampling = strDup(sampling);
+ fColorimetry = strDup(colorimetry);
+ setFrameParameters();
+}
+
+RawVideoRTPSink::~RawVideoRTPSink() {
+ delete[] fFmtpSDPLine;
+ delete[] fSampling;
+ delete[] fColorimetry;}
+
+char const* RawVideoRTPSink::auxSDPLine() {
+ return fFmtpSDPLine;
+}
+
// Builds the RFC 4175 payload header (2-byte extended sequence number, then one
// 6-byte {length, field-id+line-number, continuation-bit+offset} entry per scan
// line carried in this packet), installs it via setSpecialHeaderBytes(), sets the
// RTP marker bit on the frame's final fragment, and stamps the RTP timestamp.
// Note: fLineindex persists across calls, tracking the current scan line within
// the frame; it is reset to 0 when the last fragment of the frame is emitted.
void RawVideoRTPSink
::doSpecialFrameHandling(unsigned fragmentationOffset,
			 unsigned char* frameStart,
			 unsigned numBytesInFrame,
			 struct timeval framePresentationTime,
			 unsigned numRemainingBytes) {

  // Ask the packing computation which lines (lengths/offsets) fit in this packet.
  // "lengths" and "offsets" are allocated by getNbLineInPacket(); we delete them below.
  unsigned * lengths = NULL;
  unsigned * offsets= NULL;
  unsigned nbLines = getNbLineInPacket(fragmentationOffset, lengths, offsets);
  unsigned specialHeaderSize = 2 + (6 * nbLines);
  u_int8_t* specialHeader = new u_int8_t[specialHeaderSize];

  // Extended Sequence Number (not used)
  specialHeader[0] = 0;
  specialHeader[1] = 0;

  for (unsigned i = 0; i < nbLines; i++) {
    // detection of new line incrementation
    // (an entry starting at offset 0 in a continuation packet means we've moved
    //  on to the next scan line — or the next line pair, for 4:2:0)
    if ((offsets[i] == 0) && fragmentationOffset != 0) {
      fLineindex = fLineindex + fFrameParameters.scanLineIterationStep;
    }

    // Set length (16 bits, network byte order)
    specialHeader[2 + (i * 6) + 0] = lengths[i] >> 8;
    specialHeader[2 + (i * 6) + 1] = (u_int8_t)lengths[i];

    // Field Identification (false for us)
    bool fieldIdent = false;

    // Set line index (15 bits; top bit is the field id)
    specialHeader[2 + (i * 6) + 2] = ((fLineindex >> 8) & 0x7F) | (fieldIdent << 7);
    specialHeader[2 + (i * 6) + 3] = (u_int8_t)fLineindex;

    // Set Continuation bit (set on every entry except the last one in this packet)
    bool continuationBit = (i < nbLines - 1) ? true : false;

    // Set offset (15 bits; top bit is the continuation bit)
    specialHeader[2 + (i * 6) + 4] = ((offsets[i] >> 8) & 0x7F) | (continuationBit << 7);
    specialHeader[2 + (i * 6) + 5] = (u_int8_t)offsets[i];
  }

  setSpecialHeaderBytes(specialHeader, specialHeaderSize);

  if (numRemainingBytes == 0) {
    // This packet contains the last (or only) fragment of the frame.
    // Set the RTP 'M' ('marker') bit:
    setMarkerBit();
    // Reset line index
    fLineindex = 0;
  }

  // Also set the RTP timestamp:
  setTimestamp(framePresentationTime);

  delete[] specialHeader;
  delete[] lengths;
  delete[] offsets;
}
+
+Boolean RawVideoRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+ // Only one frame per packet:
+ return False;
+}
+
+unsigned RawVideoRTPSink::specialHeaderSize() const {
+ unsigned * lengths = NULL;
+ unsigned * offsets= NULL;
+ unsigned nbLines = getNbLineInPacket(curFragmentationOffset(), lengths, offsets);
+ delete[] lengths;
+ delete[] offsets;
+ return 2 + (6 * nbLines);
+}
+
// Computes how many scan lines (or partial lines) of the frame, starting at byte
// offset "fragOffset", fit into one RTP packet, honoring the per-line 6-byte
// payload-header cost and keeping every data chunk a whole number of pgroups.
// On success, allocates and fills "lengths"/"offsets" (one entry per line; the
// CALLER must delete[] both) and returns the line count.  Returns 0 — leaving
// "lengths"/"offsets" untouched — if "fragOffset" is out of range.
// NOTE(review): the scratch arrays cap the result at 100 lines per packet;
// presumably no realistic packetMaxSize/pGroupSize combination exceeds that,
// but this is not checked — confirm.
unsigned RawVideoRTPSink::getNbLineInPacket(unsigned fragOffset, unsigned * &lengths, unsigned * &offsets) const
{
  unsigned rtpHeaderSize = 12;
  unsigned specialHeaderSize = 2; // Extended Sequence Nb
  unsigned packetMaxSize = ourMaxPacketSize();
  unsigned nbLines = 0;
  unsigned remainingSizeInPacket;

  if (fragOffset >= fFrameParameters.frameSize) {
    envir() << "RawVideoRTPSink::getNbLineInPacket(): bad fragOffset " << fragOffset << "\n";
    return 0;
  }
  unsigned lengthArray[100] = {0};
  unsigned offsetArray[100] = {0};
  unsigned curDataTotalLength = 0;
  // Byte offset within the current scan line, and bytes left in it:
  unsigned lineOffset = (fragOffset % fFrameParameters.scanLineSize);

  unsigned remainingLineSize = fFrameParameters.scanLineSize - (fragOffset % fFrameParameters.scanLineSize);
  while(1) {
    // Stop if adding another 6-byte line header would leave no room for its data:
    if (packetMaxSize - specialHeaderSize - rtpHeaderSize - 6 <= curDataTotalLength) {
      break; // packet sanity check
    }

    // add one line
    nbLines ++;
    specialHeaderSize += 6;

    remainingSizeInPacket = packetMaxSize - specialHeaderSize - rtpHeaderSize - curDataTotalLength;
    remainingSizeInPacket -= remainingSizeInPacket % fFrameParameters.pGroupSize; // use only multiple of pgroup
    // Take whatever is smaller: the rest of this scan line, or the packet's remaining room:
    lengthArray[nbLines-1] = remainingLineSize < remainingSizeInPacket ? remainingLineSize : remainingSizeInPacket;
    // RFC 4175 offsets are expressed in pgroups, not bytes:
    offsetArray[nbLines-1] = lineOffset * fFrameParameters.scanLineIterationStep / fFrameParameters.pGroupSize;
    if (remainingLineSize >= remainingSizeInPacket) {
      break; //packet full
    }

    // This line is finished; the next one starts at offset 0:
    remainingLineSize = fFrameParameters.scanLineSize;
    curDataTotalLength += lengthArray[nbLines-1];
    lineOffset = 0;

    if (fragOffset + curDataTotalLength >= fFrameParameters.frameSize) {
      break; // end of the frame.
    }
  }

  // Copy the scratch results into caller-owned arrays:
  lengths = new unsigned[nbLines];
  offsets = new unsigned[nbLines];
  for (unsigned i = 0; i < nbLines; i++) {
    lengths[i] = lengthArray[i];
    offsets[i] = offsetArray[i];
  }
  return nbLines;
}
+
+unsigned RawVideoRTPSink::computeOverflowForNewFrame(unsigned newFrameSize) const {
+ unsigned initialOverflow = MultiFramedRTPSink::computeOverflowForNewFrame(newFrameSize);
+
+ // Adjust (increase) this overflow to be a multiple of the pgroup value
+ unsigned numFrameBytesUsed = newFrameSize - initialOverflow;
+ initialOverflow += numFrameBytesUsed % fFrameParameters.pGroupSize;
+
+ return initialOverflow;
+}
+
+void RawVideoRTPSink::setFrameParameters() {
+ fFrameParameters.scanLineIterationStep = 1;
+ if ((strncmp("RGB", fSampling, strlen(fSampling)) == 0) || (strncmp("BGR", fSampling, strlen(fSampling)) == 0)) {
+ switch (fDepth) {
+ case 8:
+ fFrameParameters.pGroupSize = 3;
+ fFrameParameters.nbOfPixelInPGroup = 1;
+ break;
+ case 10:
+ fFrameParameters.pGroupSize = 15;
+ fFrameParameters.nbOfPixelInPGroup = 4;
+ break;
+ case 12:
+ fFrameParameters.pGroupSize = 9;
+ fFrameParameters.nbOfPixelInPGroup = 2;
+ break;
+ case 16:
+ fFrameParameters.pGroupSize = 6;
+ fFrameParameters.nbOfPixelInPGroup = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ else if ((strncmp("RGBA", fSampling, strlen(fSampling)) == 0) || (strncmp("BGRA", fSampling, strlen(fSampling)) == 0)) {
+ switch (fDepth) {
+ case 8:
+ fFrameParameters.pGroupSize = 4;
+ break;
+ case 10:
+ fFrameParameters.pGroupSize = 5;
+ break;
+ case 12:
+ fFrameParameters.pGroupSize = 6;
+ break;
+ case 16:
+ fFrameParameters.pGroupSize = 8;
+ break;
+ default:
+ break;
+ }
+ fFrameParameters.nbOfPixelInPGroup = 1;
+ } else if (strncmp("YCbCr-4:4:4", fSampling, strlen(fSampling)) == 0) {
+ switch (fDepth) {
+ case 8:
+ fFrameParameters.pGroupSize = 3;
+ fFrameParameters.nbOfPixelInPGroup = 1;
+ break;
+ case 10:
+ fFrameParameters.pGroupSize = 15;
+ fFrameParameters.nbOfPixelInPGroup = 4;
+ break;
+ case 12:
+ fFrameParameters.pGroupSize = 9;
+ fFrameParameters.nbOfPixelInPGroup = 2;
+ break;
+ case 16:
+ fFrameParameters.pGroupSize = 6;
+ fFrameParameters.nbOfPixelInPGroup = 1;
+ break;
+ default:
+ break;
+ }
+ } else if (strncmp("YCbCr-4:2:2", fSampling, strlen(fSampling)) == 0) {
+ switch (fDepth) {
+ case 8:
+ fFrameParameters.pGroupSize = 4;
+ break;
+ case 10:
+ fFrameParameters.pGroupSize = 5;
+ break;
+ case 12:
+ fFrameParameters.pGroupSize = 6;
+ break;
+ case 16:
+ fFrameParameters.pGroupSize = 8;
+ break;
+ default:
+ break;
+ }
+ fFrameParameters.nbOfPixelInPGroup = 2;
+ } else if (strncmp("YCbCr-4:1:1", fSampling, strlen(fSampling)) == 0) {
+ switch (fDepth) {
+ case 8:
+ fFrameParameters.pGroupSize = 6;
+ break;
+ case 10:
+ fFrameParameters.pGroupSize = 15;
+ break;
+ case 12:
+ fFrameParameters.pGroupSize = 9;
+ break;
+ case 16:
+ fFrameParameters.pGroupSize = 12;
+ break;
+ default:
+ break;
+ }
+ fFrameParameters.nbOfPixelInPGroup = 4;
+ } else if (strncmp("YCbCr-4:2:0", fSampling, strlen(fSampling)) == 0) {
+ switch (fDepth) {
+ case 8:
+ fFrameParameters.pGroupSize = 6;
+ break;
+ case 10:
+ fFrameParameters.pGroupSize = 15;
+ break;
+ case 12:
+ fFrameParameters.pGroupSize = 9;
+ break;
+ case 16:
+ fFrameParameters.pGroupSize = 12;
+ break;
+ default:
+ break;
+ }
+ fFrameParameters.nbOfPixelInPGroup = 4;
+ fFrameParameters.scanLineIterationStep = 2;
+ }
+ fFrameParameters.frameSize = fHeight * fWidth * fFrameParameters.pGroupSize / fFrameParameters.nbOfPixelInPGroup;
+ fFrameParameters.scanLineSize = fWidth * fFrameParameters.pGroupSize / fFrameParameters.nbOfPixelInPGroup * fFrameParameters.scanLineIterationStep;
+}
diff --git a/liveMedia/RawVideoRTPSource.cpp b/liveMedia/RawVideoRTPSource.cpp
new file mode 100644
index 0000000..52b40be
--- /dev/null
+++ b/liveMedia/RawVideoRTPSource.cpp
@@ -0,0 +1,182 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Raw Video RTP Sources (RFC 4175)
+// Implementation
+
+#include "RawVideoRTPSource.hh"
+
+////////// RawVideoBufferedPacket and RawVideoBufferedPacketFactory //////////
+
// A BufferedPacket subclass that splits each received packet into its
// constituent scan-line fragments, using the line headers that our
// "RawVideoRTPSource" parsed in processSpecialHeader().
class RawVideoBufferedPacket: public BufferedPacket {
public:
  RawVideoBufferedPacket(RawVideoRTPSource* ourSource);
  virtual ~RawVideoBufferedPacket();

private: // redefined virtual functions
  // Returns one line's worth of data per call, taking the length from the
  // source's parsed line-header array.
  virtual void getNextEnclosedFrameParameters(unsigned char*& framePtr,
					      unsigned dataSize,
					      unsigned& frameSize,
					      unsigned& frameDurationInMicroseconds);
private:
  RawVideoRTPSource* fOurSource; // non-owning back-pointer to our source
};
+
// Factory handed to MultiFramedRTPSource so that it allocates our
// "RawVideoBufferedPacket" objects instead of plain BufferedPackets.
class RawVideoBufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};
+
+
+////////// LineHeader //////////
+
// One parsed RFC 4175 per-line payload header (6 bytes on the wire).
struct LineHeader {
  u_int16_t length;               // length in bytes of this line's data
  u_int16_t fieldIdAndLineNumber; // top bit: field id; low 15 bits: line number
  u_int16_t offsetWithinLine;     // low 15 bits: offset (in pgroups) within the line
};
+
+
+///////// RawVideoRTPSource implementation (RFC 4175) ////////
+
+RawVideoRTPSource*
+RawVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+ return new RawVideoRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
+}
+
// Constructor: installs our packet factory so incoming packets are parsed as
// RFC 4175 raw video.  The line-header array starts empty; it is (re)built by
// processSpecialHeader() for each received packet.
RawVideoRTPSource
::RawVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
		    unsigned char rtpPayloadFormat,
		    unsigned rtpTimestampFrequency)
  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
			 new RawVideoBufferedPacketFactory),
    fNumLines(0), fNextLine(0), fLineHeaders(NULL) {
}
+
RawVideoRTPSource::~RawVideoRTPSource() {
  // Free the per-packet line-header array (may be NULL; delete[] NULL is a no-op):
  delete[] fLineHeaders;
}
+
+u_int16_t RawVideoRTPSource::currentLineNumber() const {
+ if (fNextLine == 0 || fLineHeaders == NULL) return 0; // we've called this function too soon!
+ return fLineHeaders[fNextLine-1].fieldIdAndLineNumber&0x7FFF;
+}
+
+u_int8_t RawVideoRTPSource::currentLineFieldId() const {
+ if (fNextLine == 0 || fLineHeaders == NULL) return 0; // we've called this function too soon!
+ return (fLineHeaders[fNextLine-1].fieldIdAndLineNumber&0x8000)>>15;
+}
+
+u_int16_t RawVideoRTPSource::currentOffsetWithinLine() const {
+ if (fNextLine == 0 || fLineHeaders == NULL) return 0; // we've called this function too soon!
+ return fLineHeaders[fNextLine-1].offsetWithinLine;
+}
+
// Parses the RFC 4175 payload header of "packet": skips the 2-byte extended
// sequence number, then makes two passes over the 6-byte line headers — first
// counting them (using the continuation bit as terminator), then decoding them
// into the freshly (re)allocated fLineHeaders array.  Validates that the packet
// actually contains all the payload bytes the line headers promise.  On success,
// sets the begins/completes-frame flags and reports the header size consumed.
Boolean RawVideoRTPSource
::processSpecialHeader(BufferedPacket* packet,
		       unsigned& resultSpecialHeaderSize) {

  unsigned char* headerStart = packet->data();
  unsigned packetSize = packet->dataSize();

  // The first 2 bytes of the header are the "Extended Sequence Number".
  // In the current implementation, we ignore this.
  if (packetSize < 2) return False;
  headerStart += 2;
  unsigned char* lineHeaderStart = headerStart; // remember where line headers begin (for pass 2)
  packetSize -= 2;

  // The rest of the header should consist of N*6 bytes (with N >= 1) for each line included.
  // Count how many of these there are:
  unsigned numLines = 0;
  while (1) {
    if (packetSize < 6) return False; // there's not enough room for another line header
    ++numLines;
    Boolean continuationBit = (headerStart[4]&0x80)>>7;
    headerStart += 6;
    packetSize -= 6;

    // Check the "C" (continuation) bit of this header to see whether any more line headers follow:
    if (continuationBit == 0) break; // no more line headers follow
  }

  // We now know how many lines are contained in this payload.  Allocate and fill in "fLineHeaders":
  fNumLines = numLines; // ASSERT: >= 1
  fNextLine = 0;
  delete[] fLineHeaders; fLineHeaders = new LineHeader[fNumLines];
  unsigned totalLength = 0;
  for (unsigned i = 0; i < fNumLines; ++i) {
    // All fields are big-endian on the wire:
    fLineHeaders[i].length = (lineHeaderStart[0]<<8) + lineHeaderStart[1];
    totalLength += fLineHeaders[i].length;
    fLineHeaders[i].fieldIdAndLineNumber = (lineHeaderStart[2]<<8) + lineHeaderStart[3];
    fLineHeaders[i].offsetWithinLine = ((lineHeaderStart[4]&0x7F)<<8) + lineHeaderStart[5];
    lineHeaderStart += 6;
  }

  // Make sure that we have enough bytes for all of the line lengths promised:
  if (totalLength > packetSize) {
    fNumLines = 0;
    delete[] fLineHeaders; fLineHeaders = NULL;
    return False;
  }

  // Everything looks good:
  // A frame begins when the first line entry is line 0 at offset 0:
  fCurrentPacketBeginsFrame
    = (fLineHeaders[0].fieldIdAndLineNumber&0x7FFF) == 0 && fLineHeaders[0].offsetWithinLine == 0;
  fCurrentPacketCompletesFrame = packet->rtpMarkerBit();
  resultSpecialHeaderSize = headerStart - packet->data();
  return True;
}
+
+char const* RawVideoRTPSource::MIMEtype() const {
+ return "video/RAW";
+}
+
+
+////////// RawVideoBufferedPacket and RawVideoBufferedPacketFactory implementation //////////
+
// Constructor: just records the (non-owning) back-pointer to our source.
RawVideoBufferedPacket
::RawVideoBufferedPacket(RawVideoRTPSource* ourSource)
  : fOurSource(ourSource) {
}
+
// Destructor: nothing to free; "fOurSource" is not owned by us.
RawVideoBufferedPacket::~RawVideoBufferedPacket() {
}
+
// Delivers the packet's payload one scan line at a time: each call reports the
// length of the next line, taken from the line headers our source parsed in
// processSpecialHeader().  If the parsed headers are exhausted unexpectedly,
// logs the inconsistency and falls back to delivering all remaining data.
void RawVideoBufferedPacket::getNextEnclosedFrameParameters(unsigned char*& /*framePtr*/,
							    unsigned dataSize,
							    unsigned& frameSize,
							    unsigned& frameDurationInMicroseconds) {
  frameDurationInMicroseconds = 0; // because all lines within the same packet are from the same frame

  if (fOurSource->fNextLine >= fOurSource->fNumLines) {
    fOurSource->envir() << "RawVideoBufferedPacket::nextEnclosedFrameParameters("
			<< dataSize << "): data error ("
			<< fOurSource->fNextLine << " >= " << fOurSource->fNumLines << ")!\n";
    frameSize = dataSize;
    return;
  }

  // Consume the next line header (advancing the source's cursor):
  frameSize = fOurSource->fLineHeaders[fOurSource->fNextLine++].length;
}
+
+BufferedPacket* RawVideoBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* ourSource) {
+ return new RawVideoBufferedPacket((RawVideoRTPSource*)ourSource);
+}
diff --git a/liveMedia/SIPClient.cpp b/liveMedia/SIPClient.cpp
new file mode 100644
index 0000000..cab2416
--- /dev/null
+++ b/liveMedia/SIPClient.cpp
@@ -0,0 +1,962 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic SIP client
+// Implementation
+
+#include "SIPClient.hh"
+#include "GroupsockHelper.hh"
+
+#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
+#define _strncasecmp _strnicmp
+#else
+#define _strncasecmp strncasecmp
+#endif
+
+////////// SIPClient //////////
+
+SIPClient* SIPClient
+::createNew(UsageEnvironment& env,
+ unsigned char desiredAudioRTPPayloadFormat,
+ char const* mimeSubtype,
+ int verbosityLevel, char const* applicationName) {
+ return new SIPClient(env, desiredAudioRTPPayloadFormat, mimeSubtype,
+ verbosityLevel, applicationName);
+}
+
+void SIPClient::setUserAgentString(char const* userAgentName) {
+ if (userAgentName == NULL) return;
+
+ // Change the existing user agent header string:
+ char const* const formatStr = "User-Agent: %s\r\n";
+ unsigned const headerSize = strlen(formatStr) + strlen(userAgentName);
+ delete[] fUserAgentHeaderStr;
+ fUserAgentHeaderStr = new char[headerSize];
+ sprintf(fUserAgentHeaderStr, formatStr, userAgentName);
+ fUserAgentHeaderStrLen = strlen(fUserAgentHeaderStr);
+}
+
// Constructor: records the codec/identity parameters, creates the UDP socket
// that all SIP signaling uses, discovers the local port we ended up bound to
// (falling back to the well-known SIP port 5060), builds the "User-Agent:"
// header string, and resets all per-call state.
SIPClient::SIPClient(UsageEnvironment& env,
		     unsigned char desiredAudioRTPPayloadFormat,
		     char const* mimeSubtype,
		     int verbosityLevel, char const* applicationName)
  : Medium(env),
    fT1(500000 /* 500 ms */),
    fDesiredAudioRTPPayloadFormat(desiredAudioRTPPayloadFormat),
    fVerbosityLevel(verbosityLevel), fCSeq(0),
    fUserAgentHeaderStr(NULL), fUserAgentHeaderStrLen(0),
    fURL(NULL), fURLSize(0),
    fToTagStr(NULL), fToTagStrSize(0),
    fUserName(NULL), fUserNameSize(0),
    fInviteSDPDescription(NULL), fInviteSDPDescriptionReturned(NULL),
    fInviteCmd(NULL), fInviteCmdSize(0) {
  // Normalize NULL string parameters to "" so later strlen()/strDup() calls are safe:
  if (mimeSubtype == NULL) mimeSubtype = "";
  fMIMESubtype = strDup(mimeSubtype);
  fMIMESubtypeSize = strlen(fMIMESubtype);

  if (applicationName == NULL) applicationName = "";
  fApplicationName = strDup(applicationName);
  fApplicationNameSize = strlen(fApplicationName);

  struct in_addr ourAddress;
  ourAddress.s_addr = ourIPAddress(env); // hack
  fOurAddressStr = strDup(AddressString(ourAddress).val());
  fOurAddressStrSize = strlen(fOurAddressStr);

  // Create our signaling socket, initially on an ephemeral (0) port:
  // NOTE(review): plain "new" never returns NULL (it throws), so this error
  // branch looks unreachable as written — confirm intent.
  fOurSocket = new Groupsock(env, ourAddress, 0, 255);
  if (fOurSocket == NULL) {
    env << "ERROR: Failed to create socket for addr "
	<< fOurAddressStr << ": "
	<< env.getResultMsg() << "\n";
  }

  // Now, find out our source port number.  Hack: Do this by first trying to
  // send a 0-length packet, so that the "getSourcePort()" call will work.
  fOurSocket->output(envir(), (unsigned char*)"", 0);
  Port srcPort(0);
  getSourcePort(env, fOurSocket->socketNum(), srcPort);
  if (srcPort.num() != 0) {
    fOurPortNum = ntohs(srcPort.num());
  } else {
    // No luck.  Try again using a default port number:
    fOurPortNum = 5060;
    delete fOurSocket;
    fOurSocket = new Groupsock(env, ourAddress, fOurPortNum, 255);
    if (fOurSocket == NULL) {
      env << "ERROR: Failed to create socket for addr "
	  << fOurAddressStr << ", port "
	  << fOurPortNum << ": "
	  << env.getResultMsg() << "\n";
    }
  }

  // Set the "User-Agent:" header to use in each request:
  // (e.g. "myApp (LIVE555 Streaming Media vX.Y.Z)"; bare library name if no app name)
  char const* const libName = "LIVE555 Streaming Media v";
  char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;
  char const* libPrefix; char const* libSuffix;
  if (applicationName == NULL || applicationName[0] == '\0') {
    applicationName = libPrefix = libSuffix = "";
  } else {
    libPrefix = " (";
    libSuffix = ")";
  }
  unsigned userAgentNameSize
    = fApplicationNameSize + strlen(libPrefix) + strlen(libName) + strlen(libVersionStr) + strlen(libSuffix) + 1;
  char* userAgentName = new char[userAgentNameSize];
  sprintf(userAgentName, "%s%s%s%s%s",
	  applicationName, libPrefix, libName, libVersionStr, libSuffix);
  setUserAgentString(userAgentName);
  delete[] userAgentName;

  // Initialize all per-call state:
  reset();
}
+
// Destructor: clears per-call state (which also frees call-scoped strings),
// then releases the socket and the strings allocated in the constructor.
SIPClient::~SIPClient() {
  reset();

  delete[] fUserAgentHeaderStr;
  delete fOurSocket;
  // These were strDup()'d in the constructor; cast away const for delete[]:
  delete[] (char*)fOurAddressStr;
  delete[] (char*)fApplicationName;
  delete[] (char*)fMIMESubtype;
}
+
// Resets all per-call state (authenticators, INVITE command/SDP buffers,
// user name, target URL, server address) back to its initial values.
void SIPClient::reset() {
  fWorkingAuthenticator = NULL;
  delete[] fInviteCmd; fInviteCmd = NULL; fInviteCmdSize = 0;
  delete[] fInviteSDPDescription; fInviteSDPDescription = NULL;

  // The user name defaults to the application name:
  delete[] (char*)fUserName; fUserName = strDup(fApplicationName);
  fUserNameSize = strlen(fUserName);

  fValidAuthenticator.reset();

  delete[] (char*)fToTagStr; fToTagStr = NULL; fToTagStrSize = 0;
  fServerPortNum = 0;
  fServerAddress.s_addr = 0;
  delete[] (char*)fURL; fURL = NULL; fURLSize = 0;
}
+
+void SIPClient::setProxyServer(unsigned proxyServerAddress,
+ portNumBits proxyServerPortNum) {
+ fServerAddress.s_addr = proxyServerAddress;
+ fServerPortNum = proxyServerPortNum;
+ if (fOurSocket != NULL) {
+ fOurSocket->changeDestinationParameters(fServerAddress,
+ fServerPortNum, 255);
+ }
+}
+
+static char* getLine(char* startOfLine) {
+ // returns the start of the next line, or NULL if none
+ for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) {
+ if (*ptr == '\r' || *ptr == '\n') {
+ // We found the end of the line
+ *ptr++ = '\0';
+ if (*ptr == '\n') ++ptr;
+ return ptr;
+ }
+ }
+
+ return NULL;
+}
+
// Begins a SIP "INVITE" transaction for "url".  Returns the SDP description
// that we offered (caller owns the string), or NULL on failure.  If the URL
// embeds "username:password@" credentials and no authenticator was supplied,
// delegates to inviteWithPassword() instead.
char* SIPClient::invite(char const* url, Authenticator* authenticator) {
  // First, check whether "url" contains a username:password to be used:
  char* username; char* password;
  if (authenticator == NULL
      && parseSIPURLUsernamePassword(url, username, password)) {
    char* result = inviteWithPassword(url, username, password);
    delete[] username; delete[] password; // they were dynamically allocated
    return result;
  }

  // Parse the URL to discover the server's address/port:
  if (!processURL(url)) return NULL;

  delete[] (char*)fURL; fURL = strDup(url);
  fURLSize = strlen(fURL);

  // Fresh random identifiers for this call (RFC 3261 Call-ID and From-tag):
  fCallId = our_random32();
  fFromTag = our_random32();

  return invite1(authenticator);
}
+
// Performs the actual "INVITE" transaction: builds the SDP offer and the full
// INVITE request text, installs the response handler and the RFC 3261 timers
// (A: retransmission, B: transaction timeout, D: post-completion wait), sends
// the request, and blocks in the event loop until the transaction finishes.
// Returns a copy of the SDP description we offered, or NULL on failure.
// Note: all sprintf() buffer sizes are computed from the format string plus
// worst-case lengths of each substituted field; keep them in sync with the
// format strings if either is changed.
char* SIPClient::invite1(Authenticator* authenticator) {
  do {
    // Send the INVITE command:

    // First, construct an authenticator string:
    fValidAuthenticator.reset();
    fWorkingAuthenticator = authenticator;
    char* authenticatorStr
      = createAuthenticatorString(fWorkingAuthenticator, "INVITE", fURL);

    // Then, construct the SDP description to be sent in the INVITE:
    char* rtpmapLine;
    unsigned rtpmapLineSize;
    if (fMIMESubtypeSize > 0) {
      char const* const rtpmapFmt =
	"a=rtpmap:%u %s/8000\r\n";
      unsigned rtpmapFmtSize = strlen(rtpmapFmt)
	+ 3 /* max char len */ + fMIMESubtypeSize;
      rtpmapLine = new char[rtpmapFmtSize];
      sprintf(rtpmapLine, rtpmapFmt,
	      fDesiredAudioRTPPayloadFormat, fMIMESubtype);
      rtpmapLineSize = strlen(rtpmapLine);
    } else {
      // Static payload type => no "a=rtpmap:" line
      rtpmapLine = strDup("");
      rtpmapLineSize = 0;
    }
    char const* const inviteSDPFmt =
      "v=0\r\n"
      "o=- %u %u IN IP4 %s\r\n"
      "s=%s session\r\n"
      "c=IN IP4 %s\r\n"
      "t=0 0\r\n"
      "m=audio %u RTP/AVP %u\r\n"
      "%s";
    unsigned inviteSDPFmtSize = strlen(inviteSDPFmt)
      + 20 /* max int len */ + 20 + fOurAddressStrSize
      + fApplicationNameSize
      + fOurAddressStrSize
      + 5 /* max short len */ + 3 /* max char len */
      + rtpmapLineSize;
    delete[] fInviteSDPDescription;
    fInviteSDPDescription = new char[inviteSDPFmtSize];
    sprintf(fInviteSDPDescription, inviteSDPFmt,
	    fCallId, fCSeq, fOurAddressStr,
	    fApplicationName,
	    fOurAddressStr,
	    fClientStartPortNum, fDesiredAudioRTPPayloadFormat,
	    rtpmapLine);
    unsigned inviteSDPSize = strlen(fInviteSDPDescription);
    delete[] rtpmapLine;

    char const* const cmdFmt =
      "INVITE %s SIP/2.0\r\n"
      "From: %s <sip:%s@%s>;tag=%u\r\n"
      "Via: SIP/2.0/UDP %s:%u\r\n"
      "Max-Forwards: 70\r\n"
      "To: %s\r\n"
      "Contact: sip:%s@%s:%u\r\n"
      "Call-ID: %u@%s\r\n"
      "CSeq: %d INVITE\r\n"
      "Content-Type: application/sdp\r\n"
      "%s" /* Proxy-Authorization: line (if any) */
      "%s" /* User-Agent: line */
      "Content-Length: %d\r\n\r\n"
      "%s";
    unsigned inviteCmdSize = strlen(cmdFmt)
      + fURLSize
      + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */
      + fOurAddressStrSize + 5 /* max port len */
      + fURLSize
      + fUserNameSize + fOurAddressStrSize + 5
      + 20 + fOurAddressStrSize
      + 20
      + strlen(authenticatorStr)
      + fUserAgentHeaderStrLen
      + 20
      + inviteSDPSize;
    delete[] fInviteCmd; fInviteCmd = new char[inviteCmdSize];
    sprintf(fInviteCmd, cmdFmt,
	    fURL,
	    fUserName, fUserName, fOurAddressStr, fFromTag,
	    fOurAddressStr, fOurPortNum,
	    fURL,
	    fUserName, fOurAddressStr, fOurPortNum,
	    fCallId, fOurAddressStr,
	    ++fCSeq,
	    authenticatorStr,
	    fUserAgentHeaderStr,
	    inviteSDPSize,
	    fInviteSDPDescription);
    fInviteCmdSize = strlen(fInviteCmd);
    delete[] authenticatorStr;

    // Before sending the "INVITE", arrange to handle any response packets,
    // and set up timers:
    fInviteClientState = Calling;
    fEventLoopStopFlag = 0;
    TaskScheduler& sched = envir().taskScheduler(); // abbrev.
    sched.turnOnBackgroundReadHandling(fOurSocket->socketNum(),
				       &inviteResponseHandler, this);
    fTimerALen = 1*fT1; // initially
    fTimerACount = 0; // initially
    fTimerA = sched.scheduleDelayedTask(fTimerALen, timerAHandler, this);
    fTimerB = sched.scheduleDelayedTask(64*fT1, timerBHandler, this);
    fTimerD = NULL; // for now

    if (!sendINVITE()) break;

    // Enter the event loop, to handle response packets, and timeouts:
    envir().taskScheduler().doEventLoop(&fEventLoopStopFlag);

    // We're finished with this "INVITE".
    // Turn off response handling and timers:
    sched.turnOffBackgroundReadHandling(fOurSocket->socketNum());
    sched.unscheduleDelayedTask(fTimerA);
    sched.unscheduleDelayedTask(fTimerB);
    sched.unscheduleDelayedTask(fTimerD);

    // NOTE: We return the SDP description that we used in the "INVITE",
    // not the one that we got from the server.
    // ##### Later: match the codecs in the response (offer, answer) #####
    if (fInviteSDPDescription != NULL) {
      return strDup(fInviteSDPDescription);
    }
  } while (0);

  return NULL;
}
+
+void SIPClient::inviteResponseHandler(void* clientData, int /*mask*/) {
+ SIPClient* client = (SIPClient*)clientData;
+ unsigned responseCode = client->getResponseCode();
+ client->doInviteStateMachine(responseCode);
+}
+
// Special 'response codes' that represent timers expiring:
// (These values lie well outside the valid SIP response-code range of 100-699,
//  so they can never be confused with a real server response.)
unsigned const timerAFires = 0xAAAAAAAA;
unsigned const timerBFires = 0xBBBBBBBB;
unsigned const timerDFires = 0xDDDDDDDD;
+
+void SIPClient::timerAHandler(void* clientData) {
+ SIPClient* client = (SIPClient*)clientData;
+ client->fTimerA = NULL;
+ if (client->fVerbosityLevel >= 1) {
+ client->envir() << "RETRANSMISSION " << ++client->fTimerACount
+ << ", after " << client->fTimerALen/1000000.0
+ << " additional seconds\n";
+ }
+ client->doInviteStateMachine(timerAFires);
+}
+
+void SIPClient::timerBHandler(void* clientData) {
+ SIPClient* client = (SIPClient*)clientData;
+ client->fTimerB = NULL;
+ if (client->fVerbosityLevel >= 1) {
+ client->envir() << "RETRANSMISSION TIMEOUT, after "
+ << 64*client->fT1/1000000.0 << " seconds\n";
+ fflush(stderr);
+ }
+ client->doInviteStateMachine(timerBFires);
+}
+
+void SIPClient::timerDHandler(void* clientData) {
+ SIPClient* client = (SIPClient*)clientData;
+ client->fTimerD = NULL;
+ if (client->fVerbosityLevel >= 1) {
+ client->envir() << "TIMER D EXPIRED\n";
+ }
+ client->doInviteStateMachine(timerDFires);
+}
+
// Advances the INVITE client transaction state machine on each event.
// "responseCode" is either a real SIP status code (100-699) or one of the
// sentinel values timerAFires/timerBFires/timerDFires defined above.
void SIPClient::doInviteStateMachine(unsigned responseCode) {
  // Implement the state transition diagram (RFC 3261, Figure 5)
  TaskScheduler& sched = envir().taskScheduler(); // abbrev.
  switch (fInviteClientState) {
    case Calling: {
      if (responseCode == timerAFires) {
	// Restart timer A (with double the timeout interval):
	// (exponential backoff of the INVITE retransmission)
	fTimerALen *= 2;
	fTimerA
	  = sched.scheduleDelayedTask(fTimerALen, timerAHandler, this);

	fInviteClientState = Calling;
	if (!sendINVITE()) doInviteStateTerminated(0);
      } else {
	// Turn off timers A & B before moving to a new state:
	sched.unscheduleDelayedTask(fTimerA);
	sched.unscheduleDelayedTask(fTimerB);

	if (responseCode == timerBFires) {
	  envir().setResultMsg("No response from server");
	  doInviteStateTerminated(0);
	} else if (responseCode >= 100 && responseCode <= 199) {
	  // Provisional response => wait for the final one:
	  fInviteClientState = Proceeding;
	} else if (responseCode >= 200 && responseCode <= 299) {
	  // Success:
	  doInviteStateTerminated(responseCode);
	} else if (responseCode >= 400 && responseCode <= 499) {
	  doInviteStateTerminated(responseCode);
	  // this isn't what the spec says, but it seems right...
	} else if (responseCode >= 300 && responseCode <= 699) {
	  // Other failure => ACK it, and linger in Completed (timer D) to
	  // absorb retransmitted responses:
	  fInviteClientState = Completed;
	  fTimerD
	    = sched.scheduleDelayedTask(32000000, timerDHandler, this);
	  if (!sendACK()) doInviteStateTerminated(0);
	}
      }
      break;
    }

    case Proceeding: {
      if (responseCode >= 100 && responseCode <= 199) {
	fInviteClientState = Proceeding;
      } else if (responseCode >= 200 && responseCode <= 299) {
	doInviteStateTerminated(responseCode);
      } else if (responseCode >= 400 && responseCode <= 499) {
	doInviteStateTerminated(responseCode);
	// this isn't what the spec says, but it seems right...
      } else if (responseCode >= 300 && responseCode <= 699) {
	fInviteClientState = Completed;
	fTimerD = sched.scheduleDelayedTask(32000000, timerDHandler, this);
	if (!sendACK()) doInviteStateTerminated(0);
      }
      break;
    }

    case Completed: {
      if (responseCode == timerDFires) {
	envir().setResultMsg("Transaction terminated");
	doInviteStateTerminated(0);
      } else if (responseCode >= 300 && responseCode <= 699) {
	// A retransmitted failure response => re-ACK it:
	fInviteClientState = Completed;
	if (!sendACK()) doInviteStateTerminated(0);
      }
      break;
    }

    case Terminated: {
      doInviteStateTerminated(responseCode);
      break;
    }
  }
}
+
+void SIPClient::doInviteStateTerminated(unsigned responseCode) {
+ fInviteClientState = Terminated; // FWIW...
+ if (responseCode < 200 || responseCode > 299) {
+ // We failed, so return NULL;
+ delete[] fInviteSDPDescription; fInviteSDPDescription = NULL;
+ delete[] fInviteSDPDescriptionReturned; fInviteSDPDescriptionReturned = NULL;
+ }
+
+ // Unblock the event loop:
+ fEventLoopStopFlag = ~0;
+}
+
+Boolean SIPClient::sendINVITE() {
+ if (!sendRequest(fInviteCmd, fInviteCmdSize)) {
+ envir().setResultErrMsg("INVITE send() failed: ");
+ return False;
+ }
+ return True;
+}
+
+// Read a SIP response from the server, parse its response code (returned),
+// and - for a 200 response - capture the "To:" tag, "Content-Length:", and
+// the SDP description in the message body (stored in
+// "fInviteSDPDescriptionReturned").  For a 4xx response, it also tries to
+// extract digest-authentication parameters into "fWorkingAuthenticator".
+// Returns 0 if no response (or no parseable response code) was received.
+unsigned SIPClient::getResponseCode() {
+  unsigned responseCode = 0;
+  do {
+    // Get the response from the server:
+    unsigned const readBufSize = 10000;
+    char readBuffer[readBufSize+1]; char* readBuf = readBuffer;
+
+    char* firstLine = NULL;
+    char* nextLineStart = NULL;
+    unsigned bytesRead = getResponse(readBuf, readBufSize);
+    if (bytesRead == 0) break;
+    if (fVerbosityLevel >= 1) {
+      envir() << "Received INVITE response: " << readBuf << "\n";
+    }
+
+    // Inspect the first line to get the response code:
+    firstLine = readBuf;
+    nextLineStart = getLine(firstLine);
+    if (!parseResponseCode(firstLine, responseCode)) break;
+
+    if (responseCode != 200) {
+      if (responseCode >= 400 && responseCode <= 499
+          && fWorkingAuthenticator != NULL) {
+        // We have an authentication failure, so fill in
+        // "*fWorkingAuthenticator" using the contents of a following
+        // "Proxy-Authenticate:" or "WWW-Authenticate:" line.  (Once we compute a 'response' for
+        // "fWorkingAuthenticator", it can be used in a subsequent request
+        // - that will hopefully succeed.)
+        char* lineStart;
+        while (1) {
+          lineStart = nextLineStart;
+          if (lineStart == NULL) break;
+
+          nextLineStart = getLine(lineStart);
+          if (lineStart[0] == '\0') break; // this is a blank line
+
+          // "strDupSize()" allocates a buffer at least as large as the line,
+          // so the sscanf "%[^\"]" conversions below cannot overflow it:
+          char* realm = strDupSize(lineStart);
+          char* nonce = strDupSize(lineStart);
+          // ##### Check for the format of "Proxy-Authenticate:" lines from
+          // ##### known server types.
+          // ##### This is a crock! We should make the parsing more general
+          Boolean foundAuthenticateHeader = False;
+          if (
+              // Asterisk #####
+              sscanf(lineStart, "Proxy-Authenticate: Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"",
+                     realm, nonce) == 2 ||
+              sscanf(lineStart, "WWW-Authenticate: Digest realm=\"%[^\"]\", nonce=\"%[^\"]\"",
+                     realm, nonce) == 2 ||
+              // Cisco ATA #####
+              sscanf(lineStart, "Proxy-Authenticate: Digest algorithm=MD5,domain=\"%*[^\"]\",nonce=\"%[^\"]\", realm=\"%[^\"]\"",
+                     nonce, realm) == 2) {
+            fWorkingAuthenticator->setRealmAndNonce(realm, nonce);
+            foundAuthenticateHeader = True;
+          }
+          delete[] realm; delete[] nonce;
+          if (foundAuthenticateHeader) break;
+        }
+      }
+      envir().setResultMsg("cannot handle INVITE response: ", firstLine);
+      break;
+    }
+
+    // Skip every subsequent header line, until we see a blank line.
+    // While doing so, check for "To:" and "Content-Length:" lines.
+    // The remaining data is assumed to be the SDP descriptor that we want.
+    // We should really do some more checking on the headers here - e.g., to
+    // check for "Content-type: application/sdp", "CSeq", etc. #####
+    int contentLength = -1;
+    char* lineStart;
+    while (1) {
+      lineStart = nextLineStart;
+      if (lineStart == NULL) break;
+
+      nextLineStart = getLine(lineStart);
+      if (lineStart[0] == '\0') break; // this is a blank line
+
+      char* toTagStr = strDupSize(lineStart);
+      if (sscanf(lineStart, "To:%*[^;]; tag=%s", toTagStr) == 1) {
+        // Remember the server's "To:" tag; we'll echo it in our ACK/BYE:
+        delete[] (char*)fToTagStr; fToTagStr = strDup(toTagStr);
+        fToTagStrSize = strlen(fToTagStr);
+      }
+      delete[] toTagStr;
+
+      if (sscanf(lineStart, "Content-Length: %d", &contentLength) == 1
+          || sscanf(lineStart, "Content-length: %d", &contentLength) == 1) {
+        if (contentLength < 0) {
+          envir().setResultMsg("Bad \"Content-Length:\" header: \"",
+                               lineStart, "\"");
+          break;
+        }
+      }
+    }
+
+    // We're now at the end of the response header lines
+    if (lineStart == NULL) {
+      envir().setResultMsg("no content following header lines: ", readBuf);
+      break;
+    }
+
+    // Use the remaining data as the SDP descr, but first, check
+    // the "Content-Length:" header (if any) that we saw.  We may need to
+    // read more data, or we may have extraneous data in the buffer.
+    char* bodyStart = nextLineStart;
+    if (bodyStart != NULL && contentLength >= 0) {
+      // We saw a "Content-Length:" header
+      unsigned numBodyBytes = &readBuf[bytesRead] - bodyStart;
+      if (contentLength > (int)numBodyBytes) {
+        // We need to read more data.  First, make sure we have enough
+        // space for it:
+        unsigned numExtraBytesNeeded = contentLength - numBodyBytes;
+#ifdef USING_TCP
+        // THIS CODE WORKS ONLY FOR TCP: #####
+        unsigned remainingBufferSize
+          = readBufSize - (bytesRead + (readBuf - readBuffer));
+        if (numExtraBytesNeeded > remainingBufferSize) {
+          char tmpBuf[200];
+          sprintf(tmpBuf, "Read buffer size (%d) is too small for \"Content-Length:\" %d (need a buffer size of >= %d bytes\n",
+                  readBufSize, contentLength,
+                  readBufSize + numExtraBytesNeeded - remainingBufferSize);
+          envir().setResultMsg(tmpBuf);
+          break;
+        }
+
+        // Keep reading more data until we have enough:
+        if (fVerbosityLevel >= 1) {
+          envir() << "Need to read " << numExtraBytesNeeded
+                  << " extra bytes\n";
+        }
+        while (numExtraBytesNeeded > 0) {
+          char* ptr = &readBuf[bytesRead];
+          unsigned bytesRead2;
+          struct sockaddr_in fromAddr;
+          Boolean readSuccess
+            = fOurSocket->handleRead((unsigned char*)ptr,
+                                     numExtraBytesNeeded,
+                                     bytesRead2, fromAddr);
+          if (!readSuccess) break;
+          ptr[bytesRead2] = '\0';
+          if (fVerbosityLevel >= 1) {
+            envir() << "Read " << bytesRead2
+                    << " extra bytes: " << ptr << "\n";
+          }
+
+          bytesRead += bytesRead2;
+          numExtraBytesNeeded -= bytesRead2;
+        }
+#endif
+        // Note: without USING_TCP, we can never read the extra bytes,
+        // so a short body is always treated as a failure here:
+        if (numExtraBytesNeeded > 0) break; // one of the reads failed
+      }
+
+      bodyStart[contentLength] = '\0'; // trims any extra data
+      delete[] fInviteSDPDescriptionReturned; fInviteSDPDescriptionReturned = strDup(bodyStart);
+    }
+  } while (0);
+
+  return responseCode;
+}
+
+// Send an INVITE, authenticating with "username"/"password" if the server
+// challenges us.  First tries un-credentialed (well, with an authenticator
+// that has no realm/nonce yet); on a digest challenge, retries once with
+// the filled-in authenticator (and the same Call-ID).
+// Returns the SDP description on success, or NULL on failure.
+char* SIPClient::inviteWithPassword(char const* url, char const* username,
+				    char const* password) {
+  delete[] (char*)fUserName; fUserName = strDup(username);
+  fUserNameSize = strlen(fUserName);
+
+  Authenticator authenticator(username, password);
+  char* inviteResult = invite(url, &authenticator);
+  if (inviteResult != NULL) {
+    // We are already authorized
+    return inviteResult;
+  }
+
+  // The "realm" and "nonce" fields should have been filled in:
+  if (authenticator.realm() == NULL || authenticator.nonce() == NULL) {
+    // We haven't been given enough information to try again, so fail:
+    return NULL;
+  }
+
+  // Try again (but with the same CallId):
+  inviteResult = invite1(&authenticator);
+  if (inviteResult != NULL) {
+    // The authenticator worked, so use it in future requests:
+    fValidAuthenticator = authenticator;
+  }
+
+  return inviteResult;
+}
+
+// Send an "ACK" request, acknowledging the server's final response to our
+// INVITE.  Returns True on success, False if the send failed.
+Boolean SIPClient::sendACK() {
+  char* cmd = NULL;
+  do {
+    char const* const cmdFmt =
+      "ACK %s SIP/2.0\r\n"
+      "From: %s <sip:%s@%s>;tag=%u\r\n"
+      "Via: SIP/2.0/UDP %s:%u\r\n"
+      "Max-Forwards: 70\r\n"
+      "To: %s;tag=%s\r\n"
+      "Call-ID: %u@%s\r\n"
+      "CSeq: %d ACK\r\n"
+      "Content-Length: 0\r\n\r\n";
+    // Buffer size = format string + worst-case size of each substitution
+    // (the "20"s allow for the largest possible rendered integer):
+    unsigned cmdSize = strlen(cmdFmt)
+      + fURLSize
+      + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */
+      + fOurAddressStrSize + 5 /* max port len */
+      + fURLSize + fToTagStrSize
+      + 20 + fOurAddressStrSize
+      + 20;
+    cmd = new char[cmdSize];
+    sprintf(cmd, cmdFmt,
+	    fURL,
+	    fUserName, fUserName, fOurAddressStr, fFromTag,
+	    fOurAddressStr, fOurPortNum,
+	    fURL, fToTagStr,
+	    fCallId, fOurAddressStr,
+	    fCSeq /* note: it's the same as before; not incremented */);
+
+    if (!sendRequest(cmd, strlen(cmd))) {
+      envir().setResultErrMsg("ACK send() failed: ");
+      break;
+    }
+
+    delete[] cmd;
+    return True;
+  } while (0);
+
+  delete[] cmd;
+  return False;
+}
+
+// Send a "BYE" request, terminating the session (with an incremented CSeq).
+// Returns True on success, False if the send failed.
+// NOTE: This should really be retransmitted, for reliability #####
+Boolean SIPClient::sendBYE() {
+  char const* const cmdFmt =
+    "BYE %s SIP/2.0\r\n"
+    "From: %s <sip:%s@%s>;tag=%u\r\n"
+    "Via: SIP/2.0/UDP %s:%u\r\n"
+    "Max-Forwards: 70\r\n"
+    "To: %s;tag=%s\r\n"
+    "Call-ID: %u@%s\r\n"
+    "CSeq: %d BYE\r\n"
+    "Content-Length: 0\r\n\r\n";
+  // Allow for the worst-case rendered size of every format substitution:
+  unsigned const cmdMaxSize = strlen(cmdFmt)
+    + fURLSize
+    + 2*fUserNameSize + fOurAddressStrSize + 20 /* max int len */
+    + fOurAddressStrSize + 5 /* max port len */
+    + fURLSize + fToTagStrSize
+    + 20 + fOurAddressStrSize
+    + 20;
+  char* cmd = new char[cmdMaxSize];
+  sprintf(cmd, cmdFmt,
+	  fURL,
+	  fUserName, fUserName, fOurAddressStr, fFromTag,
+	  fOurAddressStr, fOurPortNum,
+	  fURL, fToTagStr,
+	  fCallId, fOurAddressStr,
+	  ++fCSeq);
+
+  Boolean success = True;
+  if (!sendRequest(cmd, strlen(cmd))) {
+    envir().setResultErrMsg("BYE send() failed: ");
+    success = False;
+  }
+
+  delete[] cmd;
+  return success;
+}
+
+// Record the server address/port for "url" (unless we already have one),
+// and re-target our socket accordingly.  Returns False iff URL parsing fails.
+Boolean SIPClient::processURL(char const* url) {
+  // If we already have a server address, there's nothing to do:
+  if (fServerAddress.s_addr != 0) return True;
+
+  // Otherwise, get the address and port by parsing the URL:
+  NetAddress destAddress;
+  if (!parseSIPURL(envir(), url, destAddress, fServerPortNum)) return False;
+  fServerAddress.s_addr = *(unsigned*)(destAddress.data());
+
+  if (fOurSocket != NULL) {
+    fOurSocket->changeDestinationParameters(fServerAddress,
+					    fServerPortNum, 255);
+  }
+
+  return True;
+}
+
+// Parse "url" of the form "sip:<username>[:<password>]@<address>[:<port>][/<etc>]",
+// resolving <address> into "address" and <port> (default 5060) into "portNum".
+// Returns False (with an error message set in "env") on any parse/lookup failure.
+Boolean SIPClient::parseSIPURL(UsageEnvironment& env, char const* url,
+			       NetAddress& address,
+			       portNumBits& portNum) {
+  do {
+    // Parse the URL as "sip:<username>@<address>:<port>/<etc>"
+    // (with ":<port>" and "/<etc>" optional)
+    // Also, skip over any "<username>[:<password>]@" preceding <address>
+    char const* prefix = "sip:";
+    unsigned const prefixLength = 4;
+    if (_strncasecmp(url, prefix, prefixLength) != 0) {
+      env.setResultMsg("URL is not of the form \"", prefix, "\"");
+      break;
+    }
+
+    unsigned const parseBufferSize = 100;
+    char parseBuffer[parseBufferSize];
+    // Skip past the first '@' (if any):
+    unsigned addressStartIndex = prefixLength;
+    while (url[addressStartIndex] != '\0'
+	   && url[addressStartIndex++] != '@') {}
+    char const* from = &url[addressStartIndex];
+
+    // Skip over any "<username>[:<password>]@"
+    // (i.e., a second '@' occurring before any '/'):
+    char const* from1 = from;
+    while (*from1 != '\0' && *from1 != '/') {
+      if (*from1 == '@') {
+	from = ++from1;
+	break;
+      }
+      ++from1;
+    }
+
+    // Copy the <address> part (up to ':', '/', or end-of-string) into
+    // "parseBuffer":
+    char* to = &parseBuffer[0];
+    unsigned i;
+    for (i = 0; i < parseBufferSize; ++i) {
+      if (*from == '\0' || *from == ':' || *from == '/') {
+	// We've completed parsing the address
+	*to = '\0';
+	break;
+      }
+      *to++ = *from++;
+    }
+    if (i == parseBufferSize) {
+      env.setResultMsg("URL is too long");
+      break;
+    }
+
+    NetAddressList addresses(parseBuffer);
+    if (addresses.numAddresses() == 0) {
+      env.setResultMsg("Failed to find network address for \"",
+		       parseBuffer, "\"");
+      break;
+    }
+    address = *(addresses.firstAddress());
+
+    portNum = 5060; // default value
+    char nextChar = *from;
+    if (nextChar == ':') {
+      int portNumInt;
+      if (sscanf(++from, "%d", &portNumInt) != 1) {
+	env.setResultMsg("No port number follows ':'");
+	break;
+      }
+      if (portNumInt < 1 || portNumInt > 65535) {
+	env.setResultMsg("Bad port number");
+	break;
+      }
+      portNum = (portNumBits)portNumInt;
+    }
+
+    return True;
+  } while (0);
+
+  return False;
+}
+
+// Parse "url" of the form "sip:<username>[:<password>]@<whatever>", setting
+// "username" and "password" to heap-allocated copies of those fields (the
+// password is "" if absent).  Both are set to NULL, and False is returned,
+// if the URL doesn't match this form.
+Boolean SIPClient::parseSIPURLUsernamePassword(char const* url,
+					       char*& username,
+					       char*& password) {
+  username = password = NULL; // by default
+
+  char const* prefix = "sip:";
+  unsigned const prefixLength = 4;
+  if (_strncasecmp(url, prefix, prefixLength) != 0) return False;
+
+  // Scan (up to any '/') for the first ':' and the '@':
+  unsigned const usernameIndex = prefixLength;
+  unsigned colonIndex = 0, atIndex = 0;
+  for (unsigned i = usernameIndex; url[i] != '\0' && url[i] != '/'; ++i) {
+    char const c = url[i];
+    if (c == ':' && colonIndex == 0) {
+      colonIndex = i;
+    } else if (c == '@') {
+      atIndex = i;
+      break; // we're done
+    }
+  }
+  if (atIndex == 0) return False; // no '@' found
+
+  // Make a writable copy, and NUL-terminate the username (and password)
+  // fields within it before duplicating them:
+  char* urlCopy = strDup(url);
+  urlCopy[atIndex] = '\0';
+  if (colonIndex > 0) {
+    urlCopy[colonIndex] = '\0';
+    password = strDup(&urlCopy[colonIndex+1]);
+  } else {
+    password = strDup("");
+  }
+  username = strDup(&urlCopy[usernameIndex]);
+  delete[] urlCopy;
+
+  return True;
+}
+
+// Build an "Authorization: Digest ..." header line for "cmd" on "url",
+// using the realm/nonce/username/password in "authenticator".
+// Returns a heap-allocated string (the caller owns it); returns a
+// heap-allocated "" if the authenticator isn't fully filled in.
+char*
+SIPClient::createAuthenticatorString(Authenticator const* authenticator,
+				     char const* cmd, char const* url) {
+  if (authenticator != NULL && authenticator->realm() != NULL
+      && authenticator->nonce() != NULL && authenticator->username() != NULL
+      && authenticator->password() != NULL) {
+    // We've been provided a filled-in authenticator, so use it:
+    char const* const authFmt
+      = "Authorization: Digest username=\"%s\", realm=\"%s\", nonce=\"%s\", response=\"%s\", uri=\"%s\"\r\n";
+    char const* response = authenticator->computeDigestResponse(cmd, url);
+    // Buffer size = format string + the exact size of each substitution:
+    unsigned authBufSize = strlen(authFmt)
+      + strlen(authenticator->username()) + strlen(authenticator->realm())
+      + strlen(authenticator->nonce()) + strlen(url) + strlen(response);
+    char* authenticatorStr = new char[authBufSize];
+    sprintf(authenticatorStr, authFmt,
+	    authenticator->username(), authenticator->realm(),
+	    authenticator->nonce(), response, url);
+    // The digest response was heap-allocated; release it via its owner:
+    authenticator->reclaimDigestResponse(response);
+
+    return authenticatorStr;
+  }
+
+  return strDup("");
+}
+
+// Send "requestString" (of length "requestLength") to the server over our
+// socket, logging it first if we're verbose.  Returns the socket's result.
+Boolean SIPClient::sendRequest(char const* requestString,
+			       unsigned requestLength) {
+  if (fVerbosityLevel >= 1) {
+    envir() << "Sending request: " << requestString << "\n";
+  }
+  // NOTE: We should really check that "requestLength" is not #####
+  // too large for UDP (see RFC 3261, section 18.1.1) #####
+  return fOurSocket->output(envir(), (unsigned char*)requestString, requestLength);
+}
+
+// Read a SIP response into "responseBuffer", stopping once a "\r\n\r\n"
+// (end-of-headers) is seen after some non-CRLF byte, or the buffer fills.
+// Any leading CR/LF bytes are trimmed (by advancing "responseBuffer" -
+// note that it's a reference parameter, and may be modified).
+// Returns the number of bytes in the (NUL-terminated) response, or 0 on
+// failure/truncation.
+unsigned SIPClient::getResponse(char*& responseBuffer,
+				unsigned responseBufferSize) {
+  if (responseBufferSize == 0) return 0; // just in case...
+  responseBuffer[0] = '\0'; // ditto
+
+  // Keep reading data from the socket until we see "\r\n\r\n" (except
+  // at the start), or until we fill up our buffer.
+  // Don't read any more than this.
+  char* p = responseBuffer; // scan position; retained across reads
+  Boolean haveSeenNonCRLF = False;
+  int bytesRead = 0;
+  while (bytesRead < (int)responseBufferSize) {
+    unsigned bytesReadNow;
+    struct sockaddr_in fromAddr;
+    unsigned char* toPosn = (unsigned char*)(responseBuffer+bytesRead);
+    Boolean readSuccess
+      = fOurSocket->handleRead(toPosn, responseBufferSize-bytesRead,
+			       bytesReadNow, fromAddr);
+    if (!readSuccess || bytesReadNow == 0) {
+      envir().setResultMsg("SIP response was truncated");
+      break;
+    }
+    bytesRead += bytesReadNow;
+
+    // Check whether we have "\r\n\r\n":
+    // ("lastToCheck" is the last position at which a 4-byte match can start.)
+    char* lastToCheck = responseBuffer+bytesRead-4;
+    if (lastToCheck < responseBuffer) continue;
+    for (; p <= lastToCheck; ++p) {
+      if (haveSeenNonCRLF) {
+	if (*p == '\r' && *(p+1) == '\n' &&
+	    *(p+2) == '\r' && *(p+3) == '\n') {
+	  responseBuffer[bytesRead] = '\0';
+
+	  // Before returning, trim any \r or \n from the start:
+	  while (*responseBuffer == '\r' || *responseBuffer == '\n') {
+	    ++responseBuffer;
+	    --bytesRead;
+	  }
+	  return bytesRead;
+	}
+      } else {
+	if (*p != '\r' && *p != '\n') {
+	  haveSeenNonCRLF = True;
+	}
+      }
+    }
+  }
+
+  return 0;
+}
+
+// Extract the numeric response code (the second whitespace-separated token)
+// from the first line of a SIP response, e.g. "SIP/2.0 200 OK" -> 200.
+// Returns False (with an error message set) if no code is present.
+Boolean SIPClient::parseResponseCode(char const* line,
+				     unsigned& responseCode) {
+  if (sscanf(line, "%*s%u", &responseCode) == 1) return True;
+
+  envir().setResultMsg("no response code in line: \"", line, "\"");
+  return False;
+}
diff --git a/liveMedia/SRTPCryptographicContext.cpp b/liveMedia/SRTPCryptographicContext.cpp
new file mode 100644
index 0000000..2fb8235
--- /dev/null
+++ b/liveMedia/SRTPCryptographicContext.cpp
@@ -0,0 +1,464 @@
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// This code may not be copied or used in any form without permission from Live Networks, Inc.
+//
+// The SRTP 'Cryptographic Context', used in all of our uses of SRTP.
+// Implementation
+
+#include "SRTPCryptographicContext.hh"
+#ifndef NO_OPENSSL
+#include "HMAC_SHA1.hh"
+#include <openssl/aes.h>
+#endif
+
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+// Constructor.  With OpenSSL available, derive the session (cipher/auth/salt)
+// keys from the MIKEY master key+salt right away.
+// Note the unusual preprocessor layout: under NO_OPENSSL the member
+// initializers are compiled out, and the "{" under "#else" supplies the
+// (empty) constructor body instead.
+SRTPCryptographicContext
+::SRTPCryptographicContext(MIKEYState const& mikeyState)
+#ifndef NO_OPENSSL
+  : fMIKEYState(mikeyState),
+    fHaveReceivedSRTPPackets(False), fSRTCPIndex(0) {
+  // Begin by doing a key derivation, to generate the keying data that we need:
+  performKeyDerivation();
+#else
+  {
+#endif
+}
+
+// Destructor.  (No owned resources to release.)
+SRTPCryptographicContext::~SRTPCryptographicContext() {
+}
+
+// Process an incoming SRTP packet (in place): validate its size, compute the
+// packet's 'index' (rollover counter "ROC" + RTP sequence number), verify its
+// authentication tag (if we authenticate), then decrypt the payload (if we
+// encrypt).  On success, returns True with "outPacketSize" set to the size of
+// the resulting plaintext RTP packet in "buffer"; returns False on any error.
+Boolean SRTPCryptographicContext
+::processIncomingSRTPPacket(u_int8_t* buffer, unsigned inPacketSize,
+			    unsigned& outPacketSize) {
+#ifndef NO_OPENSSL
+  do {
+    if (inPacketSize < 12) { // For SRTP, 12 is the minimum packet size (if unauthenticated)
+#ifdef DEBUG
+      fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTPPacket(): Error: Packet size %d is too short for SRTP!\n", inPacketSize);
+#endif
+      break;
+    }
+
+    // Bytes that follow the encrypted portion: the MKI, plus (if we
+    // authenticate) the authentication tag:
+    unsigned const numBytesPastEncryption
+      = SRTP_MKI_LENGTH + (weAuthenticate() ? SRTP_AUTH_TAG_LENGTH : 0);
+    if (inPacketSize <= numBytesPastEncryption) {
+#ifdef DEBUG
+      fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTPPacket(): Error: Packet size %d is too short for encrpytion and/or authentication!\n", inPacketSize);
+#endif
+      break;
+    }
+
+    // Begin by figuring out this packet's 'index' (ROC and RTP sequence number),
+    // and the ROC and RTP sequence number that will be used subsequently, provided that
+    // this packet authenticates OK:
+    u_int16_t const rtpSeqNum = (buffer[2]<<8)|buffer[3];
+    u_int32_t nextROC, thisPacketsROC;
+    u_int16_t nextHighRTPSeqNum;
+
+    if (!fHaveReceivedSRTPPackets) {
+      // First time:
+      nextROC = thisPacketsROC = fROC = 0;
+      nextHighRTPSeqNum = rtpSeqNum;
+    } else {
+      // Check whether the sequence number has rolled over, or is out-of-order:
+      u_int16_t const SEQ_NUM_THRESHOLD = 0x1000;
+      if (rtpSeqNum >= fPreviousHighRTPSeqNum) {
+	// normal case, or out-of-order packet that crosses a rollover:
+	if (rtpSeqNum - fPreviousHighRTPSeqNum < SEQ_NUM_THRESHOLD) {
+	  // normal case:
+	  nextROC = thisPacketsROC = fROC;
+	  nextHighRTPSeqNum = rtpSeqNum;
+	} else {
+	  // out-of-order packet that crosses rollover:
+	  nextROC = fROC;
+	  thisPacketsROC = fROC-1;
+	  nextHighRTPSeqNum = fPreviousHighRTPSeqNum;
+	}
+      } else {
+	// rollover, or out-of-order packet that crosses a rollover:
+	if (fPreviousHighRTPSeqNum - rtpSeqNum > SEQ_NUM_THRESHOLD) {
+	  // rollover:
+	  nextROC = thisPacketsROC = fROC+1;
+	  nextHighRTPSeqNum = rtpSeqNum;
+	} else {
+	  // out-of-order packet (that doesn't cross a rollover):
+	  nextROC = thisPacketsROC = fROC;
+	  nextHighRTPSeqNum = fPreviousHighRTPSeqNum;
+	}
+      }
+    }
+
+    if (weAuthenticate()) {
+      // Authenticate the packet.
+      unsigned const numBytesToAuthenticate
+	= inPacketSize - (SRTP_MKI_LENGTH + SRTP_AUTH_TAG_LENGTH); // ASSERT: > 0
+      u_int8_t const* authenticationTag = &buffer[inPacketSize - SRTP_AUTH_TAG_LENGTH];
+
+      if (!verifySRTPAuthenticationTag(buffer, numBytesToAuthenticate, thisPacketsROC, authenticationTag)) {
+#ifdef DEBUG
+	fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTPPacket(): Failed to authenticate incoming SRTP packet!\n");
+#endif
+	break;
+      }
+    }
+
+    // Now that we've verified the packet, set the 'index values' for next time:
+    fROC = nextROC;
+    fPreviousHighRTPSeqNum = nextHighRTPSeqNum;
+    fHaveReceivedSRTPPackets = True;
+
+    if (weEncryptSRTP()) {
+      // Decrypt the SRTP packet.  It has the index "thisPacketsROC" with "rtpSeqNum".
+      // (Note: we must widen "thisPacketsROC" to 64 bits *before* shifting;
+      //  otherwise the "<<16" would be done in 32-bit arithmetic, silently
+      //  discarding the ROC's top 16 bits from the decryption IV.)
+      u_int64_t index = (((u_int64_t)thisPacketsROC)<<16)|rtpSeqNum;
+
+      // Figure out the RTP header size.  This will tell us which bytes to decrypt:
+      unsigned rtpHeaderSize = 12; // at least the basic 12-byte header
+      rtpHeaderSize += (buffer[0]&0x0F)*4; // # CSRC identifiers
+      if ((buffer[0]&0x10) != 0) {
+	// There's a RTP extension header.  Add its size:
+	if (inPacketSize < rtpHeaderSize + 4) {
+#ifdef DEBUG
+	  fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTPPacket(): Error: Packet size %d is shorter than the minimum specified RTP header size %d!\n", inPacketSize, rtpHeaderSize + 4);
+#endif
+	  break;
+	}
+	u_int16_t hdrExtLength = (buffer[rtpHeaderSize+2]<<8)|buffer[rtpHeaderSize+3];
+	rtpHeaderSize += 4 + hdrExtLength*4;
+      }
+
+      unsigned const offsetToEncryptedBytes = rtpHeaderSize;
+      unsigned numEncryptedBytes = inPacketSize - numBytesPastEncryption; // ASSERT: > 0
+      if (offsetToEncryptedBytes > numEncryptedBytes) {
+#ifdef DEBUG
+	fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTPPacket(): Error: RTP header size %d (expected <= %d) is too large!\n", rtpHeaderSize, numEncryptedBytes);
+#endif
+	break;
+      }
+      numEncryptedBytes -= offsetToEncryptedBytes;
+
+      u_int32_t const SSRC = (buffer[8]<<24)|(buffer[9]<<16)|(buffer[10]<<8)|buffer[11];
+      decryptSRTPPacket(index, SSRC, &buffer[offsetToEncryptedBytes], numEncryptedBytes);
+      outPacketSize = inPacketSize - numBytesPastEncryption; // trim to what we use
+    }
+
+    return True;
+  } while (0);
+#endif
+
+  // An error occurred in the handling of the packet:
+  return False;
+}
+
+// Process an incoming SRTCP packet (in place): validate its size, verify its
+// authentication tag (if we authenticate), and - if the packet's 'E' bit is
+// set - decrypt it.  On success, returns True with "outPacketSize" set to the
+// size of the resulting plaintext RTCP packet; returns False on any error.
+Boolean SRTPCryptographicContext
+::processIncomingSRTCPPacket(u_int8_t* buffer, unsigned inPacketSize,
+			     unsigned& outPacketSize) {
+#ifndef NO_OPENSSL
+  do {
+    if (inPacketSize < 12) {
+      // For SRTCP, 8 is the minumum RTCP packet size, but there's also a mandatory
+      // 4-byte "E+SRTCP index" word.
+#ifdef DEBUG
+      fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTCPPacket(): Error: Packet size %d is too short for SRTCP!\n", inPacketSize);
+#endif
+      break;
+    }
+
+    // Bytes following the encrypted portion: the E+SRTCP-index word, the MKI,
+    // and (if we authenticate) the authentication tag:
+    unsigned const numBytesPastEncryption
+      = 4/*E+SRTCP index*/ + SRTP_MKI_LENGTH + (weAuthenticate() ? SRTP_AUTH_TAG_LENGTH : 0);
+    if (inPacketSize <= numBytesPastEncryption) {
+#ifdef DEBUG
+      fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTCPPacket(): Error: Packet size %d is too short for encrpytion and/or authentication!\n", inPacketSize);
+#endif
+      break;
+    }
+
+    if (weAuthenticate()) {
+      // Authenticate the packet.
+      unsigned const numBytesToAuthenticate
+	= inPacketSize - (SRTP_MKI_LENGTH + SRTP_AUTH_TAG_LENGTH); // ASSERT: > 0
+      u_int8_t const* authenticationTag = &buffer[inPacketSize - SRTP_AUTH_TAG_LENGTH];
+
+      if (!verifySRTCPAuthenticationTag(buffer, numBytesToAuthenticate, authenticationTag)) {
+#ifdef DEBUG
+	fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTCPPacket(): Failed to authenticate incoming SRTCP packet!\n");
+#endif
+	break;
+      }
+    }
+
+    if (weEncryptSRTCP()) {
+      // Decrypt the SRTCP packet:
+      unsigned numEncryptedBytes = inPacketSize - numBytesPastEncryption; // ASSERT: > 0
+      // The "E+SRTCP index" word sits immediately after the encrypted portion:
+      u_int8_t const* p = &buffer[numEncryptedBytes]; // E + SRTCP index
+      u_int32_t E_plus_SRTCPIndex = (p[0]<<24)|(p[1]<<16)|(p[2]<<8)|p[3];
+      if ((E_plus_SRTCPIndex&0x80000000) != 0) {
+	// The packet is encrypted
+	// (The first 8 bytes - RTCP header + SSRC - stay in the clear:)
+	unsigned const offsetToEncryptedBytes = 8;
+	if (offsetToEncryptedBytes > numEncryptedBytes) {
+#ifdef DEBUG
+	  fprintf(stderr, "SRTPCryptographicContext::processIncomingSRTCPPacket(): Error: RTCP packet size %d is too small!\n", numEncryptedBytes);
+#endif
+	  break;
+	}
+	numEncryptedBytes -= offsetToEncryptedBytes;
+
+	u_int32_t index = E_plus_SRTCPIndex&0x7FFFFFFF;
+	u_int32_t const SSRC = (buffer[4]<<24)|(buffer[5]<<16)|(buffer[6]<<8)|buffer[7];
+	decryptSRTCPPacket(index, SSRC, &buffer[offsetToEncryptedBytes], numEncryptedBytes);
+      }
+      outPacketSize = inPacketSize - numBytesPastEncryption; // trim to what we use
+    }
+
+    return True;
+  } while (0);
+#endif
+
+  // An error occurred in the handling of the packet:
+  return False;
+}
+
+// Prepare an outgoing RTCP packet for sending as SRTCP (in place): encrypt
+// the packet body (if we encrypt), then append the "E+SRTCP index" word, the
+// MKI, and an authentication tag.  "outPacketSize" is set to the new (larger)
+// packet size.
+// NOTE: "buffer" must have room past "inPacketSize" for the appended fields
+//   (4 + SRTP_MKI_LENGTH + SRTP_AUTH_TAG_LENGTH bytes) - the caller is
+//   assumed to guarantee this.
+Boolean SRTPCryptographicContext
+::processOutgoingSRTCPPacket(u_int8_t* buffer, unsigned inPacketSize,
+			     unsigned& outPacketSize) {
+#ifndef NO_OPENSSL
+  do {
+    // Encrypt the appropriate part of the packet.
+    u_int8_t eFlag = 0x00;
+    if (weEncryptSRTCP()) {
+      unsigned const unencryptedHeaderSize = 8;
+      if (inPacketSize < unencryptedHeaderSize) { // packet is too small
+	// Hack: Let small, non RTCP packets through w/o encryption; they may be used to
+	// punch through NATs
+	outPacketSize = inPacketSize;
+	return True;
+      }
+      unsigned const encryptedDataSize = inPacketSize - unencryptedHeaderSize;
+
+      u_int8_t* const dataToEncrypt = &buffer[unencryptedHeaderSize];
+      u_int32_t const ssrc = (buffer[4]<<24)|(buffer[5]<<16)|(buffer[6]<<8)|buffer[7];
+      encryptSRTCPPacket(fSRTCPIndex, ssrc, dataToEncrypt, encryptedDataSize);
+      eFlag = 0x80; // mark the packet as encrypted
+    }
+
+    outPacketSize = inPacketSize; // initially
+
+    // Add 4 bytes for the 'E' flag and SRTCP index:
+    buffer[outPacketSize++] = (fSRTCPIndex>>24)|eFlag;
+    buffer[outPacketSize++] = fSRTCPIndex>>16;
+    buffer[outPacketSize++] = fSRTCPIndex>>8;
+    buffer[outPacketSize++] = fSRTCPIndex;
+    ++fSRTCPIndex; // for next time
+
+    // Add the MKI:
+    buffer[outPacketSize++] = MKI()>>24;
+    buffer[outPacketSize++] = MKI()>>16;
+    buffer[outPacketSize++] = MKI()>>8;
+    buffer[outPacketSize++] = MKI();
+
+    // Generate and add an authentication tag over the data built so far (except the MKI)
+    outPacketSize += generateSRTCPAuthenticationTag(buffer, outPacketSize-SRTP_MKI_LENGTH,
+						    &buffer[outPacketSize]);
+
+    return True;
+  } while (0);
+#endif
+
+  // An error occurred:
+  return False;
+}
+
+#ifndef NO_OPENSSL
+// Generate an authentication tag for outgoing SRTCP data, using the
+// SRTCP-derived keys.  Returns the number of tag bytes written.
+unsigned SRTPCryptographicContext
+::generateSRTCPAuthenticationTag(u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+				 u_int8_t* resultAuthenticationTag) {
+  return generateAuthenticationTag(fDerivedKeys.srtcp,
+				   dataToAuthenticate, numBytesToAuthenticate,
+				   resultAuthenticationTag);
+}
+
+// Verify an incoming SRTP packet's authentication tag, using the
+// SRTP-derived keys.  Per RFC 3711, the ROC is appended to the
+// authenticated data before the HMAC is computed - which this routine does
+// by writing the ROC into the buffer just past "numBytesToAuthenticate"
+// (so the buffer is deliberately modified; see the comments below for why
+// this is safe).
+Boolean SRTPCryptographicContext
+::verifySRTPAuthenticationTag(u_int8_t* dataToAuthenticate, unsigned numBytesToAuthenticate,
+			      u_int32_t roc, u_int8_t const* authenticationTag) {
+  // Append the (4-byte) 'ROC' (roll-over counter) to "dataToAuthenticate" before computing
+  // the authentication tag.  We can do this because we have enough space after
+  // "dataToAuthenticate":
+  //   - If we're encrypted, then there's assumed to be a (4-byte) MKI there.  Just overwrite
+  //     that.  (If we need the MKI, we could copy it beforehand; later, allow for there being
+  //     no MKI #####)
+  //   - If we're not encrypted, then the ROC will overwrite part of the existing
+  //     authentication tag, so we need to make a copy of this.
+  u_int8_t const* existingAuthenticationTag;
+  Boolean haveMKI = True; // later, allow for there being no MKI #####
+  u_int8_t authenticationTagCopy[SRTP_AUTH_TAG_LENGTH];
+
+  if (fMIKEYState.encryptSRTP() && haveMKI) {
+    existingAuthenticationTag = authenticationTag;
+  } else {
+    // The ROC write below would clobber the tag, so save it first:
+    memcpy(authenticationTagCopy, authenticationTag, sizeof authenticationTagCopy);
+    existingAuthenticationTag = authenticationTagCopy;
+  }
+
+  // Write the ROC (big-endian) just past the authenticated data:
+  dataToAuthenticate[numBytesToAuthenticate++] = roc>>24;
+  dataToAuthenticate[numBytesToAuthenticate++] = roc>>16;
+  dataToAuthenticate[numBytesToAuthenticate++] = roc>>8;
+  dataToAuthenticate[numBytesToAuthenticate++] = roc;
+
+  return verifyAuthenticationTag(fDerivedKeys.srtp,
+				 dataToAuthenticate, numBytesToAuthenticate,
+				 existingAuthenticationTag);
+}
+
+// Verify an incoming SRTCP packet's authentication tag, using the
+// SRTCP-derived keys.
+Boolean SRTPCryptographicContext
+::verifySRTCPAuthenticationTag(u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+			       u_int8_t const* authenticationTag) {
+  return verifyAuthenticationTag(fDerivedKeys.srtcp,
+				 dataToAuthenticate, numBytesToAuthenticate,
+				 authenticationTag);
+}
+
+// Decrypt an SRTP payload in place, using the SRTP-derived keys.
+// (AES in counter mode, so decryption == encryption.)
+void SRTPCryptographicContext
+::decryptSRTPPacket(u_int64_t index, u_int32_t ssrc, u_int8_t* data, unsigned numDataBytes) {
+  cryptData(fDerivedKeys.srtp, index, ssrc, data, numDataBytes);
+}
+
+// Decrypt an SRTCP payload in place, using the SRTCP-derived keys.
+void SRTPCryptographicContext
+::decryptSRTCPPacket(u_int32_t index, u_int32_t ssrc, u_int8_t* data, unsigned numDataBytes) {
+  cryptData(fDerivedKeys.srtcp, (u_int64_t)index, ssrc, data, numDataBytes);
+}
+
+// Encrypt an outgoing SRTCP payload in place (same CTR-mode operation as
+// decryption), using the SRTCP-derived keys.
+void SRTPCryptographicContext
+::encryptSRTCPPacket(u_int32_t index, u_int32_t ssrc, u_int8_t* data, unsigned numDataBytes) {
+  cryptData(fDerivedKeys.srtcp, (u_int64_t)index, ssrc, data, numDataBytes);
+}
+
+// Compute a HMAC-SHA1 over "dataToAuthenticate" with "keysToUse"'s auth key,
+// and copy its first SRTP_AUTH_TAG_LENGTH bytes (the truncated tag) into
+// "resultAuthenticationTag".  Returns the number of tag bytes written
+// (0 on a - shouldn't-happen - configuration error).
+unsigned SRTPCryptographicContext
+::generateAuthenticationTag(derivedKeys& keysToUse,
+			    u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+			    u_int8_t* resultAuthenticationTag) {
+  if (SRTP_AUTH_TAG_LENGTH > SHA1_DIGEST_LEN) return 0; // sanity check; shouldn't happen
+
+  u_int8_t computedAuthTag[SHA1_DIGEST_LEN];
+  HMAC_SHA1(keysToUse.authKey, sizeof keysToUse.authKey,
+	    dataToAuthenticate, numBytesToAuthenticate,
+	    computedAuthTag);
+
+  // The tag is simply a prefix of the full HMAC output:
+  unsigned i = 0;
+  while (i < SRTP_AUTH_TAG_LENGTH) {
+    resultAuthenticationTag[i] = computedAuthTag[i];
+    ++i;
+  }
+
+  return SRTP_AUTH_TAG_LENGTH;
+}
+
+// Check a packet's authentication tag, by recomputing the HMAC-SHA1 over
+// "dataToAuthenticate" and comparing its (truncated) output with
+// "authenticationTag".  Returns True iff the tags match.
+// The comparison is done in constant time (accumulating XOR differences
+// rather than returning at the first mismatch), so that the time taken
+// doesn't reveal how many leading tag bytes matched - a classic timing
+// side-channel against MAC verification of untrusted network input.
+Boolean SRTPCryptographicContext
+::verifyAuthenticationTag(derivedKeys& keysToUse,
+			  u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+			  u_int8_t const* authenticationTag) {
+  if (SRTP_AUTH_TAG_LENGTH > SHA1_DIGEST_LEN) return False; // sanity check
+
+  u_int8_t computedAuthTag[SHA1_DIGEST_LEN];
+  HMAC_SHA1(keysToUse.authKey, sizeof keysToUse.authKey,
+	    dataToAuthenticate, numBytesToAuthenticate,
+	    computedAuthTag);
+
+  u_int8_t difference = 0;
+  for (unsigned i = 0; i < SRTP_AUTH_TAG_LENGTH; ++i) {
+    difference |= computedAuthTag[i] ^ authenticationTag[i];
+  }
+
+  return difference == 0 ? True : False;
+}
+
+ // En/decrypt "data" in place using AES in counter mode (RFC 3711, section
+ // 4.1.1): build the per-packet IV from the session salt, the SSRC, and the
+ // packet index, then XOR the data with successive AES-encrypted counter
+ // blocks.  Because it's a pure XOR keystream, the same routine performs
+ // both encryption and decryption.
+ void SRTPCryptographicContext::cryptData(derivedKeys& keys, u_int64_t index, u_int32_t ssrc,
+					 u_int8_t* data, unsigned numDataBytes) {
+  // Begin by constructing the IV: (salt * 2^16) XOR (ssrc * 2^64) XOR (index * 2^16)
+  u_int8_t iv[SRTP_CIPHER_KEY_LENGTH];
+
+  memmove(iv, keys.salt, SRTP_CIPHER_SALT_LENGTH);
+  iv[SRTP_CIPHER_SALT_LENGTH] = iv[SRTP_CIPHER_SALT_LENGTH + 1] = 0;
+  // (This is based upon the fact that SRTP_CIPHER_KEY_LENGTH == SRTP_CIPHER_SALT_LENGTH + 2)
+
+  // XOR in the SSRC at bytes [len-12 .. len-9] (i.e., shifted left 2^64):
+  iv[sizeof iv-12] ^= ssrc>>24; iv[sizeof iv-11] ^= ssrc>>16; iv[sizeof iv-10] ^= ssrc>>8; iv[sizeof iv-9] ^= ssrc;
+
+  // XOR in the 48-bit index at bytes [len-8 .. len-3] (i.e., shifted left 2^16):
+  iv[sizeof iv-8] ^= index>>40; iv[sizeof iv-7] ^= index>>32; iv[sizeof iv-6] ^= index>>24; iv[sizeof iv-5] ^= index>>16; iv[sizeof iv-4] ^= index>>8; iv[sizeof iv-3] ^= index;
+
+  // Now generate as many blocks of the keystream as we need, by repeatedly encrypting
+  // the IV using our cipher key.  (After each step, we increment the IV by 1.)
+  // We then XOR the keystream into the provided data, to do the en/decryption.
+  AES_KEY key;
+  AES_set_encrypt_key(keys.cipherKey, 8*SRTP_CIPHER_KEY_LENGTH, &key);
+
+  while (numDataBytes > 0) {
+    u_int8_t keyStream[SRTP_CIPHER_KEY_LENGTH];
+    AES_encrypt(iv, keyStream, &key);
+
+    unsigned numBytesToUse
+      = numDataBytes < SRTP_CIPHER_KEY_LENGTH ? numDataBytes : SRTP_CIPHER_KEY_LENGTH;
+    for (unsigned i = 0; i < numBytesToUse; ++i) data[i] ^= keyStream[i];
+    data += numBytesToUse;
+    numDataBytes -= numBytesToUse;
+
+    // Increment the IV by 1:
+    // (Big-endian ripple-carry: bump the last byte, and carry while a byte
+    //  wraps around to zero.)
+    u_int8_t* ptr = &iv[sizeof iv];
+    do {
+      --ptr;
+      ++*ptr;
+    } while (*ptr == 0x00);
+  }
+}
+
+// Derive all session keys (SRTP and SRTCP cipher/auth/salt keys) from the
+// MIKEY-supplied master key and salt, into "fDerivedKeys".
+void SRTPCryptographicContext::performKeyDerivation() {
+  // Perform a key derivation for the master key+salt, as defined
+  // by RFC 3711, section 4.3:
+  deriveKeysFromMaster(masterKey(), masterSalt(), fDerivedKeys);
+}
+
+// Helper macro: derive one key of "sizeof resultKey" bytes for "label":
+#define deriveKey(label, resultKey) deriveSingleKey(masterKey, salt, label, sizeof resultKey, resultKey)
+
+// Derive the six session keys (cipher, auth, and salt - for each of SRTP
+// and SRTCP) from the master key+salt, per RFC 3711, section 4.3.
+void SRTPCryptographicContext
+::deriveKeysFromMaster(u_int8_t const* masterKey, u_int8_t const* salt,
+		       allDerivedKeys& allKeysResult) {
+  // Derive cipher, salt, and auth keys for both SRTP and SRTCP:
+  deriveKey(label_srtp_encryption, allKeysResult.srtp.cipherKey);
+  deriveKey(label_srtp_msg_auth, allKeysResult.srtp.authKey);
+  deriveKey(label_srtp_salt, allKeysResult.srtp.salt);
+
+  deriveKey(label_srtcp_encryption, allKeysResult.srtcp.cipherKey);
+  deriveKey(label_srtcp_msg_auth, allKeysResult.srtcp.authKey);
+  deriveKey(label_srtcp_salt, allKeysResult.srtcp.salt);
+}
+
+// Block size of the KDF's PRF cipher (AES):
+#define KDF_PRF_CIPHER_BLOCK_LENGTH 16
+
+// Derive a single session key of "resultKeyLength" bytes for "label", by
+// running AES (keyed with the master key) in counter mode over a block
+// built from the master salt XOR the label.
+void SRTPCryptographicContext
+::deriveSingleKey(u_int8_t const* masterKey, u_int8_t const* salt,
+		  SRTPKeyDerivationLabel label,
+		  unsigned resultKeyLength, u_int8_t* resultKey) {
+  // This looks a little different from the mechanism described in RFC 3711, section 4.3, but
+  // it's what the 'libsrtp' code does, so I hope it's functionally equivalent:
+  AES_KEY key;
+  AES_set_encrypt_key(masterKey, 8*SRTP_CIPHER_KEY_LENGTH, &key);
+
+  u_int8_t counter[KDF_PRF_CIPHER_BLOCK_LENGTH];
+  // Set the first bytes of "counter" to be the 'salt'; set the remainder to zero:
+  memmove(counter, salt, SRTP_CIPHER_SALT_LENGTH);
+  for (unsigned i = SRTP_CIPHER_SALT_LENGTH; i < sizeof counter; ++i) {
+    counter[i] = 0;
+  }
+
+  // XOR "label" into byte 7 of "counter":
+  counter[7] ^= label;
+
+  // And use the resulting "counter" as the plaintext:
+  u_int8_t const* plaintext = counter;
+
+  // Generate successive cipher blocks until we have enough key bytes:
+  unsigned numBytesRemaining = resultKeyLength;
+  while (numBytesRemaining > 0) {
+    u_int8_t ciphertext[KDF_PRF_CIPHER_BLOCK_LENGTH];
+    AES_encrypt(plaintext, ciphertext, &key);
+
+    unsigned numBytesToCopy
+      = numBytesRemaining < KDF_PRF_CIPHER_BLOCK_LENGTH ? numBytesRemaining : KDF_PRF_CIPHER_BLOCK_LENGTH;
+    memmove(resultKey, ciphertext, numBytesToCopy);
+    resultKey += numBytesToCopy;
+    numBytesRemaining -= numBytesToCopy;
+    ++counter[15]; // for next time
+  }
+}
+#endif
diff --git a/liveMedia/ServerMediaSession.cpp b/liveMedia/ServerMediaSession.cpp
new file mode 100644
index 0000000..043d974
--- /dev/null
+++ b/liveMedia/ServerMediaSession.cpp
@@ -0,0 +1,456 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A data structure that represents a session that consists of
+// potentially multiple (audio and/or video) sub-sessions
+// (This data structure is used for media *streamers* - i.e., servers.
+// For media receivers, use "MediaSession" instead.)
+// Implementation
+
+#include "ServerMediaSession.hh"
+#include <GroupsockHelper.hh>
+#include <math.h>
+#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
+#define snprintf _snprintf
+#endif
+
+////////// ServerMediaSession //////////
+
+// Factory function: constructs a new "ServerMediaSession" object.
+ServerMediaSession* ServerMediaSession
+::createNew(UsageEnvironment& env,
+            char const* streamName, char const* info,
+            char const* description, Boolean isSSM, char const* miscSDPLines) {
+  ServerMediaSession* newSession
+    = new ServerMediaSession(env, streamName, info, description, isSSM, miscSDPLines);
+  return newSession;
+}
+
+// Looks up a previously-created "ServerMediaSession" by name.  On success,
+// sets "resultSession" and returns True; otherwise "resultSession" is NULL,
+// an error message is set in "env", and False is returned.
+Boolean ServerMediaSession
+::lookupByName(UsageEnvironment& env, char const* mediumName,
+               ServerMediaSession*& resultSession) {
+  resultSession = NULL; // default, in case the lookup fails
+
+  Medium* medium;
+  if (!Medium::lookupByName(env, mediumName, medium)) return False;
+  if (!medium->isServerMediaSession()) {
+    // A medium with that name exists, but it's of the wrong type:
+    env.setResultMsg(mediumName, " is not a 'ServerMediaSession' object");
+    return False;
+  }
+
+  resultSession = (ServerMediaSession*)medium;
+  return True;
+}
+
+static char const* const libNameStr = "LIVE555 Streaming Media v";
+char const* const libVersionStr = LIVEMEDIA_LIBRARY_VERSION_STRING;
+
+// Constructor.  NULL "info"/"description" arguments are replaced by a default
+// string naming the library and its version; NULL "streamName"/"miscSDPLines"
+// are replaced by empty strings.  All strings are copied.
+ServerMediaSession::ServerMediaSession(UsageEnvironment& env,
+                                       char const* streamName,
+                                       char const* info,
+                                       char const* description,
+                                       Boolean isSSM, char const* miscSDPLines)
+  : Medium(env), fIsSSM(isSSM), fSubsessionsHead(NULL),
+    fSubsessionsTail(NULL), fSubsessionCounter(0),
+    fReferenceCount(0), fDeleteWhenUnreferenced(False) {
+  fStreamName = strDup(streamName == NULL ? "" : streamName);
+
+  // Build the "<library name><library version>" default only if we'll need it:
+  char* defaultNameStr = NULL;
+  if (info == NULL || description == NULL) {
+    defaultNameStr = new char[strlen(libNameStr) + strlen(libVersionStr) + 1];
+    sprintf(defaultNameStr, "%s%s", libNameStr, libVersionStr);
+  }
+  fInfoSDPString = strDup(info != NULL ? info : defaultNameStr);
+  fDescriptionSDPString = strDup(description != NULL ? description : defaultNameStr);
+  delete[] defaultNameStr; // our copies were made by strDup() above
+
+  fMiscSDPLines = strDup(miscSDPLines == NULL ? "" : miscSDPLines);
+
+  gettimeofday(&fCreationTime, NULL); // used later as the SDP session id
+}
+
+// Destructor: closes all subsessions first, then frees our strDup()ed strings.
+ServerMediaSession::~ServerMediaSession() {
+  deleteAllSubsessions();
+  delete[] fStreamName;
+  delete[] fInfoSDPString;
+  delete[] fDescriptionSDPString;
+  delete[] fMiscSDPLines;
+}
+
+// Appends "subsession" to our (singly-linked) list of subsessions, assigning
+// it the next track number.  Returns False - without adding it - if the
+// subsession already belongs to some session.
+Boolean
+ServerMediaSession::addSubsession(ServerMediaSubsession* subsession) {
+  if (subsession->fParentSession != NULL) return False; // it's already used
+
+  if (fSubsessionsHead == NULL) {
+    fSubsessionsHead = subsession; // the list was empty
+  } else {
+    fSubsessionsTail->fNext = subsession;
+  }
+  fSubsessionsTail = subsession;
+
+  subsession->fParentSession = this;
+  subsession->fTrackNumber = ++fSubsessionCounter; // track numbers start at 1
+  return True;
+}
+
+// Negotiates a playback scale (e.g., for fast-forward) that *all* subsessions
+// can support, replacing "scale" (in/out) with the agreed value.
+// Strategy: ask each subsession for its closest supported scale; if they
+// disagree, retry with the candidate closest to 1; if they still disagree,
+// fall back to 1 (normal speed), which every subsession supports.
+void ServerMediaSession::testScaleFactor(float& scale) {
+  // First, try setting all subsessions to the desired scale.
+  // If the subsessions' actual scales differ from each other, choose the
+  // value that's closest to 1, and then try re-setting all subsessions to that
+  // value. If the subsessions' actual scales still differ, re-set them all to 1.
+  float minSSScale = 1.0;
+  float maxSSScale = 1.0;
+  float bestSSScale = 1.0;
+  float bestDistanceTo1 = 0.0;
+  ServerMediaSubsession* subsession;
+  for (subsession = fSubsessionsHead; subsession != NULL;
+       subsession = subsession->fNext) {
+    float ssscale = scale;
+    subsession->testScaleFactor(ssscale); // each subsession may adjust "ssscale"
+    if (subsession == fSubsessionsHead) { // this is the first subsession
+      minSSScale = maxSSScale = bestSSScale = ssscale;
+      bestDistanceTo1 = (float)fabs(ssscale - 1.0f);
+    } else {
+      if (ssscale < minSSScale) {
+        minSSScale = ssscale;
+      } else if (ssscale > maxSSScale) {
+        maxSSScale = ssscale;
+      }
+
+      float distanceTo1 = (float)fabs(ssscale - 1.0f);
+      if (distanceTo1 < bestDistanceTo1) {
+        bestSSScale = ssscale;
+        bestDistanceTo1 = distanceTo1;
+      }
+    }
+  }
+  if (minSSScale == maxSSScale) {
+    // All subsessions are at the same scale: minSSScale == bestSSScale == maxSSScale
+    scale = minSSScale;
+    return;
+  }
+
+  // The scales for each subsession differ. Try to set each one to the value
+  // that's closest to 1:
+  for (subsession = fSubsessionsHead; subsession != NULL;
+       subsession = subsession->fNext) {
+    float ssscale = bestSSScale;
+    subsession->testScaleFactor(ssscale);
+    if (ssscale != bestSSScale) break; // no luck
+  }
+  if (subsession == NULL) {
+    // All subsessions are at the same scale: bestSSScale
+    scale = bestSSScale;
+    return;
+  }
+
+  // Still no luck. Set each subsession's scale to 1:
+  for (subsession = fSubsessionsHead; subsession != NULL;
+       subsession = subsession->fNext) {
+    float ssscale = 1;
+    subsession->testScaleFactor(ssscale);
+  }
+  scale = 1;
+}
+
+// Returns the duration (in seconds) of this session.  Return-value convention
+// (relied upon by "generateSDPDescription()" and "rangeSDPLine()"):
+//   > 0.0: all subsessions share this same bounded duration
+//     0.0: the session is unbounded (or has no subsessions)
+//   < 0.0: subsessions differ (or seek by 'absolute' time), so "a=range:"
+//          attributes belong at the media level, not the session level
+float ServerMediaSession::duration() const {
+  float minSubsessionDuration = 0.0;
+  float maxSubsessionDuration = 0.0;
+  for (ServerMediaSubsession* subsession = fSubsessionsHead; subsession != NULL;
+       subsession = subsession->fNext) {
+    // Hack: If any subsession supports seeking by 'absolute' time, then return a negative value, to indicate that only subsessions
+    // will have a "a=range:" attribute:
+    char* absStartTime = NULL; char* absEndTime = NULL;
+    subsession->getAbsoluteTimeRange(absStartTime, absEndTime);
+    if (absStartTime != NULL) return -1.0f;
+
+    float ssduration = subsession->duration();
+    if (subsession == fSubsessionsHead) { // this is the first subsession
+      minSubsessionDuration = maxSubsessionDuration = ssduration;
+    } else if (ssduration < minSubsessionDuration) {
+      minSubsessionDuration = ssduration;
+    } else if (ssduration > maxSubsessionDuration) {
+      maxSubsessionDuration = ssduration;
+    }
+  }
+
+  if (maxSubsessionDuration != minSubsessionDuration) {
+    return -maxSubsessionDuration; // because subsession durations differ
+  } else {
+    return maxSubsessionDuration; // all subsession durations are the same
+  }
+}
+
+// Called to note client activity on this session.  Subclasses that implement
+// a liveness/timeout policy redefine this; by default we take no action.
+void ServerMediaSession::noteLiveness() {
+}
+
+// Closes every subsession (closing the head closes the whole chain, because
+// each subsession's destructor closes its "fNext"), then resets our list:
+void ServerMediaSession::deleteAllSubsessions() {
+  Medium::close(fSubsessionsHead);
+  fSubsessionsHead = NULL;
+  fSubsessionsTail = NULL;
+  fSubsessionCounter = 0; // track numbering restarts if subsessions are re-added
+}
+
+// Run-time type check used by "lookupByName()": identifies this "Medium"
+// as a "ServerMediaSession".
+Boolean ServerMediaSession::isServerMediaSession() const {
+  return True;
+}
+
+// Generates and returns a complete SDP description for this session, as a
+// newly-allocated string that the caller must delete[].  Returns NULL if no
+// subsession produced any usable SDP lines.
+char* ServerMediaSession::generateSDPDescription() {
+  AddressString ipAddressStr(ourIPAddress(envir()));
+  unsigned ipAddressStrSize = strlen(ipAddressStr.val());
+
+  // For a SSM sessions, we need a "a=source-filter: incl ..." line also:
+  char* sourceFilterLine;
+  if (fIsSSM) {
+    char const* const sourceFilterFmt =
+      "a=source-filter: incl IN IP4 * %s\r\n"
+      "a=rtcp-unicast: reflection\r\n";
+    unsigned const sourceFilterFmtSize = strlen(sourceFilterFmt) + ipAddressStrSize + 1;
+
+    sourceFilterLine = new char[sourceFilterFmtSize];
+    sprintf(sourceFilterLine, sourceFilterFmt, ipAddressStr.val());
+  } else {
+    sourceFilterLine = strDup(""); // always heap-allocated, so delete[] below is safe
+  }
+
+  char* rangeLine = NULL; // for now
+  char* sdp = NULL; // for now
+
+  do {
+    // Count the lengths of each subsession's media-level SDP lines.
+    // (We do this first, because the call to "subsession->sdpLines()"
+    // causes correct subsession 'duration()'s to be calculated later.)
+    unsigned sdpLength = 0;
+    ServerMediaSubsession* subsession;
+    for (subsession = fSubsessionsHead; subsession != NULL;
+         subsession = subsession->fNext) {
+      char const* sdpLines = subsession->sdpLines();
+      if (sdpLines == NULL) continue; // the media's not available
+      sdpLength += strlen(sdpLines);
+    }
+    if (sdpLength == 0) break; // the session has no usable subsessions
+
+    // Unless subsessions have differing durations, we also have a "a=range:" line:
+    // (see "duration()" for the sign convention used here)
+    float dur = duration();
+    if (dur == 0.0) {
+      rangeLine = strDup("a=range:npt=0-\r\n"); // unbounded session
+    } else if (dur > 0.0) {
+      char buf[100];
+      sprintf(buf, "a=range:npt=0-%.3f\r\n", dur);
+      rangeLine = strDup(buf);
+    } else { // subsessions have differing durations, so "a=range:" lines go there
+      rangeLine = strDup("");
+    }
+
+    char const* const sdpPrefixFmt =
+      "v=0\r\n"
+      "o=- %ld%06ld %d IN IP4 %s\r\n"
+      "s=%s\r\n"
+      "i=%s\r\n"
+      "t=0 0\r\n"
+      "a=tool:%s%s\r\n"
+      "a=type:broadcast\r\n"
+      "a=control:*\r\n"
+      "%s"
+      "%s"
+      "a=x-qt-text-nam:%s\r\n"
+      "a=x-qt-text-inf:%s\r\n"
+      "%s";
+    // Worst-case sizes for the numeric fields: 20 digits for tv_sec,
+    // 6 for tv_usec (zero-padded), 20 for the version number:
+    sdpLength += strlen(sdpPrefixFmt)
+      + 20 + 6 + 20 + ipAddressStrSize
+      + strlen(fDescriptionSDPString)
+      + strlen(fInfoSDPString)
+      + strlen(libNameStr) + strlen(libVersionStr)
+      + strlen(sourceFilterLine)
+      + strlen(rangeLine)
+      + strlen(fDescriptionSDPString)
+      + strlen(fInfoSDPString)
+      + strlen(fMiscSDPLines);
+    sdpLength += 1000; // in case the length of the "subsession->sdpLines()" calls below change
+    sdp = new char[sdpLength];
+    if (sdp == NULL) break; // (note: plain 'new' normally throws rather than returning NULL)
+
+    // Generate the SDP prefix (session-level lines):
+    snprintf(sdp, sdpLength, sdpPrefixFmt,
+             fCreationTime.tv_sec, fCreationTime.tv_usec, // o= <session id>
+             1, // o= <version> // (needs to change if params are modified)
+             ipAddressStr.val(), // o= <address>
+             fDescriptionSDPString, // s= <description>
+             fInfoSDPString, // i= <info>
+             libNameStr, libVersionStr, // a=tool:
+             sourceFilterLine, // a=source-filter: incl (if a SSM session)
+             rangeLine, // a=range: line
+             fDescriptionSDPString, // a=x-qt-text-nam: line
+             fInfoSDPString, // a=x-qt-text-inf: line
+             fMiscSDPLines); // miscellaneous session SDP lines (if any)
+
+    // Then, add the (media-level) lines for each subsession:
+    // ("mediaSDP" walks to the current end of the string; "sdpLength" shrinks
+    //  to the space remaining, so snprintf() below can never overflow)
+    char* mediaSDP = sdp;
+    for (subsession = fSubsessionsHead; subsession != NULL;
+         subsession = subsession->fNext) {
+      unsigned mediaSDPLength = strlen(mediaSDP);
+      mediaSDP += mediaSDPLength;
+      sdpLength -= mediaSDPLength;
+      if (sdpLength <= 1) break; // the SDP has somehow become too long
+
+      char const* sdpLines = subsession->sdpLines();
+      if (sdpLines != NULL) snprintf(mediaSDP, sdpLength, "%s", sdpLines);
+    }
+  } while (0);
+
+  delete[] rangeLine; delete[] sourceFilterLine;
+  return sdp;
+}
+
+
+////////// ServerMediaSubsessionIterator //////////
+
+// Constructs an iterator over "session"'s subsessions, positioned at the start.
+ServerMediaSubsessionIterator
+::ServerMediaSubsessionIterator(ServerMediaSession& session)
+  : fOurSession(session) {
+  reset(); // begin at the first subsession
+}
+
+ServerMediaSubsessionIterator::~ServerMediaSubsessionIterator() {
+  // No cleanup needed: we don't own the session or its subsessions.
+}
+
+// Returns the next subsession, or NULL once the list has been exhausted.
+ServerMediaSubsession* ServerMediaSubsessionIterator::next() {
+  ServerMediaSubsession* const current = fNextPtr;
+  if (current != NULL) fNextPtr = current->fNext; // advance for the next call
+  return current;
+}
+
+// Repositions the iterator at the session's first subsession:
+void ServerMediaSubsessionIterator::reset() {
+  fNextPtr = fOurSession.fSubsessionsHead;
+}
+
+
+////////// ServerMediaSubsession //////////
+
+// Constructs a subsession that does not yet belong to any "ServerMediaSession"
+// (a track number of 0 marks it as unattached; see "trackId()"):
+ServerMediaSubsession::ServerMediaSubsession(UsageEnvironment& env)
+  : Medium(env),
+    fParentSession(NULL), fServerAddressForSDP(0), fPortNumForSDP(0),
+    fNext(NULL), fTrackNumber(0), fTrackId(NULL) {
+}
+
+ServerMediaSubsession::~ServerMediaSubsession() {
+  delete[] (char*)fTrackId; // allocated by strDup() in "trackId()"; cast away const to free
+  Medium::close(fNext); // closing our successor tears down the rest of the chain, recursively
+}
+
+// Returns this subsession's track id ("track<N>"), building and caching it on
+// first use.  Returns NULL if we're not yet part of a "ServerMediaSession".
+char const* ServerMediaSubsession::trackId() {
+  if (fTrackNumber == 0) return NULL; // not yet in a ServerMediaSession
+
+  if (fTrackId == NULL) {
+    // First call: format the id, then cache a heap copy of it:
+    char buf[100];
+    sprintf(buf, "track%d", fTrackNumber);
+    fTrackId = strDup(buf);
+  }
+  return fTrackId;
+}
+
+// Default 'pause' handler; subsession subclasses that support pausing
+// redefine this.  By default, nothing happens.
+void ServerMediaSubsession::pauseStream(unsigned /*clientSessionId*/,
+                                        void* /*streamToken*/) {
+}
+// Default 'seek by NPT' handler: no seeking is performed, and we report that
+// 0 bytes were involved in the (non-)seek:
+void ServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
+                                       void* /*streamToken*/, double& /*seekNPT*/, double /*streamDuration*/, u_int64_t& numBytes) {
+  numBytes = 0;
+}
+// Default 'seek by absolute time' handler.  We don't support this kind of
+// seeking, which we signal by freeing and NULLing both time strings:
+void ServerMediaSubsession::seekStream(unsigned /*clientSessionId*/,
+                                       void* /*streamToken*/, char*& absStart, char*& absEnd) {
+  delete[] absStart; absStart = NULL;
+  delete[] absEnd; absEnd = NULL;
+}
+// Default 'null seek' handler: nothing happens, and 0 bytes are reported:
+void ServerMediaSubsession::nullSeekStream(unsigned /*clientSessionId*/, void* /*streamToken*/,
+                                           double streamEndTime, u_int64_t& numBytes) {
+  numBytes = 0;
+}
+// Default 'set scale' handler; subclasses that support scaled playback
+// redefine this.  By default, nothing happens.
+void ServerMediaSubsession::setStreamScale(unsigned /*clientSessionId*/,
+                                           void* /*streamToken*/, float /*scale*/) {
+}
+// Default implementation: report the stream as being at NPT 0.0:
+float ServerMediaSubsession::getCurrentNPT(void* /*streamToken*/) {
+  return 0.0;
+}
+// Default implementation: no underlying source is exposed:
+FramedSource* ServerMediaSubsession::getStreamSource(void* /*streamToken*/) {
+  return NULL;
+}
+// Default 'delete stream' handler; subclasses that allocate per-client stream
+// state redefine this.  By default, nothing happens.
+void ServerMediaSubsession::deleteStream(unsigned /*clientSessionId*/,
+                                         void*& /*streamToken*/) {
+}
+
+// Default implementation: only normal-speed playback (scale 1) is supported,
+// so any requested scale is coerced to 1:
+void ServerMediaSubsession::testScaleFactor(float& scale) {
+  scale = 1;
+}
+
+// Default implementation: an unbounded session, indicated by a 0.0 duration:
+float ServerMediaSubsession::duration() const {
+  return 0.0;
+}
+
+// Default implementation: seeking by 'absolute' time is unsupported, which we
+// indicate by returning NULL for both the start and end time strings:
+void ServerMediaSubsession::getAbsoluteTimeRange(char*& absStartTime, char*& absEndTime) const {
+  absStartTime = NULL;
+  absEndTime = NULL;
+}
+
+// Records the server address and port number to be advertised in our SDP:
+void ServerMediaSubsession::setServerAddressAndPortForSDP(netAddressBits addressBits,
+                                                          portNumBits portBits) {
+  fServerAddressForSDP = addressBits;
+  fPortNumForSDP = portBits;
+}
+
+// Returns a newly-allocated "a=range:" SDP line for this subsession (which the
+// caller must delete[]), an empty string if the range belongs at the session
+// level instead, or NULL if we're not yet part of a session.
+char const*
+ServerMediaSubsession::rangeSDPLine() const {
+  // First, check for the special case where we support seeking by 'absolute' time:
+  char* absStart = NULL; char* absEnd = NULL;
+  getAbsoluteTimeRange(absStart, absEnd);
+  if (absStart != NULL) {
+    // Bug fix: the time strings come from a subclass and can be arbitrarily
+    // long, so size the buffer from them rather than using a fixed-size buffer
+    // (which could overflow).  (30 covers "a=range:clock=", "-", "\r\n", NUL.)
+    size_t bufSize = strlen(absStart) + (absEnd != NULL ? strlen(absEnd) : 0) + 30;
+    char* buf = new char[bufSize];
+
+    if (absEnd != NULL) {
+      snprintf(buf, bufSize, "a=range:clock=%s-%s\r\n", absStart, absEnd);
+    } else {
+      snprintf(buf, bufSize, "a=range:clock=%s-\r\n", absStart);
+    }
+    char const* result = strDup(buf);
+    delete[] buf;
+    return result;
+  }
+
+  if (fParentSession == NULL) return NULL;
+
+  // If all of our parent's subsessions have the same duration
+  // (as indicated by "fParentSession->duration() >= 0"), there's no "a=range:" line:
+  if (fParentSession->duration() >= 0.0) return strDup("");
+
+  // Use our own duration for a "a=range:" line:
+  float ourDuration = duration();
+  if (ourDuration == 0.0) {
+    return strDup("a=range:npt=0-\r\n"); // unbounded subsession
+  } else {
+    char buf[100]; // ample for "a=range:npt=0-" plus a formatted float
+    sprintf(buf, "a=range:npt=0-%.3f\r\n", ourDuration);
+    return strDup(buf);
+  }
+}
diff --git a/liveMedia/SimpleRTPSink.cpp b/liveMedia/SimpleRTPSink.cpp
new file mode 100644
index 0000000..2e51af8
--- /dev/null
+++ b/liveMedia/SimpleRTPSink.cpp
@@ -0,0 +1,94 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simple RTP sink that packs frames into each outgoing
+// packet, without any fragmentation or special headers.
+// Implementation
+
+#include "SimpleRTPSink.hh"
+
+// Constructor.  A NULL "sdpMediaTypeString" is recorded as "unknown".
+// When "doNormalMBitRule" is requested, the RTP 'M' bit is set on the final
+// packet of each frame for non-audio media only (the usual RTP convention).
+SimpleRTPSink::SimpleRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+                             unsigned char rtpPayloadFormat,
+                             unsigned rtpTimestampFrequency,
+                             char const* sdpMediaTypeString,
+                             char const* rtpPayloadFormatName,
+                             unsigned numChannels,
+                             Boolean allowMultipleFramesPerPacket,
+                             Boolean doNormalMBitRule)
+  : MultiFramedRTPSink(env, RTPgs, rtpPayloadFormat,
+                       rtpTimestampFrequency, rtpPayloadFormatName,
+                       numChannels),
+    fAllowMultipleFramesPerPacket(allowMultipleFramesPerPacket), fSetMBitOnNextPacket(False) {
+  char const* mediaType
+    = sdpMediaTypeString == NULL ? "unknown" : sdpMediaTypeString;
+  fSDPMediaTypeString = strDup(mediaType);
+  fSetMBitOnLastFrames
+    = doNormalMBitRule && strcmp(fSDPMediaTypeString, "audio") != 0;
+}
+
+SimpleRTPSink::~SimpleRTPSink() {
+  // "fSDPMediaTypeString" came from strDup(); cast away const to free it:
+  delete[] (char*)fSDPMediaTypeString;
+}
+
+// Factory function: constructs a new "SimpleRTPSink" object.
+SimpleRTPSink*
+SimpleRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+                         unsigned char rtpPayloadFormat,
+                         unsigned rtpTimestampFrequency,
+                         char const* sdpMediaTypeString,
+                         char const* rtpPayloadFormatName,
+                         unsigned numChannels,
+                         Boolean allowMultipleFramesPerPacket,
+                         Boolean doNormalMBitRule) {
+  SimpleRTPSink* newSink
+    = new SimpleRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
+                        sdpMediaTypeString, rtpPayloadFormatName, numChannels,
+                        allowMultipleFramesPerPacket, doNormalMBitRule);
+  return newSink;
+}
+
+// Per-frame packetization hook: decides whether to set the RTP 'M' ('marker')
+// bit on the outgoing packet, then delegates to the base class (which sets
+// the packet's timestamp).
+void SimpleRTPSink::doSpecialFrameHandling(unsigned fragmentationOffset,
+                                           unsigned char* frameStart,
+                                           unsigned numBytesInFrame,
+                                           struct timeval framePresentationTime,
+                                           unsigned numRemainingBytes) {
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit, if appropriate:
+    if (fSetMBitOnLastFrames) setMarkerBit();
+  }
+  if (fSetMBitOnNextPacket) {
+    // An external object has asked for the 'M' bit to be set on the next packet:
+    setMarkerBit();
+    fSetMBitOnNextPacket = False; // this was a one-shot request
+  }
+
+  // Important: Also call our base class's doSpecialFrameHandling(),
+  // to set the packet's timestamp:
+  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+                                             frameStart, numBytesInFrame,
+                                             framePresentationTime,
+                                             numRemainingBytes);
+}
+
+// Tells the packetizer whether another frame may be appended to a packet that
+// already holds data; this is simply the constructor-time configuration flag:
+Boolean SimpleRTPSink::
+frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+                               unsigned /*numBytesInFrame*/) const {
+  return fAllowMultipleFramesPerPacket;
+}
+
+// Returns the SDP media type string recorded at construction time:
+char const* SimpleRTPSink::sdpMediaType() const {
+  return fSDPMediaTypeString;
+}
diff --git a/liveMedia/SimpleRTPSource.cpp b/liveMedia/SimpleRTPSource.cpp
new file mode 100644
index 0000000..8c98eb8
--- /dev/null
+++ b/liveMedia/SimpleRTPSource.cpp
@@ -0,0 +1,68 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A RTP source for a simple RTP payload format that
+// - doesn't have any special headers following the RTP header
+// - doesn't have any special framing apart from the packet data itself
+// Implementation
+
+#include "SimpleRTPSource.hh"
+#include <string.h>
+
+// Factory function: constructs a new "SimpleRTPSource" object.
+SimpleRTPSource*
+SimpleRTPSource::createNew(UsageEnvironment& env,
+                           Groupsock* RTPgs,
+                           unsigned char rtpPayloadFormat,
+                           unsigned rtpTimestampFrequency,
+                           char const* mimeTypeString,
+                           unsigned offset, Boolean doNormalMBitRule) {
+  SimpleRTPSource* newSource
+    = new SimpleRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
+                          mimeTypeString, offset, doNormalMBitRule);
+  return newSource;
+}
+
+// Constructor.  "mimeTypeString" may be NULL (see "MIMEtype()", which falls
+// back to the base class in that case).  When "doNormalMBitRule" is requested,
+// the RTP 'M' bit marks frame ends for non-audio media only.
+SimpleRTPSource
+::SimpleRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+                  unsigned char rtpPayloadFormat,
+                  unsigned rtpTimestampFrequency,
+                  char const* mimeTypeString,
+                  unsigned offset, Boolean doNormalMBitRule)
+  : MultiFramedRTPSource(env, RTPgs,
+                         rtpPayloadFormat, rtpTimestampFrequency),
+    fMIMEtypeString(strDup(mimeTypeString)), fOffset(offset) {
+  // Bug fix: guard against a NULL "mimeTypeString" before calling strncmp() on
+  // it ("MIMEtype()" explicitly allows a NULL MIME type, but the original code
+  // crashed here).  A NULL MIME type is treated like a non-audio type:
+  fUseMBitForFrameEnd = doNormalMBitRule
+    && (mimeTypeString == NULL || strncmp(mimeTypeString, "audio/", 6) != 0);
+}
+
+SimpleRTPSource::~SimpleRTPSource() {
+  // "fMIMEtypeString" came from strDup(); cast away const to free it:
+  delete[] (char*)fMIMEtypeString;
+}
+
+// This payload format has no real special header.  The frame is considered
+// complete unless we use the RTP 'M' bit to mark frame ends and it isn't set;
+// "fOffset" initial payload bytes are skipped as a pseudo 'special header'.
+Boolean SimpleRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+                       unsigned& resultSpecialHeaderSize) {
+  Boolean completesFrame = True;
+  if (fUseMBitForFrameEnd && !packet->rtpMarkerBit()) completesFrame = False;
+  fCurrentPacketCompletesFrame = completesFrame;
+
+  resultSpecialHeaderSize = fOffset;
+  return True;
+}
+
+// Returns our MIME type string, falling back to the base class's default when
+// none was supplied at construction time:
+char const* SimpleRTPSource::MIMEtype() const {
+  return fMIMEtypeString != NULL
+    ? fMIMEtypeString
+    : MultiFramedRTPSource::MIMEtype();
+}
diff --git a/liveMedia/StreamParser.cpp b/liveMedia/StreamParser.cpp
new file mode 100644
index 0000000..00862e5
--- /dev/null
+++ b/liveMedia/StreamParser.cpp
@@ -0,0 +1,210 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Abstract class for parsing a byte stream
+// Implementation
+
+#include "StreamParser.hh"
+
+#include <string.h>
+#include <stdlib.h>
+
+#define BANK_SIZE 150000
+
+// Discards all buffered, not-yet-parsed input, resetting both the current and
+// the saved parse positions to the start of an empty bank:
+void StreamParser::flushInput() {
+  fCurParserIndex = 0;
+  fSavedParserIndex = 0;
+  fRemainingUnparsedBits = 0;
+  fSavedRemainingUnparsedBits = 0;
+  fTotNumValidBytes = 0;
+}
+
+// Constructor: records the input source and client callbacks, allocates the
+// two input 'banks', and starts parsing (empty) in bank 0.
+StreamParser::StreamParser(FramedSource* inputSource,
+                           FramedSource::onCloseFunc* onInputCloseFunc,
+                           void* onInputCloseClientData,
+                           clientContinueFunc* clientContinueFunc,
+                           void* clientContinueClientData)
+  : fInputSource(inputSource), fClientOnInputCloseFunc(onInputCloseFunc),
+    fClientOnInputCloseClientData(onInputCloseClientData),
+    fClientContinueFunc(clientContinueFunc),
+    fClientContinueClientData(clientContinueClientData),
+    fSavedParserIndex(0), fSavedRemainingUnparsedBits(0),
+    fCurParserIndex(0), fRemainingUnparsedBits(0),
+    fTotNumValidBytes(0), fHaveSeenEOF(False) {
+  for (unsigned i = 0; i < 2; ++i) fBank[i] = new unsigned char[BANK_SIZE];
+  fCurBankNum = 0;
+  fCurBank = fBank[0];
+
+  fLastSeenPresentationTime.tv_sec = 0;
+  fLastSeenPresentationTime.tv_usec = 0;
+}
+
+StreamParser::~StreamParser() {
+  // Reclaim both input banks:
+  for (unsigned i = 0; i < 2; ++i) delete[] fBank[i];
+}
+
+// Records the current parse position (byte index + leftover bit count) so
+// that "restoreSavedParserState()" can rewind to it later:
+void StreamParser::saveParserState() {
+  fSavedParserIndex = fCurParserIndex;
+  fSavedRemainingUnparsedBits = fRemainingUnparsedBits;
+}
+
+// Rewinds the parse position to the point recorded by "saveParserState()":
+void StreamParser::restoreSavedParserState() {
+  fCurParserIndex = fSavedParserIndex;
+  fRemainingUnparsedBits = fSavedRemainingUnparsedBits;
+}
+
+// Skips over the next "numBits" bits of input.
+// ("fRemainingUnparsedBits" counts still-unconsumed low-order bits of the byte
+//  just before "fCurParserIndex".)  May (via "ensureValidBytes()") suspend by
+// throwing if not enough input has been buffered yet.
+void StreamParser::skipBits(unsigned numBits) {
+  if (numBits <= fRemainingUnparsedBits) {
+    // The request is satisfied entirely by leftover bits in the current byte:
+    fRemainingUnparsedBits -= numBits;
+  } else {
+    // Consume the leftover bits first, then whole bytes:
+    numBits -= fRemainingUnparsedBits;
+
+    unsigned numBytesToExamine = (numBits+7)/8; // round up
+    ensureValidBytes(numBytesToExamine);
+    fCurParserIndex += numBytesToExamine;
+
+    // Any excess bits in the last examined byte remain available for later:
+    fRemainingUnparsedBits = 8*numBytesToExamine - numBits;
+  }
+}
+
+// Returns the next "numBits" (<= 32) bits of input, in the low-order bits of
+// the result.  May (via "test4Bytes()"/"ensureValidBytes()") suspend by
+// throwing if not enough input has been buffered yet.
+unsigned StreamParser::getBits(unsigned numBits) {
+  if (numBits <= fRemainingUnparsedBits) {
+    // The request is satisfied entirely by leftover bits of the current byte:
+    unsigned char lastByte = *lastParsed();
+    lastByte >>= (fRemainingUnparsedBits - numBits);
+    fRemainingUnparsedBits -= numBits;
+
+    return (unsigned)lastByte &~ ((~0u)<<numBits); // mask to "numBits" bits
+  } else {
+    // We need the leftover bits (if any) plus some bits from upcoming bytes:
+    unsigned char lastByte;
+    if (fRemainingUnparsedBits > 0) {
+      lastByte = *lastParsed();
+    } else {
+      lastByte = 0;
+    }
+
+    unsigned remainingBits = numBits - fRemainingUnparsedBits; // > 0
+
+    // For simplicity, read the next 4 bytes, even though we might not
+    // need all of them here:
+    unsigned result = test4Bytes();
+
+    result >>= (32 - remainingBits); // keep only the bits we need
+    result |= (lastByte << remainingBits); // prepend the leftover bits
+    if (numBits < 32) result &=~ ((~0u)<<numBits); // mask to "numBits" bits
+
+    // Advance past the bytes we actually consumed; excess bits in the last
+    // consumed byte remain available for the next call:
+    unsigned const numRemainingBytes = (remainingBits+7)/8;
+    fCurParserIndex += numRemainingBytes;
+    fRemainingUnparsedBits = 8*numRemainingBytes - remainingBits;
+
+    return result;
+  }
+}
+
+// Exposes the (compile-time) size of each input bank:
+unsigned StreamParser::bankSize() const {
+  return BANK_SIZE;
+}
+
+#define NO_MORE_BUFFERED_INPUT 1
+
+// Slow path of "ensureValidBytes()": the bank doesn't yet hold "numBytesNeeded"
+// unparsed bytes, so schedule an asynchronous read from the input source and
+// *throw* NO_MORE_BUFFERED_INPUT to unwind out of the parse.  Parsing resumes
+// later (from the position recorded by "saveParserState()") when
+// "afterGettingBytes1()" is called with the new data.
+void StreamParser::ensureValidBytes1(unsigned numBytesNeeded) {
+  // We need to read some more bytes from the input source.
+  // First, clarify how much data to ask for:
+  unsigned maxInputFrameSize = fInputSource->maxFrameSize();
+  if (maxInputFrameSize > numBytesNeeded) numBytesNeeded = maxInputFrameSize;
+
+  // First, check whether these new bytes would overflow the current
+  // bank. If so, start using a new bank now.
+  if (fCurParserIndex + numBytesNeeded > BANK_SIZE) {
+    // Swap banks, but save any still-needed bytes from the old bank:
+    // (everything from the saved parse position onward must be preserved)
+    unsigned numBytesToSave = fTotNumValidBytes - fSavedParserIndex;
+    unsigned char const* from = &curBank()[fSavedParserIndex];
+
+    fCurBankNum = (fCurBankNum + 1)%2;
+    fCurBank = fBank[fCurBankNum];
+    memmove(curBank(), from, numBytesToSave);
+    // Re-base all indices so the saved position is now the start of the bank:
+    fCurParserIndex = fCurParserIndex - fSavedParserIndex;
+    fSavedParserIndex = 0;
+    fTotNumValidBytes = numBytesToSave;
+  }
+
+  // ASSERT: fCurParserIndex + numBytesNeeded > fTotNumValidBytes
+  // && fCurParserIndex + numBytesNeeded <= BANK_SIZE
+  if (fCurParserIndex + numBytesNeeded > BANK_SIZE) {
+    // If this happens, it means that we have too much saved parser state.
+    // To fix this, increase BANK_SIZE as appropriate.
+    fInputSource->envir() << "StreamParser internal error ("
+                          << fCurParserIndex << " + "
+                          << numBytesNeeded << " > "
+                          << BANK_SIZE << ")\n";
+    fInputSource->envir().internalError();
+  }
+
+  // Try to read as many new bytes as will fit in the current bank:
+  unsigned maxNumBytesToRead = BANK_SIZE - fTotNumValidBytes;
+  fInputSource->getNextFrame(&curBank()[fTotNumValidBytes],
+                             maxNumBytesToRead,
+                             afterGettingBytes, this,
+                             onInputClosure, this);
+
+  // Unwind out of the parse; "afterGettingBytes1()" will resume it later:
+  throw NO_MORE_BUFFERED_INPUT;
+}
+
+// Static trampoline: invoked by the input source when new data has arrived;
+// forwards to the member function "afterGettingBytes1()".
+void StreamParser::afterGettingBytes(void* clientData,
+                                     unsigned numBytesRead,
+                                     unsigned /*numTruncatedBytes*/,
+                                     struct timeval presentationTime,
+                                     unsigned /*durationInMicroseconds*/){
+  StreamParser* parser = (StreamParser*)clientData;
+  if (parser == NULL) return;
+  parser->afterGettingBytes1(numBytesRead, presentationTime);
+}
+
+// Accounts for "numBytesRead" newly-arrived bytes in the current bank, rewinds
+// to the saved parse position, and re-enters the client's parsing code (which
+// had previously been unwound by "ensureValidBytes1()"'s throw).
+void StreamParser::afterGettingBytes1(unsigned numBytesRead, struct timeval presentationTime) {
+  // Sanity check: Make sure we didn't get too many bytes for our bank:
+  if (fTotNumValidBytes + numBytesRead > BANK_SIZE) {
+    fInputSource->envir()
+      << "StreamParser::afterGettingBytes() warning: read "
+      << numBytesRead << " bytes; expected no more than "
+      << BANK_SIZE - fTotNumValidBytes << "\n";
+  }
+
+  fLastSeenPresentationTime = presentationTime; // remembered for EOF handling
+
+  unsigned char* ptr = &curBank()[fTotNumValidBytes]; // where the new data begins
+  fTotNumValidBytes += numBytesRead;
+
+  // Continue our original calling source where it left off:
+  restoreSavedParserState();
+  // Sigh... this is a crock; things would have been a lot simpler
+  // here if we were using threads, with synchronous I/O...
+  fClientContinueFunc(fClientContinueClientData, ptr, numBytesRead, presentationTime);
+}
+
+// Static trampoline: invoked when the input source closes; forwards to the
+// member function "onInputClosure1()".
+void StreamParser::onInputClosure(void* clientData) {
+  StreamParser* parser = (StreamParser*)clientData;
+  if (parser == NULL) return;
+  parser->onInputClosure1();
+}
+
+// Two-phase EOF handling: the first closure lets the parser drain any
+// remaining buffered data; the second actually reports the closure to the
+// client.
+void StreamParser::onInputClosure1() {
+  if (!fHaveSeenEOF) {
+    // We're hitting EOF for the first time. Set our 'EOF' flag, and continue parsing, as if we'd just read 0 bytes of data.
+    // This allows the parser to re-parse any remaining unparsed data (perhaps while testing for EOF at the end):
+    fHaveSeenEOF = True;
+    afterGettingBytes1(0, fLastSeenPresentationTime);
+  } else {
+    // We're hitting EOF for the second time. Now, we handle the source input closure:
+    fHaveSeenEOF = False; // reset, in case the parser is reused
+    if (fClientOnInputCloseFunc != NULL) (*fClientOnInputCloseFunc)(fClientOnInputCloseClientData);
+  }
+}
diff --git a/liveMedia/StreamParser.hh b/liveMedia/StreamParser.hh
new file mode 100644
index 0000000..7cdb7dd
--- /dev/null
+++ b/liveMedia/StreamParser.hh
@@ -0,0 +1,167 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Abstract class for parsing a byte stream
+// C++ header
+
+#ifndef _STREAM_PARSER_HH
+#define _STREAM_PARSER_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+// "StreamParser": virtual base class that buffers bytes from a "FramedSource"
+// into a pair of alternating 'banks' and offers byte- and bit-level accessors.
+// When more data is needed than is buffered, "ensureValidBytes()" schedules an
+// asynchronous read and throws to unwind; parsing later resumes from the
+// position recorded by "saveParserState()".
+class StreamParser {
+public:
+  // Discards all buffered, unparsed input:
+  virtual void flushInput();
+
+protected: // we're a virtual base class
+  // Callback used to re-enter the client's parsing code after new data arrives:
+  typedef void (clientContinueFunc)(void* clientData,
+                                    unsigned char* ptr, unsigned size,
+                                    struct timeval presentationTime);
+  StreamParser(FramedSource* inputSource,
+               FramedSource::onCloseFunc* onInputCloseFunc,
+               void* onInputCloseClientData,
+               clientContinueFunc* clientContinueFunc,
+               void* clientContinueClientData);
+  virtual ~StreamParser();
+
+  // Checkpointing of the parse position (used to rewind after an
+  // "ensureValidBytes()" throw delivers more data):
+  void saveParserState();
+  virtual void restoreSavedParserState();
+
+  u_int32_t get4Bytes() { // byte-aligned; returned in big-endian order
+    u_int32_t result = test4Bytes();
+    fCurParserIndex += 4;
+    fRemainingUnparsedBits = 0;
+
+    return result;
+  }
+  u_int32_t test4Bytes() { // as above, but doesn't advance ptr
+    ensureValidBytes(4);
+
+    unsigned char const* ptr = nextToParse();
+    return (ptr[0]<<24)|(ptr[1]<<16)|(ptr[2]<<8)|ptr[3];
+  }
+
+  u_int16_t get2Bytes() { // byte-aligned; returned in big-endian order
+    ensureValidBytes(2);
+
+    unsigned char const* ptr = nextToParse();
+    u_int16_t result = (ptr[0]<<8)|ptr[1];
+
+    fCurParserIndex += 2;
+    fRemainingUnparsedBits = 0;
+
+    return result;
+  }
+  u_int16_t test2Bytes() { // as above, but doesn't advance ptr
+    ensureValidBytes(2);
+
+    unsigned char const* ptr = nextToParse();
+    return (ptr[0]<<8)|ptr[1];
+  }
+
+
+  u_int8_t get1Byte() { // byte-aligned
+    ensureValidBytes(1);
+    fRemainingUnparsedBits = 0;
+    return curBank()[fCurParserIndex++];
+  }
+  u_int8_t test1Byte() { // as above, but doesn't advance ptr
+    ensureValidBytes(1);
+    return nextToParse()[0];
+  }
+
+  void getBytes(u_int8_t* to, unsigned numBytes) {
+    testBytes(to, numBytes);
+    fCurParserIndex += numBytes;
+    fRemainingUnparsedBits = 0;
+  }
+  void testBytes(u_int8_t* to, unsigned numBytes) { // as above, but doesn't advance ptr
+    ensureValidBytes(numBytes);
+    memmove(to, nextToParse(), numBytes);
+  }
+  void skipBytes(unsigned numBytes) {
+    ensureValidBytes(numBytes);
+    fCurParserIndex += numBytes;
+  }
+
+  void skipBits(unsigned numBits);
+  unsigned getBits(unsigned numBits);
+      // numBits <= 32; returns data into low-order bits of result
+
+  unsigned curOffset() const { return fCurParserIndex; }
+
+  unsigned& totNumValidBytes() { return fTotNumValidBytes; }
+
+  Boolean haveSeenEOF() const { return fHaveSeenEOF; }
+
+  unsigned bankSize() const;
+
+private:
+  unsigned char* curBank() { return fCurBank; }
+  unsigned char* nextToParse() { return &curBank()[fCurParserIndex]; }
+  unsigned char* lastParsed() { return &curBank()[fCurParserIndex-1]; }
+
+  // makes sure that at least "numBytes" valid bytes remain:
+  // (if not, "ensureValidBytes1()" schedules a read and throws - see the .cpp)
+  void ensureValidBytes(unsigned numBytesNeeded) {
+    // common case: inlined:
+    if (fCurParserIndex + numBytesNeeded <= fTotNumValidBytes) return;
+
+    ensureValidBytes1(numBytesNeeded);
+  }
+  void ensureValidBytes1(unsigned numBytesNeeded);
+
+  static void afterGettingBytes(void* clientData, unsigned numBytesRead,
+                                unsigned numTruncatedBytes,
+                                struct timeval presentationTime,
+                                unsigned durationInMicroseconds);
+  void afterGettingBytes1(unsigned numBytesRead, struct timeval presentationTime);
+
+  static void onInputClosure(void* clientData);
+  void onInputClosure1();
+
+private:
+  FramedSource* fInputSource; // should be a byte-stream source??
+  FramedSource::onCloseFunc* fClientOnInputCloseFunc;
+  void* fClientOnInputCloseClientData;
+  clientContinueFunc* fClientContinueFunc;
+  void* fClientContinueClientData;
+
+  // Use a pair of 'banks', and swap between them as they fill up:
+  unsigned char* fBank[2];
+  unsigned char fCurBankNum;
+  unsigned char* fCurBank;
+
+  // The most recent 'saved' parse position:
+  unsigned fSavedParserIndex; // <= fCurParserIndex
+  unsigned char fSavedRemainingUnparsedBits;
+
+  // The current position of the parser within the current bank:
+  unsigned fCurParserIndex; // <= fTotNumValidBytes
+  unsigned char fRemainingUnparsedBits; // in previous byte: [0,7]
+
+  // The total number of valid bytes stored in the current bank:
+  unsigned fTotNumValidBytes; // <= BANK_SIZE
+
+  // Whether we have seen EOF on the input source:
+  Boolean fHaveSeenEOF;
+
+  struct timeval fLastSeenPresentationTime; // hack used for EOF handling
+};
+
+#endif
diff --git a/liveMedia/StreamReplicator.cpp b/liveMedia/StreamReplicator.cpp
new file mode 100644
index 0000000..687b654
--- /dev/null
+++ b/liveMedia/StreamReplicator.cpp
@@ -0,0 +1,333 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An class that can be used to create (possibly multiple) 'replicas' of an incoming stream.
+// Implementation.
+
+#include "StreamReplicator.hh"
+
+////////// Definition of "StreamReplica": The class that implements each stream replica //////////
+
+class StreamReplica: public FramedSource {
+protected:
+ friend class StreamReplicator;
+ StreamReplica(StreamReplicator& ourReplicator); // called only by "StreamReplicator::createStreamReplica()"
+ virtual ~StreamReplica();
+
+private: // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ // Copies a received frame (data + metadata) from one replica's buffer to another's:
+ static void copyReceivedFrame(StreamReplica* toReplica, StreamReplica* fromReplica);
+
+private:
+ StreamReplicator& fOurReplicator;
+ int fFrameIndex; // 0 or 1, depending upon which frame we're currently requesting; could also be -1 if we've stopped playing
+
+ // Replicas that are currently awaiting data are kept in a (singly-linked) list:
+ StreamReplica* fNext;
+};
+
+
+////////// StreamReplicator implementation //////////
+
+// Factory function; callers construct a "StreamReplicator" only through here.
+StreamReplicator* StreamReplicator::createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies) {
+  StreamReplicator* replicator = new StreamReplicator(env, inputSource, deleteWhenLastReplicaDies);
+  return replicator;
+}
+
+// Constructor (called only via "createNew()"). We take ownership of "inputSource";
+// it gets closed in our destructor.
+StreamReplicator::StreamReplicator(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies)
+ : Medium(env),
+ fInputSource(inputSource), fDeleteWhenLastReplicaDies(deleteWhenLastReplicaDies), fInputSourceHasClosed(False),
+ fNumReplicas(0), fNumActiveReplicas(0), fNumDeliveriesMadeSoFar(0),
+ fFrameIndex(0), fMasterReplica(NULL), fReplicasAwaitingCurrentFrame(NULL), fReplicasAwaitingNextFrame(NULL) {
+}
+
+StreamReplicator::~StreamReplicator() {
+ // We own the input source, so close it now:
+ Medium::close(fInputSource);
+}
+
+// Creates (and counts) a new replica of our input stream. The replica deregisters
+// itself - via "removeStreamReplica()" - when it is later deleted.
+FramedSource* StreamReplicator::createStreamReplica() {
+  fNumReplicas += 1;
+  StreamReplica* newReplica = new StreamReplica(*this);
+  return newReplica;
+}
+
+// Called - from "StreamReplica::doGetNextFrame()" - when "replica" wants the next frame.
+// The first replica to ask for an unread frame becomes the 'master': the frame is read
+// into its buffer, then copied from there into each other replica's buffer.
+void StreamReplicator::getNextFrame(StreamReplica* replica) {
+ if (fInputSourceHasClosed) { // handle closure instead
+ replica->handleClosure();
+ return;
+ }
+
+ if (replica->fFrameIndex == -1) {
+ // This replica had stopped playing (or had just been created), but is now actively reading. Note this:
+ replica->fFrameIndex = fFrameIndex;
+ ++fNumActiveReplicas;
+ }
+
+ if (fMasterReplica == NULL) {
+ // This is the first replica to request the next unread frame. Make it the 'master' replica - meaning that we read the frame
+ // into its buffer, and then copy from this into the other replicas' buffers.
+ fMasterReplica = replica;
+
+ // Arrange to read the next frame into this replica's buffer:
+ if (fInputSource != NULL) fInputSource->getNextFrame(fMasterReplica->fTo, fMasterReplica->fMaxSize,
+ afterGettingFrame, this, onSourceClosure, this);
+ } else if (replica->fFrameIndex != fFrameIndex) {
+ // This replica is already asking for the next frame (because it has already received the current frame). Enqueue it:
+ replica->fNext = fReplicasAwaitingNextFrame;
+ fReplicasAwaitingNextFrame = replica;
+ } else {
+ // This replica is asking for the current frame. Enqueue it:
+ replica->fNext = fReplicasAwaitingCurrentFrame;
+ fReplicasAwaitingCurrentFrame = replica;
+
+ if (fInputSource != NULL && !fInputSource->isCurrentlyAwaitingData()) {
+ // The current frame has already arrived, so deliver it to this replica now:
+ deliverReceivedFrame();
+ }
+ }
+}
+
+// Called when a replica stops playing (or is being removed). Updates the active count,
+// promotes a replacement 'master replica' if needed, and unlinks the replica from
+// whichever waiting queue it might be on.
+void StreamReplicator::deactivateStreamReplica(StreamReplica* replicaBeingDeactivated) {
+ if (replicaBeingDeactivated->fFrameIndex == -1) return; // this replica has already been deactivated (or was never activated at all)
+
+ // Assert: fNumActiveReplicas > 0
+ if (fNumActiveReplicas == 0) fprintf(stderr, "StreamReplicator::deactivateStreamReplica() Internal Error!\n"); // should not happen
+ --fNumActiveReplicas;
+
+ // Forget about any frame delivery that might have just been made to this replica:
+ if (replicaBeingDeactivated->fFrameIndex != fFrameIndex && fNumDeliveriesMadeSoFar > 0) --fNumDeliveriesMadeSoFar;
+
+ replicaBeingDeactivated->fFrameIndex = -1;
+
+ // Check whether the replica being deactivated is the 'master' replica, or is enqueued awaiting a frame:
+ if (replicaBeingDeactivated == fMasterReplica) {
+ // We need to replace the 'master replica', if we can:
+ if (fReplicasAwaitingCurrentFrame == NULL) {
+ // There's currently no replacement 'master replica'
+ fMasterReplica = NULL;
+ } else {
+ // There's another replica that we can use as a replacement 'master replica':
+ fMasterReplica = fReplicasAwaitingCurrentFrame;
+ fReplicasAwaitingCurrentFrame = fReplicasAwaitingCurrentFrame->fNext;
+ fMasterReplica->fNext = NULL;
+ }
+
+ // Check whether the read into the old master replica's buffer is still pending, or has completed:
+ if (fInputSource != NULL) {
+ if (fInputSource->isCurrentlyAwaitingData()) {
+ // We have a pending read into the old master replica's buffer.
+ // We need to stop it, and retry the read with a new master (if available)
+ fInputSource->stopGettingFrames();
+
+ if (fMasterReplica != NULL) {
+ fInputSource->getNextFrame(fMasterReplica->fTo, fMasterReplica->fMaxSize,
+ afterGettingFrame, this, onSourceClosure, this);
+ }
+ } else {
+ // The read into the old master replica's buffer has already completed. Copy the data to the new master replica (if any):
+ if (fMasterReplica != NULL) {
+ StreamReplica::copyReceivedFrame(fMasterReplica, replicaBeingDeactivated);
+ } else {
+ // We don't have a new master replica, so we can't copy the received frame to any new replica that might ask for it.
+ // Fortunately this should be a very rare occurrence.
+ }
+ }
+ }
+ } else {
+ // The replica that's being removed was not our 'master replica', but make sure it's not on either of our queues:
+ // (singly-linked lists, so we need the head-of-list special case plus a predecessor scan)
+ if (fReplicasAwaitingCurrentFrame != NULL) {
+ if (replicaBeingDeactivated == fReplicasAwaitingCurrentFrame) {
+ fReplicasAwaitingCurrentFrame = replicaBeingDeactivated->fNext;
+ replicaBeingDeactivated->fNext = NULL;
+ }
+ else {
+ for (StreamReplica* r1 = fReplicasAwaitingCurrentFrame; r1->fNext != NULL; r1 = r1->fNext) {
+ if (r1->fNext == replicaBeingDeactivated) {
+ r1->fNext = replicaBeingDeactivated->fNext;
+ replicaBeingDeactivated->fNext = NULL;
+ break;
+ }
+ }
+ }
+ }
+ if (fReplicasAwaitingNextFrame != NULL) {
+ if (replicaBeingDeactivated == fReplicasAwaitingNextFrame) {
+ fReplicasAwaitingNextFrame = replicaBeingDeactivated->fNext;
+ replicaBeingDeactivated->fNext = NULL;
+ }
+ else {
+ for (StreamReplica* r2 = fReplicasAwaitingNextFrame; r2->fNext != NULL; r2 = r2->fNext) {
+ if (r2->fNext == replicaBeingDeactivated) {
+ r2->fNext = replicaBeingDeactivated->fNext;
+ replicaBeingDeactivated->fNext = NULL;
+ break;
+ }
+ }
+ }
+ }
+
+ // Check for the possibility that - now that a replica has been deactivated - all other
+ // replicas have received the current frame, and so now we need to complete delivery to
+ // the master replica:
+ if (fMasterReplica != NULL && fInputSource != NULL && !fInputSource->isCurrentlyAwaitingData()) deliverReceivedFrame();
+ }
+
+ if (fNumActiveReplicas == 0 && fInputSource != NULL) fInputSource->stopGettingFrames(); // tell our source to stop too
+}
+
+// Called from "StreamReplica::~StreamReplica()" when a replica is deleted.
+void StreamReplicator::removeStreamReplica(StreamReplica* replicaBeingRemoved) {
+ // First, handle the replica that's being removed the same way that we would if it were merely being deactivated:
+ deactivateStreamReplica(replicaBeingRemoved);
+
+ // Assert: fNumReplicas > 0
+ if (fNumReplicas == 0) fprintf(stderr, "StreamReplicator::removeStreamReplica() Internal Error!\n"); // should not happen
+ --fNumReplicas;
+
+ // If this was the last replica, then delete ourselves (if we were set up to do so):
+ if (fNumReplicas == 0 && fDeleteWhenLastReplicaDies) {
+ Medium::close(this);
+ return;
+ }
+}
+
+// Static callback invoked by our input source once a frame has been read into the
+// 'master replica's buffer; forwards to the member-function version.
+void StreamReplicator::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
+                                         struct timeval presentationTime, unsigned durationInMicroseconds) {
+  StreamReplicator* replicator = static_cast<StreamReplicator*>(clientData);
+  replicator->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
+}
+
+// A frame has been read into the 'master replica's buffer; record its metadata there,
+// then fan it out to the other waiting replicas.
+void StreamReplicator::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime, unsigned durationInMicroseconds) {
+ // The frame was read into our master replica's buffer. Update the master replica's state, but don't complete delivery to it
+ // just yet. We do that later, after we're sure that we've delivered it to all other replicas.
+ fMasterReplica->fFrameSize = frameSize;
+ fMasterReplica->fNumTruncatedBytes = numTruncatedBytes;
+ fMasterReplica->fPresentationTime = presentationTime;
+ fMasterReplica->fDurationInMicroseconds = durationInMicroseconds;
+
+ deliverReceivedFrame();
+}
+
+// Static callback invoked when our input source closes; forwards to the member version.
+void StreamReplicator::onSourceClosure(void* clientData) {
+  StreamReplicator* replicator = static_cast<StreamReplicator*>(clientData);
+  replicator->onSourceClosure();
+}
+
+// Our input source has closed; propagate the closure to every replica that is
+// currently awaiting a frame (both queues, plus the 'master replica').
+void StreamReplicator::onSourceClosure() {
+ fInputSourceHasClosed = True;
+
+ // Signal the closure to each replica that is currently awaiting a frame:
+ StreamReplica* replica;
+ while ((replica = fReplicasAwaitingCurrentFrame) != NULL) {
+ fReplicasAwaitingCurrentFrame = replica->fNext;
+ replica->fNext = NULL;
+ replica->handleClosure();
+ }
+ while ((replica = fReplicasAwaitingNextFrame) != NULL) {
+ fReplicasAwaitingNextFrame = replica->fNext;
+ replica->fNext = NULL;
+ replica->handleClosure();
+ }
+ if ((replica = fMasterReplica) != NULL) {
+ fMasterReplica = NULL; // clear before the callback, in case it re-enters us
+ replica->handleClosure();
+ }
+}
+
+// Completes delivery of the just-received frame: first to every replica queued for the
+// current frame, then - once no more requests are expected - to the 'master replica' itself.
+void StreamReplicator::deliverReceivedFrame() {
+ // The 'master replica' has received its copy of the current frame.
+ // Copy it (and complete delivery) to any other replica that has requested this frame.
+ // Then, if no more requests for this frame are expected, complete delivery to the 'master replica' itself.
+ StreamReplica* replica;
+ while ((replica = fReplicasAwaitingCurrentFrame) != NULL) {
+ fReplicasAwaitingCurrentFrame = replica->fNext;
+ replica->fNext = NULL;
+
+ // Assert: fMasterReplica != NULL
+ if (fMasterReplica == NULL) fprintf(stderr, "StreamReplicator::deliverReceivedFrame() Internal Error 1!\n"); // shouldn't happen
+ StreamReplica::copyReceivedFrame(replica, fMasterReplica);
+ replica->fFrameIndex = 1 - replica->fFrameIndex; // toggle it (0<->1), because this replica no longer awaits the current frame
+ ++fNumDeliveriesMadeSoFar;
+
+ // Assert: fNumDeliveriesMadeSoFar < fNumActiveReplicas; // because we still have the 'master replica' to deliver to
+ if (!(fNumDeliveriesMadeSoFar < fNumActiveReplicas)) fprintf(stderr, "StreamReplicator::deliverReceivedFrame() Internal Error 2(%d,%d)!\n", fNumDeliveriesMadeSoFar, fNumActiveReplicas); // should not happen
+
+ // Complete delivery to this replica:
+ FramedSource::afterGetting(replica);
+ }
+
+ if (fNumDeliveriesMadeSoFar == fNumActiveReplicas - 1 && fMasterReplica != NULL) {
+ // No more requests for this frame are expected, so complete delivery to the 'master replica':
+ replica = fMasterReplica;
+ fMasterReplica = NULL;
+ replica->fFrameIndex = 1 - replica->fFrameIndex; // toggle it (0<->1), because this replica no longer awaits the current frame
+ fFrameIndex = 1 - fFrameIndex; // toggle it (0<->1) for the next frame
+ fNumDeliveriesMadeSoFar = 0; // reset for the next frame
+
+ if (fReplicasAwaitingNextFrame != NULL) {
+ // One of the other replicas has already requested the next frame, so make it the next 'master replica':
+ fMasterReplica = fReplicasAwaitingNextFrame;
+ fReplicasAwaitingNextFrame = fReplicasAwaitingNextFrame->fNext;
+ fMasterReplica->fNext = NULL;
+
+ // Arrange to read the next frame into this replica's buffer:
+ if (fInputSource != NULL) fInputSource->getNextFrame(fMasterReplica->fTo, fMasterReplica->fMaxSize,
+ afterGettingFrame, this, onSourceClosure, this);
+ }
+
+ // Move any other replicas that had already requested the next frame to the 'requesting current frame' list:
+ // Assert: fReplicasAwaitingCurrentFrame == NULL;
+ if (!(fReplicasAwaitingCurrentFrame == NULL)) fprintf(stderr, "StreamReplicator::deliverReceivedFrame() Internal Error 3!\n"); // should not happen
+ fReplicasAwaitingCurrentFrame = fReplicasAwaitingNextFrame;
+ fReplicasAwaitingNextFrame = NULL;
+
+ // Complete delivery to the 'master' replica (thereby completing all deliveries for this frame):
+ FramedSource::afterGetting(replica);
+ }
+}
+
+
+////////// StreamReplica implementation //////////
+
+// Constructor (called only by "StreamReplicator::createStreamReplica()").
+StreamReplica::StreamReplica(StreamReplicator& ourReplicator)
+ : FramedSource(ourReplicator.envir()),
+ fOurReplicator(ourReplicator),
+ fFrameIndex(-1/*we haven't started playing yet*/), fNext(NULL) {
+}
+
+StreamReplica::~StreamReplica() {
+ // Deregister from our replicator (which may delete itself if we were its last replica):
+ fOurReplicator.removeStreamReplica(this);
+}
+
+void StreamReplica::doGetNextFrame() {
+ // Delegate all frame scheduling to our replicator:
+ fOurReplicator.getNextFrame(this);
+}
+
+void StreamReplica::doStopGettingFrames() {
+ // Tell our replicator that we're no longer an active reader:
+ fOurReplicator.deactivateStreamReplica(this);
+}
+
+// Copies the (already-received) frame in "fromReplica"s buffer - data and metadata -
+// into "toReplica"s buffer, truncating further if the destination buffer is smaller.
+void StreamReplica::copyReceivedFrame(StreamReplica* toReplica, StreamReplica* fromReplica) {
+  unsigned const receivedSize = fromReplica->fFrameSize;
+  unsigned extraTruncation = 0;
+  if (toReplica->fMaxSize < receivedSize) {
+    // "toReplica"s buffer is too small; the excess bytes get dropped:
+    extraTruncation = receivedSize - toReplica->fMaxSize;
+  }
+  toReplica->fFrameSize = receivedSize - extraTruncation;
+  toReplica->fNumTruncatedBytes = fromReplica->fNumTruncatedBytes + extraTruncation;
+
+  memmove(toReplica->fTo, fromReplica->fTo, toReplica->fFrameSize);
+  toReplica->fPresentationTime = fromReplica->fPresentationTime;
+  toReplica->fDurationInMicroseconds = fromReplica->fDurationInMicroseconds;
+}
diff --git a/liveMedia/T140TextRTPSink.cpp b/liveMedia/T140TextRTPSink.cpp
new file mode 100644
index 0000000..84554fb
--- /dev/null
+++ b/liveMedia/T140TextRTPSink.cpp
@@ -0,0 +1,184 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for T.140 text (RFC 2793)
+// Implementation
+
+#include "T140TextRTPSink.hh"
+#include <GroupsockHelper.hh> // for "gettimeofday()"
+
+////////// T140TextRTPSink implementation //////////
+
+// Constructor (called only via "createNew()"). The 'idle filter' is created lazily,
+// in "continuePlaying()".
+T140TextRTPSink::T140TextRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat)
+ : TextRTPSink(env, RTPgs, rtpPayloadFormat, 1000/*mandatory RTP timestamp frequency for this payload format*/, "T140"),
+ fOurIdleFilter(NULL), fAreInIdlePeriod(True) {
+}
+
+// Destructor. Note the order-sensitive teardown: we must stop playing *while our
+// 'idle filter' is still alive*, because the base class destructor would otherwise
+// call "stopPlaying()" after the filter is gone.
+T140TextRTPSink::~T140TextRTPSink() {
+ fSource = fOurIdleFilter; // hack: in case "fSource" had gotten set to NULL before we were called
+ stopPlaying(); // call this now, because we won't have our 'idle filter' when the base class destructor calls it later.
+
+ // Close our 'idle filter' as well:
+ Medium::close(fOurIdleFilter);
+ fSource = NULL; // for the base class destructor, which gets called next
+}
+
+// Factory function for creating a "T140TextRTPSink".
+T140TextRTPSink* T140TextRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+                                            unsigned char rtpPayloadFormat) {
+  T140TextRTPSink* newSink = new T140TextRTPSink(env, RTPgs, rtpPayloadFormat);
+  return newSink;
+}
+
+// Interposes our 'idle filter' between the upstream source and ourselves (so that
+// empty 'idle' frames can be generated during input gaps), then resumes normal play.
+Boolean T140TextRTPSink::continuePlaying() {
+ // First, check whether we have an 'idle filter' set up yet. If not, create it now, and insert it in front of our existing source:
+ if (fOurIdleFilter == NULL) {
+ fOurIdleFilter = new T140IdleFilter(envir(), fSource);
+ } else {
+ fOurIdleFilter->reassignInputSource(fSource);
+ }
+ fSource = fOurIdleFilter;
+
+ // Then call the parent class's implementation:
+ return MultiFramedRTPSink::continuePlaying();
+}
+
+// Per-frame RTP header handling: sets the marker bit on the first non-empty frame
+// after an idle period, and stamps the packet with the frame's presentation time.
+void T140TextRTPSink::doSpecialFrameHandling(unsigned /*fragmentationOffset*/,
+ unsigned char* /*frameStart*/,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned /*numRemainingBytes*/) {
+ // Set the RTP 'M' (marker) bit if we have just ended an idle period - i.e., if we were in an idle period, but just got data:
+ if (fAreInIdlePeriod && numBytesInFrame > 0) setMarkerBit();
+ fAreInIdlePeriod = numBytesInFrame == 0; // an empty frame means we are (still) idle
+
+ setTimestamp(framePresentationTime);
+}
+
+// One frame per RTP packet: never pack a frame after another in the same packet.
+Boolean T140TextRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/, unsigned /*numBytesInFrame*/) const {
+ return False; // We don't concatenate input data; instead, send it out immediately
+}
+
+
+////////// T140IdleFilter implementation //////////
+
+// Constructor: allocates a staging buffer (sized like an outgoing RTP packet buffer)
+// for holding upstream data until the downstream sink asks for it.
+T140IdleFilter::T140IdleFilter(UsageEnvironment& env, FramedSource* inputSource)
+ : FramedFilter(env, inputSource),
+ fIdleTimerTask(NULL),
+ fBufferSize(OutPacketBuffer::maxSize), fNumBufferedBytes(0) {
+ fBuffer = new char[fBufferSize];
+}
+
+T140IdleFilter::~T140IdleFilter() {
+ // Cancel any pending idle timer before we go away:
+ envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask);
+
+ delete[] fBuffer;
+ detachInputSource(); // so that the subsequent ~FramedFilter() doesn't delete it
+}
+
+#define IDLE_TIMEOUT_MICROSECONDS 300000 /* 300 ms */
+
+// Downstream reader wants data: deliver buffered data if we have it; otherwise request
+// data from upstream, with an idle timer that forces out an empty frame if nothing
+// arrives within IDLE_TIMEOUT_MICROSECONDS.
+void T140IdleFilter::doGetNextFrame() {
+ // First, see if we have buffered data that we can deliver:
+ if (fNumBufferedBytes > 0) {
+ deliverFromBuffer();
+ return;
+ }
+
+ // We don't have any buffered data, so ask our input source for data (unless we've already done so).
+ // But also set a timer to expire if this doesn't arrive promptly:
+ fIdleTimerTask = envir().taskScheduler().scheduleDelayedTask(IDLE_TIMEOUT_MICROSECONDS, handleIdleTimeout, this);
+ if (fInputSource != NULL && !fInputSource->isCurrentlyAwaitingData()) {
+ fInputSource->getNextFrame((unsigned char*)fBuffer, fBufferSize, afterGettingFrame, this, onSourceClosure, this);
+ }
+}
+
+// Static callback invoked by the upstream source when new data has arrived;
+// forwards to the member-function version.
+void T140IdleFilter::afterGettingFrame(void* clientData, unsigned frameSize,
+                                       unsigned numTruncatedBytes,
+                                       struct timeval presentationTime,
+                                       unsigned durationInMicroseconds) {
+  T140IdleFilter* filter = static_cast<T140IdleFilter*>(clientData);
+  filter->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
+}
+
+// New upstream data has arrived in "fBuffer": cancel the idle timer, record the frame's
+// metadata, and deliver downstream if the reader is currently waiting.
+void T140IdleFilter::afterGettingFrame(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ // First, cancel any pending idle timer:
+ envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask);
+
+ // Then note the new data that we have in our buffer:
+ fNumBufferedBytes = frameSize;
+ fBufferedNumTruncatedBytes = numTruncatedBytes;
+ fBufferedDataPresentationTime = presentationTime;
+ fBufferedDataDurationInMicroseconds = durationInMicroseconds;
+
+ // Then, attempt to deliver this data. (If we can't deliver it now, we'll do so the next time the reader asks for data.)
+ if (isCurrentlyAwaitingData()) (void)deliverFromBuffer();
+}
+
+// Reader has stopped: cancel the idle timer, then let the base class stop upstream too.
+void T140IdleFilter::doStopGettingFrames() {
+ // Cancel any pending idle timer:
+ envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask);
+
+ // And call the parent's implementation of this virtual function:
+ FramedFilter::doStopGettingFrames();
+}
+
+// Static timer callback; forwards to the member-function version.
+void T140IdleFilter::handleIdleTimeout(void* clientData) {
+  T140IdleFilter* filter = static_cast<T140IdleFilter*>(clientData);
+  filter->handleIdleTimeout();
+}
+
+void T140IdleFilter::handleIdleTimeout() {
+ fIdleTimerTask = NULL; // the timer has fired, so it's no longer pending
+ // No data has arrived from the upstream source within our specified 'idle period' (after data was requested from downstream).
+ // Send an empty 'idle' frame to our downstream "T140TextRTPSink". (This will cause an empty RTP packet to get sent.)
+ deliverEmptyFrame();
+}
+
+// Delivers the buffered data to the downstream reader, truncating (and accounting for
+// the truncation) if the reader's buffer is smaller than what we have.
+void T140IdleFilter::deliverFromBuffer() {
+ if (fNumBufferedBytes <= fMaxSize) { // common case
+ fNumTruncatedBytes = fBufferedNumTruncatedBytes;
+ fFrameSize = fNumBufferedBytes;
+ } else {
+ // Reader's buffer is too small; the excess bytes are counted as truncated:
+ fNumTruncatedBytes = fBufferedNumTruncatedBytes + fNumBufferedBytes - fMaxSize;
+ fFrameSize = fMaxSize;
+ }
+
+ memmove(fTo, fBuffer, fFrameSize);
+ fPresentationTime = fBufferedDataPresentationTime;
+ fDurationInMicroseconds = fBufferedDataDurationInMicroseconds;
+
+ fNumBufferedBytes = 0; // reset buffer
+
+ FramedSource::afterGetting(this); // complete delivery
+}
+
+// Delivers a zero-length frame (stamped 'now') downstream; used to signal an idle period.
+void T140IdleFilter::deliverEmptyFrame() {
+ fFrameSize = fNumTruncatedBytes = 0;
+ gettimeofday(&fPresentationTime, NULL);
+ FramedSource::afterGetting(this); // complete delivery
+}
+
+// Static callback invoked when the upstream source closes; forwards to the member version.
+void T140IdleFilter::onSourceClosure(void* clientData) {
+  T140IdleFilter* filter = static_cast<T140IdleFilter*>(clientData);
+  filter->onSourceClosure();
+}
+
+// The upstream source has closed: cancel any pending idle timer, then propagate
+// the closure downstream.
+void T140IdleFilter::onSourceClosure() {
+ envir().taskScheduler().unscheduleDelayedTask(fIdleTimerTask);
+
+ handleClosure();
+}
diff --git a/liveMedia/TCPStreamSink.cpp b/liveMedia/TCPStreamSink.cpp
new file mode 100644
index 0000000..aaa6149
--- /dev/null
+++ b/liveMedia/TCPStreamSink.cpp
@@ -0,0 +1,118 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A sink representing a TCP output stream
+// Implementation
+
+#include "TCPStreamSink.hh"
+#include <GroupsockHelper.hh> // for "ignoreSigPipeOnSocket()"
+
+// Factory function for creating a "TCPStreamSink" that writes to "socketNum".
+TCPStreamSink* TCPStreamSink::createNew(UsageEnvironment& env, int socketNum) {
+  TCPStreamSink* newSink = new TCPStreamSink(env, socketNum);
+  return newSink;
+}
+
+// Constructor (called only via "createNew()"). We don't own "socketNum"; we never
+// close it, but we do suppress SIGPIPE so a peer disconnect doesn't kill the process.
+TCPStreamSink::TCPStreamSink(UsageEnvironment& env, int socketNum)
+ : MediaSink(env),
+ fUnwrittenBytesStart(0), fUnwrittenBytesEnd(0),
+ fInputSourceIsOpen(False), fOutputSocketIsWritable(True),
+ fOutputSocketNum(socketNum) {
+ ignoreSigPipeOnSocket(socketNum);
+}
+
+TCPStreamSink::~TCPStreamSink() {
+ // Turn off any pending background handling of our output socket:
+ envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum);
+}
+
+// Starts (or resumes) pumping data from our input source to the output socket.
+Boolean TCPStreamSink::continuePlaying() {
+ fInputSourceIsOpen = fSource != NULL;
+ processBuffer();
+
+ return True;
+}
+
+#define TCP_STREAM_SINK_MIN_READ_SIZE 1000
+
+// Core pump: (1) write as much buffered data to the socket as it will take,
+// (2) refill the buffer from the input source if there's room, (3) finish up
+// once the source has closed and the buffer has drained.
+void TCPStreamSink::processBuffer() {
+ // First, try writing data to our output socket, if we can:
+ if (fOutputSocketIsWritable && numUnwrittenBytes() > 0) {
+ int numBytesWritten
+ = send(fOutputSocketNum, (const char*)&fBuffer[fUnwrittenBytesStart], numUnwrittenBytes(), 0);
+ // A short write - including an error return of -1 - means the socket can't take more now:
+ if (numBytesWritten < (int)numUnwrittenBytes()) {
+ // The output socket is no longer writable. Set a handler to be called when it becomes writable again.
+ fOutputSocketIsWritable = False;
+ if (envir().getErrno() != EPIPE) { // on this error, the socket might still be writable, but no longer usable
+ envir().taskScheduler().setBackgroundHandling(fOutputSocketNum, SOCKET_WRITABLE, socketWritableHandler, this);
+ }
+ }
+ if (numBytesWritten > 0) {
+ // We wrote at least some of our data. Update our buffer pointers:
+ fUnwrittenBytesStart += numBytesWritten;
+ if (fUnwrittenBytesStart > fUnwrittenBytesEnd) fUnwrittenBytesStart = fUnwrittenBytesEnd; // sanity check
+ if (fUnwrittenBytesStart == fUnwrittenBytesEnd && (!fInputSourceIsOpen || !fSource->isCurrentlyAwaitingData())) {
+ fUnwrittenBytesStart = fUnwrittenBytesEnd = 0; // reset the buffer to empty
+ }
+ }
+ }
+
+ // Then, read from our input source, if we can (& we're not already reading from it):
+ if (fInputSourceIsOpen && freeBufferSpace() >= TCP_STREAM_SINK_MIN_READ_SIZE && !fSource->isCurrentlyAwaitingData()) {
+ fSource->getNextFrame(&fBuffer[fUnwrittenBytesEnd], freeBufferSpace(), afterGettingFrame, this, ourOnSourceClosure, this);
+ } else if (!fInputSourceIsOpen && numUnwrittenBytes() == 0) {
+ // We're now done:
+ onSourceClosure();
+ }
+}
+
+// Static callback invoked when the output socket becomes writable again;
+// forwards to the member-function version.
+void TCPStreamSink::socketWritableHandler(void* clientData, int /*mask*/) {
+  static_cast<TCPStreamSink*>(clientData)->socketWritableHandler1();
+}
+
+// The output socket has become writable again: resume pumping buffered data to it.
+void TCPStreamSink::socketWritableHandler1() {
+ envir().taskScheduler().disableBackgroundHandling(fOutputSocketNum); // disable this handler until the next time it's needed
+
+ fOutputSocketIsWritable = True;
+ processBuffer();
+}
+
+// Static callback invoked when our input source has delivered a frame into our buffer;
+// forwards to the member-function version.
+void TCPStreamSink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
+                                      struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/) {
+  static_cast<TCPStreamSink*>(clientData)->afterGettingFrame(frameSize, numTruncatedBytes);
+}
+
+// New data has arrived in our buffer: warn if any of it was truncated, account for it,
+// and resume the write/read pump.
+void TCPStreamSink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes) {
+ if (numTruncatedBytes > 0) {
+ envir() << "TCPStreamSink::afterGettingFrame(): The input frame data was too large for our buffer. "
+ << numTruncatedBytes
+ << " bytes of trailing data was dropped! Correct this by increasing the definition of \"TCP_STREAM_SINK_BUFFER_SIZE\" in \"include/TCPStreamSink.hh\".\n";
+ }
+ fUnwrittenBytesEnd += frameSize;
+ processBuffer();
+}
+
+// Static callback invoked when our input source closes; forwards to the member version.
+void TCPStreamSink::ourOnSourceClosure(void* clientData) {
+  static_cast<TCPStreamSink*>(clientData)->ourOnSourceClosure1();
+}
+
+// The input source has closed: note this, then keep pumping so any remaining
+// buffered bytes get written before we signal completion.
+void TCPStreamSink::ourOnSourceClosure1() {
+ // The input source has closed:
+ fInputSourceIsOpen = False;
+ processBuffer();
+}
diff --git a/liveMedia/TLSState.cpp b/liveMedia/TLSState.cpp
new file mode 100644
index 0000000..418db90
--- /dev/null
+++ b/liveMedia/TLSState.cpp
@@ -0,0 +1,122 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// State encapsulating a TLS connection
+// Implementation
+
+#include "TLSState.hh"
+#include "RTSPClient.hh"
+
+// Constructor: starts out unconfigured; the OpenSSL objects are created lazily,
+// in "setup()" (called from "connect()").
+TLSState::TLSState(RTSPClient& client)
+ : isNeeded(False)
+#ifndef NO_OPENSSL
+ , fClient(client), fHasBeenSetup(False), fCtx(NULL), fCon(NULL)
+#endif
+{
+}
+
+TLSState::~TLSState() {
+ reset(); // shuts down and frees any OpenSSL state
+}
+
+// Performs (or continues) the TLS handshake over the already-connected TCP socket.
+// Returns >0 on completion, 0 if the handshake is still pending (a background handler
+// has been registered to re-enter via "RTSPClient::connectionHandler"), or -1 on error.
+int TLSState::connect(int socketNum) {
+#ifndef NO_OPENSSL
+ if (!fHasBeenSetup && !setup(socketNum)) return -1; // error
+
+ // Complete the SSL-level connection to the server:
+ int sslConnectResult = SSL_connect(fCon);
+ int sslGetErrorResult = SSL_get_error(fCon, sslConnectResult);
+
+ if (sslConnectResult > 0) {
+ return sslConnectResult; // connection has completed
+ } else if (sslConnectResult < 0
+ && (sslGetErrorResult == SSL_ERROR_WANT_READ ||
+ sslGetErrorResult == SSL_ERROR_WANT_WRITE)) {
+ // We need to wait until the socket is readable or writable:
+ fClient.envir().taskScheduler()
+ .setBackgroundHandling(socketNum,
+ sslGetErrorResult == SSL_ERROR_WANT_READ ? SOCKET_READABLE : SOCKET_WRITABLE,
+ (TaskScheduler::BackgroundHandlerProc*)&RTSPClient::connectionHandler,
+ &fClient);
+ return 0; // connection is pending
+ } else {
+ fClient.envir().setResultErrMsg("TLS connection to server failed: ", sslGetErrorResult);
+ return -1; // error
+ }
+#else
+ // Built without OpenSSL: TLS connections always fail.
+ return -1;
+#endif
+}
+
+// Writes "count" bytes over the TLS connection; returns the "SSL_write()" result
+// (>0 bytes written, <=0 on error), or -1 if built without OpenSSL.
+int TLSState::write(const char* data, unsigned count) {
+#ifndef NO_OPENSSL
+ return SSL_write(fCon, data, count);
+#else
+ return -1;
+#endif
+}
+
+// Reads up to "bufferSize" bytes from the TLS connection. A would-block condition
+// (SSL_ERROR_WANT_READ) is mapped to a return of 0, so callers treat it as 'no data yet'.
+int TLSState::read(u_int8_t* buffer, unsigned bufferSize) {
+#ifndef NO_OPENSSL
+ int result = SSL_read(fCon, buffer, bufferSize);
+ if (result < 0 && SSL_get_error(fCon, result) == SSL_ERROR_WANT_READ) {
+ // The data can't be delivered yet. Return 0 (bytes read); we'll try again later
+ return 0;
+ }
+ return result;
+#else
+ return 0;
+#endif
+}
+
+// Tears down the TLS session (if one was set up) and frees the OpenSSL objects,
+// returning us to the unconfigured state.
+void TLSState::reset() {
+#ifndef NO_OPENSSL
+ if (fHasBeenSetup) SSL_shutdown(fCon);
+
+ if (fCon != NULL) { SSL_free(fCon); fCon = NULL; }
+ if (fCtx != NULL) { SSL_CTX_free(fCtx); fCtx = NULL; }
+#endif
+}
+
+// One-time OpenSSL setup: creates the SSL context and connection objects, attaches
+// them to "socketNum" via a BIO, and puts the connection in client ('connect') mode.
+// Returns True on success; on any failure, frees partial state and returns False.
+// NOTE(review): no certificate verification is configured here (no SSL_CTX_set_verify(),
+// no trust store is loaded), so the server's certificate is not validated - confirm
+// that this is intended.
+Boolean TLSState::setup(int socketNum) {
+#ifndef NO_OPENSSL
+ do {
+ (void)SSL_library_init();
+
+ SSL_METHOD const* meth = SSLv23_client_method();
+ if (meth == NULL) break;
+
+ fCtx = SSL_CTX_new(meth);
+ if (fCtx == NULL) break;
+
+ fCon = SSL_new(fCtx);
+ if (fCon == NULL) break;
+
+ // BIO_NOCLOSE: the BIO must not close the socket; its lifetime is managed elsewhere
+ BIO* bio = BIO_new_socket(socketNum, BIO_NOCLOSE);
+ SSL_set_bio(fCon, bio, bio);
+
+ SSL_set_connect_state(fCon);
+
+ fHasBeenSetup = True;
+ return True;
+ } while (0);
+#endif
+
+ // An error occurred:
+ reset();
+ return False;
+}
diff --git a/liveMedia/TextRTPSink.cpp b/liveMedia/TextRTPSink.cpp
new file mode 100644
index 0000000..10b349e
--- /dev/null
+++ b/liveMedia/TextRTPSink.cpp
@@ -0,0 +1,36 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTP sink for text codecs (abstract base class)
+// Implementation
+
+#include "TextRTPSink.hh"
+
+// Constructor for the abstract text-RTP-sink base class; simply forwards to
+// "MultiFramedRTPSink".
+TextRTPSink::TextRTPSink(UsageEnvironment& env,
+ Groupsock* rtpgs, unsigned char rtpPayloadType,
+ unsigned rtpTimestampFrequency,
+ char const* rtpPayloadFormatName)
+ : MultiFramedRTPSink(env, rtpgs, rtpPayloadType, rtpTimestampFrequency,
+ rtpPayloadFormatName) {
+}
+
+// Virtual destructor (no resources of our own to release).
+TextRTPSink::~TextRTPSink() {
+}
+
+// The SDP <media> type for all text sinks (the "m=" line's media field).
+char const* TextRTPSink::sdpMediaType() const {
+ return "text";
+}
diff --git a/liveMedia/TheoraVideoRTPSink.cpp b/liveMedia/TheoraVideoRTPSink.cpp
new file mode 100644
index 0000000..aed4bc3
--- /dev/null
+++ b/liveMedia/TheoraVideoRTPSink.cpp
@@ -0,0 +1,176 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for Theora video
+// Implementation
+
+#include "TheoraVideoRTPSink.hh"
+#include "Base64.hh"
+#include "VorbisAudioRTPSource.hh" // for parseVorbisOrTheoraConfigStr()
+#include "VorbisAudioRTPSink.hh" // for generateVorbisOrTheoraConfigStr()
+
+// Create a new Theora RTP sink from already-unpacked Theora stream headers.
+// The three header buffers are only read (in the constructor, to build the
+// SDP "configuration" string); the caller retains ownership of them.
+TheoraVideoRTPSink* TheoraVideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField) {
+  return new TheoraVideoRTPSink(env, RTPgs,
+ rtpPayloadFormat,
+ identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize, identField);
+}
+
+// Alternative creator: takes a packed, Base64-encoded 'config' string (as
+// found in a SDP "a=fmtp:" line), unpacks it into the three Theora headers
+// plus the 24-bit "Ident" field, constructs the sink, then frees the
+// heap-allocated headers (the sink does not retain pointers to them).
+TheoraVideoRTPSink* TheoraVideoRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ char const* configStr) {
+  // Begin by decoding and unpacking the configuration string:
+  u_int8_t* identificationHeader; unsigned identificationHeaderSize;
+  u_int8_t* commentHeader; unsigned commentHeaderSize;
+  u_int8_t* setupHeader; unsigned setupHeaderSize;
+  u_int32_t identField;
+
+  parseVorbisOrTheoraConfigStr(configStr,
+ identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+
+  TheoraVideoRTPSink* resultSink
+    = new TheoraVideoRTPSink(env, RTPgs, rtpPayloadFormat,
+ identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+  // The unpacked headers were allocated by "parseVorbisOrTheoraConfigStr()":
+  delete[] identificationHeader; delete[] commentHeader; delete[] setupHeader;
+
+  return resultSink;
+}
+
+// Constructor.  Uses a 90 kHz RTP timestamp clock and the "THEORA" payload
+// format name.  If the Theora 'identification' header is present (and big
+// enough), the video's width/height/pixel-format and nominal bitrate are
+// parsed from it; otherwise defaults are used.  Also builds the "a=fmtp:"
+// SDP line (left NULL on failure, so auxSDPLine() then returns NULL).
+TheoraVideoRTPSink
+::TheoraVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField)
+  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "THEORA"),
+    fIdent(identField), fFmtpSDPLine(NULL) {
+  // SDP "sampling=" names, indexed by the 2-bit 'pf' (pixel format) field:
+  static const char *pf_to_str[] = {
+    "YCbCr-4:2:0",
+    "Reserved",
+    "YCbCr-4:2:2",
+    "YCbCr-4:4:4",
+  };
+  
+  unsigned width = 1280; // default value
+  unsigned height = 720; // default value
+  unsigned pf = 0; // default value
+  if (identificationHeaderSize >= 42) {
+    // Parse this header to get the "width", "height", "pf" (pixel format), and
+    // 'nominal bitrate' parameters:
+    u_int8_t* p = identificationHeader; // alias
+    width = (p[14]<<16)|(p[15]<<8)|p[16]; // 24 bits, big-endian
+    height = (p[17]<<16)|(p[18]<<8)|p[19]; // 24 bits, big-endian
+    pf = (p[41]&0x18)>>3; // 2 bits => always a valid index into "pf_to_str"
+    unsigned nominalBitrate = (p[37]<<16)|(p[38]<<8)|p[39];
+    if (nominalBitrate > 0) estimatedBitrate() = nominalBitrate/1000; // kbps
+  }
+
+  // Generate a 'config' string from the supplied configuration headers:
+  char* base64PackedHeaders
+    = generateVorbisOrTheoraConfigStr(identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+  if (base64PackedHeaders == NULL) return;
+
+  // Then use this 'config' string to construct our "a=fmtp:" SDP line:
+  unsigned fmtpSDPLineMaxSize = 200 + strlen(base64PackedHeaders);// 200 => more than enough space
+  fFmtpSDPLine = new char[fmtpSDPLineMaxSize];
+  sprintf(fFmtpSDPLine, "a=fmtp:%d sampling=%s;width=%u;height=%u;delivery-method=out_band/rtsp;configuration=%s\r\n", rtpPayloadType(), pf_to_str[pf], width, height, base64PackedHeaders);
+  delete[] base64PackedHeaders;
+}
+
+TheoraVideoRTPSink::~TheoraVideoRTPSink() {
+  delete[] fFmtpSDPLine;
+}
+
+// Returns the "a=fmtp:" line built in the constructor (NULL if it failed):
+char const* TheoraVideoRTPSink::auxSDPLine() {
+  return fFmtpSDPLine;
+}
+
+// Prepends each outgoing packet's payload with the Theora payload header.
+// "header[]" holds 6 bytes: the 4-byte payload header (Ident + F|TDT|numPkts)
+// followed by the 2-byte (big-endian) length of the Theora data.
+void TheoraVideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+  // Set the 4-byte "payload header", as defined in http://svn.xiph.org/trunk/theora/doc/draft-ietf-avt-rtp-theora-00.txt
+  u_int8_t header[6];
+
+  // The three bytes of the header are our "Ident":
+  header[0] = fIdent>>16; header[1] = fIdent>>8; header[2] = fIdent;
+
+  // The final byte contains the "F", "TDT", and "numPkts" fields:
+  u_int8_t F; // Fragment type
+  if (numRemainingBytes > 0) {
+    if (fragmentationOffset > 0) {
+      F = 2<<6; // continuation fragment
+    } else {
+      F = 1<<6; // start fragment
+    }
+  } else {
+    if (fragmentationOffset > 0) {
+      F = 3<<6; // end fragment
+    } else {
+      F = 0<<6; // not fragmented
+    }
+  }
+  u_int8_t const TDT = 0<<4; // Theora Data Type (always a "Raw Theora payload")
+  u_int8_t numPkts = F == 0 ? (numFramesUsedSoFar() + 1): 0; // set to 0 when we're a fragment
+  header[3] = F|TDT|numPkts;
+
+  // There's also a 2-byte 'frame-specific' header: The length of the
+  // Theora data:
+  header[4] = numBytesInFrame >>8; // high byte first (big-endian)
+  header[5] = numBytesInFrame;
+  setSpecialHeaderBytes(header, sizeof(header));
+
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit:
+    setMarkerBit();
+  }
+
+  // Important: Also call our base class's doSpecialFrameHandling(),
+  // to set the packet's timestamp:
+  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+ frameStart, numBytesInFrame,
+ framePresentationTime,
+ numRemainingBytes);
+}
+
+Boolean TheoraVideoRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+  // Only one frame per packet:
+  return False;
+}
+
+// 4-byte payload header + 2-byte frame length (see doSpecialFrameHandling):
+unsigned TheoraVideoRTPSink::specialHeaderSize() const {
+  return 6;
+}
diff --git a/liveMedia/TheoraVideoRTPSource.cpp b/liveMedia/TheoraVideoRTPSource.cpp
new file mode 100644
index 0000000..06cc26f
--- /dev/null
+++ b/liveMedia/TheoraVideoRTPSource.cpp
@@ -0,0 +1,113 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Theora Video RTP Sources
+// Implementation
+
+#include "TheoraVideoRTPSource.hh"
+
+////////// TheoraBufferedPacket and TheoraBufferedPacketFactory //////////
+
+// A "BufferedPacket" subclass that knows how to delimit the (possibly
+// multiple) Theora frames packed within a single RTP packet, using each
+// frame's 2-byte length prefix:
+class TheoraBufferedPacket: public BufferedPacket {
+public:
+  TheoraBufferedPacket();
+  virtual ~TheoraBufferedPacket();
+
+private: // redefined virtual functions
+  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+ unsigned dataSize);
+};
+
+// Factory used by "MultiFramedRTPSource" to create the packets above:
+class TheoraBufferedPacketFactory: public BufferedPacketFactory {
+private: // redefined virtual functions
+  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+
+///////// TheoraVideoRTPSource implementation ////////
+
+TheoraVideoRTPSource*
+TheoraVideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat) {
+  return new TheoraVideoRTPSource(env, RTPgs, rtpPayloadFormat);
+}
+
+// Constructor: Theora always uses a 90 kHz RTP timestamp clock; installs our
+// packet factory so that packed frames get delimited correctly.
+TheoraVideoRTPSource
+::TheoraVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, 90000,
+ new TheoraBufferedPacketFactory),
+    fCurPacketIdent(0) {
+}
+
+TheoraVideoRTPSource::~TheoraVideoRTPSource() {
+}
+
+// Parses the 4-byte Theora payload header (3-byte "Ident" + one byte of
+// F|TDT|numPkts), and uses the "F" (fragment type) field to work out whether
+// this packet begins and/or completes a frame.  Returns False (packet is
+// discarded) if the packet is too small or has a reserved TDT value.
+Boolean TheoraVideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+
+  resultSpecialHeaderSize = 4;
+  if (packetSize < resultSpecialHeaderSize) return False; // packet was too small
+
+  // The first 3 bytes of the header are the "Ident" field:
+  fCurPacketIdent = (headerStart[0]<<16) | (headerStart[1]<<8) | headerStart[2];
+
+  // The 4th byte is F|TDT|numPkts.
+  // Reject any packet with TDT == 3:
+  if ((headerStart[3]&0x30) == 0x30) return False;
+
+  u_int8_t F = headerStart[3]>>6; // 2-bit fragment type
+  fCurrentPacketBeginsFrame = F <= 1; // "Not Fragmented" or "Start Fragment"
+  fCurrentPacketCompletesFrame = F == 0 || F == 3; // "Not Fragmented" or "End Fragment"
+
+  return True;
+}
+
+// MIME type announced to downstream objects:
+char const* TheoraVideoRTPSource::MIMEtype() const {
+  return "video/THEORA";
+}
+
+
+////////// TheoraBufferedPacket and TheoraBufferedPacketFactory implementation //////////
+
+TheoraBufferedPacket::TheoraBufferedPacket() {
+}
+
+TheoraBufferedPacket::~TheoraBufferedPacket() {
+}
+
+// Each enclosed frame is preceded by a 2-byte, big-endian length field.
+// Advances "framePtr" past that field and returns the frame's size,
+// degrading gracefully (returning whatever data remains) on truncated or
+// inconsistent input rather than failing.
+unsigned TheoraBufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+  if (dataSize < 2) {
+    // There's not enough space for a 2-byte header. TARFU! Just return the data that's left:
+    return dataSize;
+  }
+
+  unsigned frameSize = (framePtr[0]<<8) | framePtr[1];
+  framePtr += 2;
+  if (frameSize > dataSize - 2) return dataSize - 2; // inconsistent frame size => just return all the data that's left
+
+  return frameSize;
+}
+
+// Factory hook: hand the source a fresh Theora-aware packet:
+BufferedPacket* TheoraBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
+  return new TheoraBufferedPacket();
+}
diff --git a/liveMedia/VP8VideoRTPSink.cpp b/liveMedia/VP8VideoRTPSink.cpp
new file mode 100644
index 0000000..80a8b81
--- /dev/null
+++ b/liveMedia/VP8VideoRTPSink.cpp
@@ -0,0 +1,68 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for VP8 video
+// Implementation
+
+#include "VP8VideoRTPSink.hh"
+
+// Constructor: VP8 uses a 90 kHz RTP timestamp clock and the "VP8"
+// payload format name; no other per-sink state is needed.
+VP8VideoRTPSink
+::VP8VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat)
+  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "VP8") {
+}
+
+VP8VideoRTPSink::~VP8VideoRTPSink() {
+}
+
+VP8VideoRTPSink*
+VP8VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
+  return new VP8VideoRTPSink(env, RTPgs, rtpPayloadFormat);
+}
+
+Boolean VP8VideoRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+  // A packet can contain only one frame
+  return False;
+}
+
+// Prepends the minimal 1-byte "VP8 Payload Descriptor" to each packet, and
+// sets the RTP marker bit and timestamp.  (The descriptor's 'S' bit is set
+// only on the first fragment of a frame.)
+void VP8VideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* /*frameStart*/,
+ unsigned /*numBytesInFrame*/,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+  // Set the "VP8 Payload Descriptor" (just the minimal required 1-byte version):
+  u_int8_t vp8PayloadDescriptor = fragmentationOffset == 0 ? 0x10 : 0x00;
+      // X = R = N = 0; PartID = 0; S = 1 iff this is the first (or only) fragment of the frame
+  setSpecialHeaderBytes(&vp8PayloadDescriptor, 1);
+
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the RTP 'M' ('marker') bit:
+    setMarkerBit();
+  }
+
+  // Also set the RTP timestamp:
+  setTimestamp(framePresentationTime);
+}
+
+
+unsigned VP8VideoRTPSink::specialHeaderSize() const {
+  // We include only the required 1-byte form of the "VP8 Payload Descriptor":
+  return 1;
+}
diff --git a/liveMedia/VP8VideoRTPSource.cpp b/liveMedia/VP8VideoRTPSource.cpp
new file mode 100644
index 0000000..d2c6a03
--- /dev/null
+++ b/liveMedia/VP8VideoRTPSource.cpp
@@ -0,0 +1,86 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// VP8 Video RTP Sources
+// Implementation
+
+#include "VP8VideoRTPSource.hh"
+
+VP8VideoRTPSource*
+VP8VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+  return new VP8VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency);
+}
+
+// Constructor: delegates entirely to "MultiFramedRTPSource" (default packet
+// factory); VP8-specific work happens in processSpecialHeader().
+VP8VideoRTPSource
+::VP8VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) {
+}
+
+VP8VideoRTPSource::~VP8VideoRTPSource() {
+}
+
+// Consume one descriptor byte: grow the special-header size, advance the
+// cursor, and bail out of the *enclosing function* (returning False, i.e.
+// "discard packet") if the packet ends before any payload follows.
+#define incrHeader do { ++resultSpecialHeaderSize; ++headerStart; if (--packetSize == 0) return False; } while (0)
+
+// Walks the variable-length (1- to 6-byte) "VP8 Payload Descriptor" to
+// compute its size, and sets the begins/completes-frame flags.
+Boolean VP8VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+
+  // The special header is from 1 to 6 bytes long.
+  if (packetSize == 0) return False; // error
+  resultSpecialHeaderSize = 1; // unless we learn otherwise
+
+  u_int8_t const byte1 = *headerStart;
+  Boolean const X = (byte1&0x80) != 0; // extended control bits present
+  Boolean const S = (byte1&0x10) != 0; // start of VP8 partition
+  u_int8_t const PartID = byte1&0x0F;
+
+  fCurrentPacketBeginsFrame = S && PartID == 0;
+  fCurrentPacketCompletesFrame = packet->rtpMarkerBit(); // RTP header's "M" bit
+
+  if (X) {
+    incrHeader;
+
+    // The (optional) extension byte says which further fields follow:
+    u_int8_t const byte2 = *headerStart;
+    Boolean const I = (byte2&0x80) != 0; // PictureID present
+    Boolean const L = (byte2&0x40) != 0; // TL0PICIDX present
+    Boolean const T = (byte2&0x20) != 0; // TID present
+    Boolean const K = (byte2&0x10) != 0; // KEYIDX present
+
+    if (I) {
+      incrHeader;
+      if ((*headerStart)&0x80) { // extension flag in the PictureID is set
+	incrHeader; // PictureID is 15 bits => one extra byte
+      }
+    }
+
+    if (L) incrHeader;
+    if (T||K) incrHeader; // TID and KEYIDX share a single byte
+  }
+
+  return True;
+}
+
+// MIME type announced to downstream objects:
+char const* VP8VideoRTPSource::MIMEtype() const {
+  return "video/VP8";
+}
diff --git a/liveMedia/VP9VideoRTPSink.cpp b/liveMedia/VP9VideoRTPSink.cpp
new file mode 100644
index 0000000..ab1d3bb
--- /dev/null
+++ b/liveMedia/VP9VideoRTPSink.cpp
@@ -0,0 +1,71 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for VP9 video
+// Implementation
+
+#include "VP9VideoRTPSink.hh"
+
+// Constructor: VP9 uses a 90 kHz RTP timestamp clock and the "VP9"
+// payload format name; no other per-sink state is needed.
+VP9VideoRTPSink
+::VP9VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat)
+  : VideoRTPSink(env, RTPgs, rtpPayloadFormat, 90000, "VP9") {
+}
+
+VP9VideoRTPSink::~VP9VideoRTPSink() {
+}
+
+VP9VideoRTPSink*
+VP9VideoRTPSink::createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat) {
+  return new VP9VideoRTPSink(env, RTPgs, rtpPayloadFormat);
+}
+
+Boolean VP9VideoRTPSink
+::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+  // A packet can contain only one frame
+  return False;
+}
+
+// Prepends the minimal 1-byte "VP9 Payload Descriptor" to each packet:
+// 'B' (0x10) on the first fragment, 'E' (0x08) on the last; also sets the
+// RTP marker bit and timestamp.
+void VP9VideoRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* /*frameStart*/,
+ unsigned /*numBytesInFrame*/,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+  // Set the "VP9 Payload Descriptor" (just the minimal required 1-byte version):
+  u_int8_t vp9PayloadDescriptor = fragmentationOffset == 0 ? 0x10 : 0x00;
+      // I = L = F = V = U = 0; S = 1 iff this is the first (or only) fragment of the frame
+
+  if (numRemainingBytes == 0) {
+    // This packet contains the last (or only) fragment of the frame.
+    // Set the E bit:
+    vp9PayloadDescriptor |= 0x08;
+    // Also set the RTP 'M' ('marker') bit:
+    setMarkerBit();
+  }
+
+  setSpecialHeaderBytes(&vp9PayloadDescriptor, 1);
+
+  // Also set the RTP timestamp:
+  setTimestamp(framePresentationTime);
+}
+
+
+unsigned VP9VideoRTPSink::specialHeaderSize() const {
+  // We include only the required 1-byte form of the "VP9 Payload Descriptor":
+  return 1;
+}
diff --git a/liveMedia/VP9VideoRTPSource.cpp b/liveMedia/VP9VideoRTPSource.cpp
new file mode 100644
index 0000000..150209f
--- /dev/null
+++ b/liveMedia/VP9VideoRTPSource.cpp
@@ -0,0 +1,108 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// VP9 Video RTP Sources
+// Implementation
+
+#include "VP9VideoRTPSource.hh"
+
+VP9VideoRTPSource*
+VP9VideoRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+  return new VP9VideoRTPSource(env, RTPgs, rtpPayloadFormat,
+ rtpTimestampFrequency);
+}
+
+// Constructor: delegates entirely to "MultiFramedRTPSource" (default packet
+// factory); VP9-specific work happens in processSpecialHeader().
+VP9VideoRTPSource
+::VP9VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency)
+  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency) {
+}
+
+VP9VideoRTPSource::~VP9VideoRTPSource() {
+}
+
+// Consume one descriptor byte: grow the special-header size, advance the
+// cursor, and bail out of the *enclosing function* (returning False, i.e.
+// "discard packet") if the packet ends before any payload follows.
+#define incrHeader do { ++resultSpecialHeaderSize; ++headerStart; if (--packetSize == 0) return False; } while (0)
+
+// Walks the variable-length "VP9 Payload Descriptor" (per the VP9 RTP
+// payload format Internet-Draft) to compute its size, and sets the
+// begins/completes-frame flags from the descriptor's own B and E bits.
+Boolean VP9VideoRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+  unsigned char* headerStart = packet->data();
+  unsigned packetSize = packet->dataSize();
+
+  // Figure out the size of the special header.
+  if (packetSize == 0) return False; // error
+  resultSpecialHeaderSize = 1; // unless we learn otherwise
+
+  u_int8_t const byte1 = *headerStart;
+  Boolean const I = (byte1&0x80) != 0; // PictureID present
+  Boolean const L = (byte1&0x40) != 0; // Layer indices present
+  Boolean const F = (byte1&0x20) != 0; // Reference indices present
+  Boolean const B = (byte1&0x10) != 0; // Begin of frame
+  Boolean const E = (byte1&0x08) != 0; // End of frame
+  Boolean const V = (byte1&0x04) != 0; // Scalability Structure present
+  Boolean const U = (byte1&0x02) != 0; // Scalability Structure Update present
+
+  fCurrentPacketBeginsFrame = B;
+  fCurrentPacketCompletesFrame = E;
+      // use this instead of the RTP header's 'M' bit (which might not be accurate)
+
+  if (I) { // PictureID present
+    incrHeader;
+    Boolean const M = ((*headerStart)&0x80) != 0; // 15-bit (2-byte) PictureID
+    if (M) incrHeader;
+  }
+
+  if (L) { // Layer indices present
+    incrHeader;
+    if (F) { // Reference indices present
+      incrHeader;
+      unsigned R = (*headerStart)&0x03; // number of reference indices
+      while (R-- > 0) {
+	incrHeader;
+	Boolean const X = ((*headerStart)&0x10) != 0; // extended (2-byte) index
+	if (X) incrHeader;
+      }
+    }
+  }
+
+  if (V) { // Scalability Structure (SS) present
+    incrHeader;
+    unsigned patternLength = *headerStart;
+    while (patternLength-- > 0) {
+      incrHeader;
+      unsigned R = (*headerStart)&0x03;
+      while (R-- > 0) {
+	incrHeader;
+	Boolean const X = ((*headerStart)&0x10) != 0;
+	if (X) incrHeader;
+      }
+    }
+  }
+
+  if (U) { // Scalability Structure Update (SU) present
+    return False; // This structure isn't yet defined in the VP9 payload format I-D
+  }
+
+  return True;
+}
+
+// MIME type announced to downstream objects:
+char const* VP9VideoRTPSource::MIMEtype() const {
+  return "video/VP9";
+}
diff --git a/liveMedia/VideoRTPSink.cpp b/liveMedia/VideoRTPSink.cpp
new file mode 100644
index 0000000..f9402a1
--- /dev/null
+++ b/liveMedia/VideoRTPSink.cpp
@@ -0,0 +1,36 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTP sink for video codecs (abstract base class)
+// Implementation
+
+#include "VideoRTPSink.hh"
+
+// Constructor: all real work is delegated to "MultiFramedRTPSink"; a generic
+// video sink adds no packetization state of its own.
+VideoRTPSink::VideoRTPSink(UsageEnvironment& env,
+ Groupsock* rtpgs, unsigned char rtpPayloadType,
+ unsigned rtpTimestampFrequency,
+ char const* rtpPayloadFormatName)
+  : MultiFramedRTPSink(env, rtpgs, rtpPayloadType, rtpTimestampFrequency,
+ rtpPayloadFormatName) {
+}
+
+VideoRTPSink::~VideoRTPSink() {
+}
+
+// The media type used in the SDP "m=" line for any video codec:
+char const* VideoRTPSink::sdpMediaType() const {
+  return "video";
+}
diff --git a/liveMedia/VorbisAudioRTPSink.cpp b/liveMedia/VorbisAudioRTPSink.cpp
new file mode 100644
index 0000000..e4142f7
--- /dev/null
+++ b/liveMedia/VorbisAudioRTPSink.cpp
@@ -0,0 +1,266 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for Vorbis audio
+// Implementation
+
+#include "VorbisAudioRTPSink.hh"
+#include "Base64.hh"
+#include "VorbisAudioRTPSource.hh" // for parseVorbisOrTheoraConfigStr()
+
+// Create a new Vorbis RTP sink from already-unpacked Vorbis stream headers.
+// The three header buffers are only read (in the constructor); the caller
+// retains ownership of them.
+VorbisAudioRTPSink* VorbisAudioRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels,
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField) {
+  return new VorbisAudioRTPSink(env, RTPgs,
+ rtpPayloadFormat, rtpTimestampFrequency, numChannels,
+ identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+}
+
+// Alternative creator: takes a packed, Base64-encoded 'config' string (as
+// found in a SDP "a=fmtp:" line), unpacks it into the three Vorbis headers
+// plus the 24-bit "Ident" field, constructs the sink, then frees the
+// heap-allocated headers (the sink does not retain pointers to them).
+VorbisAudioRTPSink* VorbisAudioRTPSink
+::createNew(UsageEnvironment& env, Groupsock* RTPgs,u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency, unsigned numChannels,
+ char const* configStr) {
+  // Begin by decoding and unpacking the configuration string:
+  u_int8_t* identificationHeader; unsigned identificationHeaderSize;
+  u_int8_t* commentHeader; unsigned commentHeaderSize;
+  u_int8_t* setupHeader; unsigned setupHeaderSize;
+  u_int32_t identField;
+
+  parseVorbisOrTheoraConfigStr(configStr,
+ identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+
+  VorbisAudioRTPSink* resultSink
+    = new VorbisAudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, numChannels,
+ identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+  // The unpacked headers were allocated by "parseVorbisOrTheoraConfigStr()":
+  delete[] identificationHeader; delete[] commentHeader; delete[] setupHeader;
+
+  return resultSink;
+}
+
+// Constructor.  If the Vorbis 'identification' header is present (and big
+// enough), the stream's estimated bitrate is derived from its three
+// little-endian 32-bit bitrate fields (maximum/nominal/minimum, at offsets
+// 16/20/24); negative values are treated as 'unset'.  Also builds the
+// "a=fmtp:" SDP line (left NULL on failure, so auxSDPLine() returns NULL).
+VorbisAudioRTPSink
+::VorbisAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency, unsigned numChannels,
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField)
+  : AudioRTPSink(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency, "VORBIS", numChannels),
+    fIdent(identField), fFmtpSDPLine(NULL) {
+  if (identificationHeaderSize >= 28) {
+    // Get the 'bitrate' values from this header, and use them to set our estimated bitrate:
+    u_int32_t val;
+    u_int8_t* p;
+
+    p = &identificationHeader[16];
+    val = ((p[3]*256 + p[2])*256 + p[1])*256 + p[0]; // i.e., little-endian
+    int bitrate_maximum = (int)val;
+    if (bitrate_maximum < 0) bitrate_maximum = 0; // negative => unset
+
+    p = &identificationHeader[20];
+    val = ((p[3]*256 + p[2])*256 + p[1])*256 + p[0]; // i.e., little-endian
+    int bitrate_nominal = (int)val;
+    if (bitrate_nominal < 0) bitrate_nominal = 0; // negative => unset
+
+    p = &identificationHeader[24];
+    val = ((p[3]*256 + p[2])*256 + p[1])*256 + p[0]; // i.e., little-endian
+    int bitrate_minimum = (int)val;
+    if (bitrate_minimum < 0) bitrate_minimum = 0; // negative => unset
+
+    // Prefer nominal, then maximum, then minimum:
+    int bitrate
+      = bitrate_nominal > 0 ? bitrate_nominal
+      : bitrate_maximum > 0 ? bitrate_maximum
+      : bitrate_minimum > 0 ? bitrate_minimum : 0;
+    if (bitrate > 0) estimatedBitrate() = ((unsigned)bitrate)/1000; // kbps
+  }
+
+  // Generate a 'config' string from the supplied configuration headers:
+  char* base64PackedHeaders
+    = generateVorbisOrTheoraConfigStr(identificationHeader, identificationHeaderSize,
+ commentHeader, commentHeaderSize,
+ setupHeader, setupHeaderSize,
+ identField);
+  if (base64PackedHeaders == NULL) return;
+
+  // Then use this 'config' string to construct our "a=fmtp:" SDP line:
+  unsigned fmtpSDPLineMaxSize = 50 + strlen(base64PackedHeaders); // 50 => more than enough space
+  fFmtpSDPLine = new char[fmtpSDPLineMaxSize];
+  sprintf(fFmtpSDPLine, "a=fmtp:%d configuration=%s\r\n", rtpPayloadType(), base64PackedHeaders);
+  delete[] base64PackedHeaders;
+}
+
+VorbisAudioRTPSink::~VorbisAudioRTPSink() {
+  delete[] fFmtpSDPLine;
+}
+
+// Returns the "a=fmtp:" line built in the constructor (NULL if it failed):
+char const* VorbisAudioRTPSink::auxSDPLine() {
+  return fFmtpSDPLine;
+}
+
+// Prepends each outgoing packet with the 4-byte Vorbis payload header
+// (3-byte "Ident" + one byte of F|VDT|numPkts), and each enclosed frame
+// with a 2-byte (big-endian) length field.
+void VorbisAudioRTPSink
+::doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes) {
+  // Set the 4-byte "payload header", as defined in RFC 5215, section 2.2:
+  u_int8_t header[4];
+
+  // The first three bytes of the header are our "Ident":
+  header[0] = fIdent>>16; header[1] = fIdent>>8; header[2] = fIdent;
+
+  // The final byte contains the "F", "VDT", and "numPkts" fields:
+  u_int8_t F; // Fragment type
+  if (numRemainingBytes > 0) {
+    if (fragmentationOffset > 0) {
+      F = 2<<6; // continuation fragment
+    } else {
+      F = 1<<6; // start fragment
+    }
+  } else {
+    if (fragmentationOffset > 0) {
+      F = 3<<6; // end fragment
+    } else {
+      F = 0<<6; // not fragmented
+    }
+  }
+  u_int8_t const VDT = 0<<4; // Vorbis Data Type (always a "Raw Vorbis payload")
+  u_int8_t numPkts = F == 0 ? (numFramesUsedSoFar() + 1): 0; // set to 0 when we're a fragment
+  header[3] = F|VDT|numPkts;
+
+  setSpecialHeaderBytes(header, sizeof header);
+
+  // There's also a 2-byte 'frame-specific' header: The length of the Vorbis data:
+  u_int8_t frameSpecificHeader[2];
+  frameSpecificHeader[0] = numBytesInFrame>>8; // high byte first (big-endian)
+  frameSpecificHeader[1] = numBytesInFrame;
+  setFrameSpecificHeaderBytes(frameSpecificHeader, 2);
+
+  // Important: Also call our base class's doSpecialFrameHandling(),
+  // to set the packet's timestamp:
+  MultiFramedRTPSink::doSpecialFrameHandling(fragmentationOffset,
+ frameStart, numBytesInFrame,
+ framePresentationTime,
+ numRemainingBytes);
+}
+
+Boolean VorbisAudioRTPSink::frameCanAppearAfterPacketStart(unsigned char const* /*frameStart*/,
+ unsigned /*numBytesInFrame*/) const {
+  // We allow more than one frame to be packed into an outgoing RTP packet, but no more than 15:
+  return numFramesUsedSoFar() <= 15;
+}
+
+// 4-byte payload header, once per packet:
+unsigned VorbisAudioRTPSink::specialHeaderSize() const {
+  return 4;
+}
+
+// 2-byte length field, once per enclosed frame:
+unsigned VorbisAudioRTPSink::frameSpecificHeaderSize() const {
+  return 2;
+}
+
+
+////////// generateVorbisOrTheoraConfigStr() implementation //////////
+
+// Packs up to three Vorbis/Theora stream headers (plus the 24-bit "Ident"
+// field) into the 'packed configuration headers' wire format, then returns
+// it Base64-encoded (caller owns the result; NULL on failure).
+// NOTE(review): a NULL header pointer is assumed to come with a size of 0 -
+// the pointer-advance statements below run unconditionally; verify callers.
+char* generateVorbisOrTheoraConfigStr(u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField) {
+  // First, count how many headers (<=3) are included, and how many bytes will be used
+  // to encode these headers' sizes:
+  unsigned numHeaders = 0;
+  unsigned sizeSize[2]; // The number of bytes used to encode the lengths of the first two headers (but not the length of the 3rd)
+  sizeSize[0] = sizeSize[1] = 0;
+  if (identificationHeaderSize > 0) {
+    // Lengths are encoded in 7-bit groups with a continuation flag bit:
+    sizeSize[numHeaders++] = identificationHeaderSize < 128 ? 1 : identificationHeaderSize < 16384 ? 2 : 3;
+  }
+  if (commentHeaderSize > 0) {
+    sizeSize[numHeaders++] = commentHeaderSize < 128 ? 1 : commentHeaderSize < 16384 ? 2 : 3;
+  }
+  if (setupHeaderSize > 0) {
+    ++numHeaders;
+  } else {
+    sizeSize[1] = 0; // We have at most two headers, so the second one's length isn't encoded
+  }
+  if (numHeaders == 0) return NULL; // With no headers, we can't set up a configuration
+  if (numHeaders == 1) sizeSize[0] = 0; // With only one header, its length isn't encoded
+
+  // Then figure out the size of the packed configuration headers, and allocate space for this:
+  unsigned length = identificationHeaderSize + commentHeaderSize + setupHeaderSize;
+      // The "length" field in the packed headers
+  if (length > (unsigned)0xFFFF) return NULL; // too big for a 16-bit field; we can't handle this
+  unsigned packedHeadersSize
+    = 4 // "Number of packed headers" field
+    + 3 // "ident" field
+    + 2 // "length" field
+    + 1 // "n. of headers" field
+    + sizeSize[0] + sizeSize[1] // "length1" and "length2" (if present) fields
+    + length;
+  u_int8_t* packedHeaders = new u_int8_t[packedHeadersSize];
+  if (packedHeaders == NULL) return NULL; // NOTE(review): dead check unless 'new' is non-throwing
+
+  // Fill in the 'packed headers':
+  u_int8_t* p = packedHeaders;
+  *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 1; // "Number of packed headers": 1
+  *p++ = identField>>16; *p++ = identField>>8; *p++ = identField; // "Ident" (24 bits)
+  *p++ = length>>8; *p++ = length; // "length" (16 bits)
+  *p++ = numHeaders-1; // "n. of headers"
+  if (numHeaders > 1) {
+    // Fill in the "length1" header:
+    unsigned length1 = identificationHeaderSize > 0 ? identificationHeaderSize : commentHeaderSize;
+    if (length1 >= 16384) {
+      *p++ = 0x80; // flag, but no more, because we know length1 <= 32767
+    }
+    if (length1 >= 128) {
+      *p++ = 0x80|((length1&0x3F80)>>7); // flag + the second 7 bits
+    }
+    *p++ = length1&0x7F; // the low 7 bits
+
+    if (numHeaders > 2) { // numHeaders == 3
+      // Fill in the "length2" header (for the 'Comment' header):
+      unsigned length2 = commentHeaderSize;
+      if (length2 >= 16384) {
+	*p++ = 0x80; // flag, but no more, because we know length2 <= 32767
+      }
+      if (length2 >= 128) {
+	*p++ = 0x80|((length2&0x3F80)>>7); // flag + the second 7 bits
+      }
+      *p++ = length2&0x7F; // the low 7 bits
+    }
+  }
+  // Copy each header:
+  // (Each "p +=" below is a separate statement, executed even if the guarded
+  //  memmove() is skipped; correct only under the size==0 assumption above.)
+  if (identificationHeader != NULL) memmove(p, identificationHeader, identificationHeaderSize); p += identificationHeaderSize;
+  if (commentHeader != NULL) memmove(p, commentHeader, commentHeaderSize); p += commentHeaderSize;
+  if (setupHeader != NULL) memmove(p, setupHeader, setupHeaderSize);
+
+  // Having set up the 'packed configuration headers', Base-64-encode this, for our result:
+  char* base64PackedHeaders = base64Encode((char const*)packedHeaders, packedHeadersSize);
+  delete[] packedHeaders;
+
+  return base64PackedHeaders;
+}
diff --git a/liveMedia/VorbisAudioRTPSource.cpp b/liveMedia/VorbisAudioRTPSource.cpp
new file mode 100644
index 0000000..aa0e7e9
--- /dev/null
+++ b/liveMedia/VorbisAudioRTPSource.cpp
@@ -0,0 +1,197 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Vorbis Audio RTP Sources
+// Implementation
+
+#include "VorbisAudioRTPSource.hh"
+#include "Base64.hh"
+
+////////// VorbisBufferedPacket and VorbisBufferedPacketFactory //////////
+
// A specialized "BufferedPacket" that knows how to delimit the Vorbis
// frames packed inside a single RTP payload (each preceded by a 2-byte
// big-endian length field).
class VorbisBufferedPacket: public BufferedPacket {
public:
  VorbisBufferedPacket();
  virtual ~VorbisBufferedPacket();

private: // redefined virtual functions
  // Returns the size of the next enclosed frame, advancing "framePtr"
  // past the 2-byte length field that precedes it.
  virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
					 unsigned dataSize);
};
+
// Factory that hands "VorbisBufferedPacket"s (rather than plain
// "BufferedPacket"s) to the owning "MultiFramedRTPSource".
class VorbisBufferedPacketFactory: public BufferedPacketFactory {
private: // redefined virtual functions
  virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
};
+
+
+///////// VorbisAudioRTPSource implementation ////////
+
+VorbisAudioRTPSource*
+VorbisAudioRTPSource::createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency) {
+ return new VorbisAudioRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency);
+}
+
VorbisAudioRTPSource
::VorbisAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
		       unsigned char rtpPayloadFormat,
		       unsigned rtpTimestampFrequency)
  : MultiFramedRTPSource(env, RTPgs, rtpPayloadFormat, rtpTimestampFrequency,
			 new VorbisBufferedPacketFactory),
    fCurPacketIdent(0) { // the "Ident" field is filled in per-packet, in processSpecialHeader()
}
+
VorbisAudioRTPSource::~VorbisAudioRTPSource() {
  // No resources of our own to release; the base class handles cleanup.
}
+
+Boolean VorbisAudioRTPSource
+::processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize) {
+ unsigned char* headerStart = packet->data();
+ unsigned packetSize = packet->dataSize();
+
+ resultSpecialHeaderSize = 4;
+ if (packetSize < resultSpecialHeaderSize) return False; // packet was too small
+
+ // The first 3 bytes of the header are the "Ident" field:
+ fCurPacketIdent = (headerStart[0]<<16) | (headerStart[1]<<8) | headerStart[2];
+
+ // The 4th byte is F|VDT|numPkts.
+ // Reject any packet with VDT == 3:
+ if ((headerStart[3]&0x30) == 0x30) return False;
+
+ u_int8_t F = headerStart[3]>>6;
+ fCurrentPacketBeginsFrame = F <= 1; // "Not Fragmented" or "Start Fragment"
+ fCurrentPacketCompletesFrame = F == 0 || F == 3; // "Not Fragmented" or "End Fragment"
+
+ return True;
+}
+
+char const* VorbisAudioRTPSource::MIMEtype() const {
+ return "audio/VORBIS";
+}
+
+
+////////// VorbisBufferedPacket and VorbisBufferedPacketFactory implementation //////////
+
// Trivial constructor/destructor; all framing state lives in the base class.
VorbisBufferedPacket::VorbisBufferedPacket() {
}

VorbisBufferedPacket::~VorbisBufferedPacket() {
}
+
+unsigned VorbisBufferedPacket
+::nextEnclosedFrameSize(unsigned char*& framePtr, unsigned dataSize) {
+ if (dataSize < 2) {
+ // There's not enough space for a 2-byte header. TARFU! Just return the data that's left:
+ return dataSize;
+ }
+
+ unsigned frameSize = (framePtr[0]<<8) | framePtr[1];
+ framePtr += 2;
+ if (frameSize > dataSize - 2) return dataSize - 2; // inconsistent frame size => just return all the data that's left
+
+ return frameSize;
+}
+
+BufferedPacket* VorbisBufferedPacketFactory
+::createNewPacket(MultiFramedRTPSource* /*ourSource*/) {
+ return new VorbisBufferedPacket();
+}
+
+
+////////// parseVorbisOrTheoraConfigStr() implementation //////////
+
// Parsing helpers for parseVorbisOrTheoraConfigStr() below.
// ADVANCE(n): consume n bytes of the decoded config buffer ("p"/"rem").
#define ADVANCE(n) do { p += (n); rem -= (n); } while (0)
// GET_ENCODED_VAL(n): read a variable-length, base-128 encoded value into n
// (high bit of each byte set => more bytes follow).  NOTE: the trailing
// "if (rem == 0) break" means this macro must be used inside an enclosing
// breakable block (here, the "do { ... } while (0)" in the parser).
#define GET_ENCODED_VAL(n) do { u_int8_t byte; n = 0; do { if (rem == 0) break; byte = *p; n = (n*128) + (byte&0x7F); ADVANCE(1); } while (byte&0x80); } while (0); if (rem == 0) break
+
// Parses a Base64-encoded Vorbis/Theora "configuration" string (the packed
// configuration headers described in RFC 5215) into its three component
// headers ("identification", "comment", "setup") plus the 24-bit "Ident"
// field.  On any parse error the header pointers are left NULL and the
// sizes 0.  The header buffers are heap-allocated here; presumably the
// caller takes ownership and delete[]s them — confirm against callers.
void parseVorbisOrTheoraConfigStr(char const* configStr,
				  u_int8_t*& identificationHdr, unsigned& identificationHdrSize,
				  u_int8_t*& commentHdr, unsigned& commentHdrSize,
				  u_int8_t*& setupHdr, unsigned& setupHdrSize,
				  u_int32_t& identField) {
  identificationHdr = commentHdr = setupHdr = NULL; // default values, in case an error occurs
  identificationHdrSize = commentHdrSize = setupHdrSize = 0; // ditto
  identField = 0; // ditto

  // Begin by Base64-decoding the configuration string:
  unsigned configDataSize;
  u_int8_t* configData = base64Decode(configStr, configDataSize);
  u_int8_t* p = configData;
  unsigned rem = configDataSize;

  do {
    // 4-byte (big-endian) count of packed headers:
    if (rem < 4) break;
    u_int32_t numPackedHeaders = (p[0]<<24)|(p[1]<<16)|(p[2]<<8)|p[3]; ADVANCE(4);
    if (numPackedHeaders == 0) break;

    // Use the first 'packed header' only.
    if (rem < 3) break;
    identField = (p[0]<<16)|(p[1]<<8)|p[2]; ADVANCE(3);

    // 2-byte (big-endian) total length of the header data that follows:
    if (rem < 2) break;
    u_int16_t length = (p[0]<<8)|p[1]; ADVANCE(2);

    // Variable-length-encoded "number of headers whose sizes are explicit"
    // (the size of the final header is implied by the remaining length):
    unsigned numHeaders;
    GET_ENCODED_VAL(numHeaders);

    Boolean success = False;
    for (unsigned i = 0; i < numHeaders+1 && i < 3; ++i) {
      success = False;
      unsigned headerSize;
      if (i < numHeaders) {
	// The header size is encoded:
	GET_ENCODED_VAL(headerSize);
	if (headerSize > length) break; // inconsistent size => bail out
	length -= headerSize;
      } else {
	// The last header is implicit: it takes whatever length remains:
	headerSize = length;
      }

      // Allocate space for the header bytes; we'll fill it in later
      if (i == 0) {
	identificationHdrSize = headerSize;
	identificationHdr = new u_int8_t[identificationHdrSize];
      } else if (i == 1) {
	commentHdrSize = headerSize;
	commentHdr = new u_int8_t[commentHdrSize];
      } else { // i == 2
	setupHdrSize = headerSize;
	setupHdr = new u_int8_t[setupHdrSize];
      }

      success = True;
    }
    if (!success) break;

    // Copy the remaining config bytes into the appropriate 'header' buffers
    // (nested, because each copy only makes sense if the previous buffer
    // was allocated — the headers are stored back-to-back):
    if (identificationHdr != NULL) {
      memmove(identificationHdr, p, identificationHdrSize); ADVANCE(identificationHdrSize);
      if (commentHdr != NULL) {
	memmove(commentHdr, p, commentHdrSize); ADVANCE(commentHdrSize);
	if (setupHdr != NULL) {
	  memmove(setupHdr, p, setupHdrSize); ADVANCE(setupHdrSize);
	}
      }
    }
  } while (0);

  delete[] configData;
}
diff --git a/liveMedia/WAVAudioFileServerMediaSubsession.cpp b/liveMedia/WAVAudioFileServerMediaSubsession.cpp
new file mode 100644
index 0000000..849db42
--- /dev/null
+++ b/liveMedia/WAVAudioFileServerMediaSubsession.cpp
@@ -0,0 +1,228 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an WAV audio file.
+// Implementation
+
+#include "WAVAudioFileServerMediaSubsession.hh"
+#include "WAVAudioFileSource.hh"
+#include "uLawAudioFilter.hh"
+#include "SimpleRTPSink.hh"
+
+WAVAudioFileServerMediaSubsession* WAVAudioFileServerMediaSubsession
+::createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
+ Boolean convertToULaw) {
+ return new WAVAudioFileServerMediaSubsession(env, fileName,
+ reuseFirstSource, convertToULaw);
+}
+
WAVAudioFileServerMediaSubsession
::WAVAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
				    Boolean reuseFirstSource, Boolean convertToULaw)
  : FileServerMediaSubsession(env, fileName, reuseFirstSource),
    fConvertToULaw(convertToULaw) { // if True, 16-bit PCM is converted to 8-bit u-law before streaming
}
+
WAVAudioFileServerMediaSubsession
::~WAVAudioFileServerMediaSubsession() {
  // Nothing of our own to clean up.
}
+
+void WAVAudioFileServerMediaSubsession
+::seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes) {
+ WAVAudioFileSource* wavSource;
+ if (fBitsPerSample > 8) {
+ // "inputSource" is a filter; its input source is the original WAV file source:
+ wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
+ } else {
+ // "inputSource" is the original WAV file source:
+ wavSource = (WAVAudioFileSource*)inputSource;
+ }
+
+ unsigned seekSampleNumber = (unsigned)(seekNPT*fSamplingFrequency);
+ unsigned seekByteNumber = seekSampleNumber*((fNumChannels*fBitsPerSample)/8);
+
+ wavSource->seekToPCMByte(seekByteNumber);
+
+ setStreamSourceDuration(inputSource, streamDuration, numBytes);
+}
+
+void WAVAudioFileServerMediaSubsession
+::setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes) {
+ WAVAudioFileSource* wavSource;
+ if (fBitsPerSample > 8) {
+ // "inputSource" is a filter; its input source is the original WAV file source:
+ wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
+ } else {
+ // "inputSource" is the original WAV file source:
+ wavSource = (WAVAudioFileSource*)inputSource;
+ }
+
+ unsigned numDurationSamples = (unsigned)(streamDuration*fSamplingFrequency);
+ unsigned numDurationBytes = numDurationSamples*((fNumChannels*fBitsPerSample)/8);
+ numBytes = (u_int64_t)numDurationBytes;
+
+ wavSource->limitNumBytesToStream(numDurationBytes);
+}
+
+void WAVAudioFileServerMediaSubsession
+::setStreamSourceScale(FramedSource* inputSource, float scale) {
+ int iScale = (int)scale;
+ WAVAudioFileSource* wavSource;
+ if (fBitsPerSample > 8) {
+ // "inputSource" is a filter; its input source is the original WAV file source:
+ wavSource = (WAVAudioFileSource*)(((FramedFilter*)inputSource)->inputSource());
+ } else {
+ // "inputSource" is the original WAV file source:
+ wavSource = (WAVAudioFileSource*)inputSource;
+ }
+
+ wavSource->setScaleFactor(iScale);
+}
+
// Opens the WAV file, records its audio parameters in our member
// variables, and (for PCM) prepends any filter needed to convert the
// samples to streamable form.  Returns NULL on failure.
FramedSource* WAVAudioFileServerMediaSubsession
::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
  FramedSource* resultSource = NULL;
  do {
    WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(envir(), fFileName);
    if (wavSource == NULL) break;

    // Get attributes of the audio source:

    fAudioFormat = wavSource->getAudioFormat();
    fBitsPerSample = wavSource->bitsPerSample();
    // We handle only 4,8,16,20,24 bits-per-sample audio:
    if (fBitsPerSample%4 != 0 || fBitsPerSample < 4 || fBitsPerSample > 24 || fBitsPerSample == 12) {
      envir() << "The input file contains " << fBitsPerSample << " bit-per-sample audio, which we don't handle\n";
      break;
    }
    fSamplingFrequency = wavSource->samplingFrequency();
    fNumChannels = wavSource->numChannels();
    unsigned bitsPerSecond = fSamplingFrequency*fBitsPerSample*fNumChannels;

    // Duration (seconds) = total PCM bits / bits-per-second:
    fFileDuration = (float)((8.0*wavSource->numPCMBytes())/(fSamplingFrequency*fNumChannels*fBitsPerSample));

    // Add in any filter necessary to transform the data prior to streaming:
    resultSource = wavSource; // by default
    if (fAudioFormat == WA_PCM) {
      if (fBitsPerSample == 16) {
	// Note that samples in the WAV audio file are in little-endian order.
	if (fConvertToULaw) {
	  // Add a filter that converts from raw 16-bit PCM audio to 8-bit u-law audio:
	  resultSource = uLawFromPCMAudioSource::createNew(envir(), wavSource, 1/*little-endian*/);
	  bitsPerSecond /= 2; // u-law halves the bit rate (16 -> 8 bits per sample)
	} else {
	  // Add a filter that converts from little-endian to network (big-endian) order:
	  resultSource = EndianSwap16::createNew(envir(), wavSource);
	}
      } else if (fBitsPerSample == 20 || fBitsPerSample == 24) {
	// Add a filter that converts from little-endian to network (big-endian) order:
	resultSource = EndianSwap24::createNew(envir(), wavSource);
      }
    }

    estBitrate = (bitsPerSecond+500)/1000; // kbps, rounded to nearest
    return resultSource;
  } while (0);

  // An error occurred:
  Medium::close(resultSource);
  return NULL;
}
+
// Creates a "SimpleRTPSink" for the file's audio format, using a static
// RTP payload type (per the well-known assignments) where one matches the
// format/frequency/channel combination, and the dynamic payload type
// otherwise.  Returns NULL for formats we don't recognize.
RTPSink* WAVAudioFileServerMediaSubsession
::createNewRTPSink(Groupsock* rtpGroupsock,
		   unsigned char rtpPayloadTypeIfDynamic,
		   FramedSource* /*inputSource*/) {
  do {
    char const* mimeType;
    unsigned char payloadFormatCode = rtpPayloadTypeIfDynamic; // by default, unless a static RTP payload type can be used
    if (fAudioFormat == WA_PCM) {
      if (fBitsPerSample == 16) {
	if (fConvertToULaw) {
	  mimeType = "PCMU";
	  if (fSamplingFrequency == 8000 && fNumChannels == 1) {
	    payloadFormatCode = 0; // a static RTP payload type
	  }
	} else {
	  mimeType = "L16";
	  if (fSamplingFrequency == 44100 && fNumChannels == 2) {
	    payloadFormatCode = 10; // a static RTP payload type
	  } else if (fSamplingFrequency == 44100 && fNumChannels == 1) {
	    payloadFormatCode = 11; // a static RTP payload type
	  }
	}
      } else if (fBitsPerSample == 20) {
	mimeType = "L20";
      } else if (fBitsPerSample == 24) {
	mimeType = "L24";
      } else { // fBitsPerSample == 8 (we assume that fBitsPerSample == 4 is only for WA_IMA_ADPCM)
	mimeType = "L8";
      }
    } else if (fAudioFormat == WA_PCMU) {
      mimeType = "PCMU";
      if (fSamplingFrequency == 8000 && fNumChannels == 1) {
	payloadFormatCode = 0; // a static RTP payload type
      }
    } else if (fAudioFormat == WA_PCMA) {
      mimeType = "PCMA";
      if (fSamplingFrequency == 8000 && fNumChannels == 1) {
	payloadFormatCode = 8; // a static RTP payload type
      }
    } else if (fAudioFormat == WA_IMA_ADPCM) {
      mimeType = "DVI4";
      // Use a static payload type, if one is defined:
      if (fNumChannels == 1) {
	if (fSamplingFrequency == 8000) {
	  payloadFormatCode = 5; // a static RTP payload type
	} else if (fSamplingFrequency == 16000) {
	  payloadFormatCode = 6; // a static RTP payload type
	} else if (fSamplingFrequency == 11025) {
	  payloadFormatCode = 16; // a static RTP payload type
	} else if (fSamplingFrequency == 22050) {
	  payloadFormatCode = 17; // a static RTP payload type
	}
      }
    } else { //unknown format
      break;
    }

    return SimpleRTPSink::createNew(envir(), rtpGroupsock,
				    payloadFormatCode, fSamplingFrequency,
				    "audio", mimeType, fNumChannels);
  } while (0);

  // An error occurred:
  return NULL;
}
+
+void WAVAudioFileServerMediaSubsession::testScaleFactor(float& scale) {
+ if (fFileDuration <= 0.0) {
+ // The file is non-seekable, so is probably a live input source.
+ // We don't support scale factors other than 1
+ scale = 1;
+ } else {
+ // We support any integral scale, other than 0
+ int iScale = scale < 0.0 ? (int)(scale - 0.5) : (int)(scale + 0.5); // round
+ if (iScale == 0) iScale = 1;
+ scale = (float)iScale;
+ }
+}
+
float WAVAudioFileServerMediaSubsession::duration() const {
  // Computed in createNewStreamSource(); <= 0.0 indicates a non-seekable source.
  return fFileDuration;
}
diff --git a/liveMedia/WAVAudioFileSource.cpp b/liveMedia/WAVAudioFileSource.cpp
new file mode 100644
index 0000000..ae51820
--- /dev/null
+++ b/liveMedia/WAVAudioFileSource.cpp
@@ -0,0 +1,353 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A WAV audio file source
+// Implementation
+
+#include "WAVAudioFileSource.hh"
+#include "InputFile.hh"
+#include "GroupsockHelper.hh"
+
+////////// WAVAudioFileSource //////////
+
+WAVAudioFileSource*
+WAVAudioFileSource::createNew(UsageEnvironment& env, char const* fileName) {
+ do {
+ FILE* fid = OpenInputFile(env, fileName);
+ if (fid == NULL) break;
+
+ WAVAudioFileSource* newSource = new WAVAudioFileSource(env, fid);
+ if (newSource != NULL && newSource->bitsPerSample() == 0) {
+ // The WAV file header was apparently invalid.
+ Medium::close(newSource);
+ break;
+ }
+
+ newSource->fFileSize = (unsigned)GetFileSize(fileName, fid);
+
+ return newSource;
+ } while (0);
+
+ return NULL;
+}
+
+unsigned WAVAudioFileSource::numPCMBytes() const {
+ if (fFileSize < fWAVHeaderSize) return 0;
+ return fFileSize - fWAVHeaderSize;
+}
+
+void WAVAudioFileSource::setScaleFactor(int scale) {
+ if (!fFidIsSeekable) return; // we can't do 'trick play' operations on non-seekable files
+
+ fScaleFactor = scale;
+
+ if (fScaleFactor < 0 && TellFile64(fFid) > 0) {
+ // Because we're reading backwards, seek back one sample, to ensure that
+ // (i) we start reading the last sample before the start point, and
+ // (ii) we don't hit end-of-file on the first read.
+ int bytesPerSample = (fNumChannels*fBitsPerSample)/8;
+ if (bytesPerSample == 0) bytesPerSample = 1;
+ SeekFile64(fFid, -bytesPerSample, SEEK_CUR);
+ }
+}
+
+void WAVAudioFileSource::seekToPCMByte(unsigned byteNumber) {
+ byteNumber += fWAVHeaderSize;
+ if (byteNumber > fFileSize) byteNumber = fFileSize;
+
+ SeekFile64(fFid, byteNumber, SEEK_SET);
+}
+
+void WAVAudioFileSource::limitNumBytesToStream(unsigned numBytesToStream) {
+ fNumBytesToStream = numBytesToStream;
+ fLimitNumBytesToStream = fNumBytesToStream > 0;
+}
+
unsigned char WAVAudioFileSource::getAudioFormat() {
  // One of the WA_* codes parsed from the file's FORMAT chunk (see constructor).
  return fAudioFormat;
}
+
+
// Reads the next byte from "fid" (evaluates to EOF at end-of-file):
#define nextc fgetc(fid)
+
+static Boolean get4Bytes(FILE* fid, u_int32_t& result) { // little-endian
+ int c0, c1, c2, c3;
+ if ((c0 = nextc) == EOF || (c1 = nextc) == EOF ||
+ (c2 = nextc) == EOF || (c3 = nextc) == EOF) return False;
+ result = (c3<<24)|(c2<<16)|(c1<<8)|c0;
+ return True;
+}
+
+static Boolean get2Bytes(FILE* fid, u_int16_t& result) {//little-endian
+ int c0, c1;
+ if ((c0 = nextc) == EOF || (c1 = nextc) == EOF) return False;
+ result = (c1<<8)|c0;
+ return True;
+}
+
+static Boolean skipBytes(FILE* fid, int num) {
+ while (num-- > 0) {
+ if (nextc == EOF) return False;
+ }
+ return True;
+}
+
// Parses the WAV file header from "fid", filling in the audio parameters
// (format, channels, sampling frequency, bits-per-sample) and recording
// where the sample data starts.  On any parse failure, "fBitsPerSample"
// is set to 0 — createNew() checks for this.
WAVAudioFileSource::WAVAudioFileSource(UsageEnvironment& env, FILE* fid)
  : AudioInputDevice(env, 0, 0, 0, 0)/* set the real parameters later */,
    fFid(fid), fFidIsSeekable(False), fLastPlayTime(0), fHaveStartedReading(False), fWAVHeaderSize(0), fFileSize(0),
    fScaleFactor(1), fLimitNumBytesToStream(False), fNumBytesToStream(0), fAudioFormat(WA_UNKNOWN) {
  // Check the WAV file header for validity.
  // Note: The following web pages contain info about the WAV format:
  // http://www.ringthis.com/dev/wave_format.htm
  // http://www.lightlink.com/tjweber/StripWav/Canon.html
  // http://www.onicos.com/staff/iz/formats/wav.html

  Boolean success = False; // until we learn otherwise
  do {
    // RIFF Chunk: "RIFF" <4-byte file size, skipped> "WAVE"
    if (nextc != 'R' || nextc != 'I' || nextc != 'F' || nextc != 'F') break;
    if (!skipBytes(fid, 4)) break;
    if (nextc != 'W' || nextc != 'A' || nextc != 'V' || nextc != 'E') break;

    // Skip over any chunk that's not a FORMAT ('fmt ') chunk:
    u_int32_t tmp;
    if (!get4Bytes(fid, tmp)) break;
    while (tmp != 0x20746d66/*'fmt ', little-endian*/) {
      // Skip this chunk:
      // NOTE(review): the "break"s below exit only this "while" loop, so
      // a file truncated here falls through to the FORMAT-chunk parsing
      // below; that parsing then fails at EOF and "success" stays False —
      // confirm this is the intended behavior.
      u_int32_t chunkLength;
      if (!get4Bytes(fid, chunkLength)) break;
      if (!skipBytes(fid, chunkLength)) break;
      if (!get4Bytes(fid, tmp)) break;
    }

    // FORMAT Chunk (the 4-byte header code has already been parsed):
    unsigned formatLength;
    if (!get4Bytes(fid, formatLength)) break;
    unsigned short audioFormat;
    if (!get2Bytes(fid, audioFormat)) break;

    fAudioFormat = (unsigned char)audioFormat;
    if (fAudioFormat != WA_PCM && fAudioFormat != WA_PCMA && fAudioFormat != WA_PCMU && fAudioFormat != WA_IMA_ADPCM) {
      // It's a format that we don't (yet) understand
      env.setResultMsg("Audio format is not one that we handle (PCM/PCMU/PCMA or IMA ADPCM)");
      break;
    }
    unsigned short numChannels;
    if (!get2Bytes(fid, numChannels)) break;
    fNumChannels = (unsigned char)numChannels;
    if (fNumChannels < 1 || fNumChannels > 2) { // invalid # channels
      char errMsg[100];
      sprintf(errMsg, "Bad # channels: %d", fNumChannels);
      env.setResultMsg(errMsg);
      break;
    }
    if (!get4Bytes(fid, fSamplingFrequency)) break;
    if (fSamplingFrequency == 0) {
      env.setResultMsg("Bad sampling frequency: 0");
      break;
    }
    if (!skipBytes(fid, 6)) break; // "nAvgBytesPerSec" (4 bytes) + "nBlockAlign" (2 bytes)
    unsigned short bitsPerSample;
    if (!get2Bytes(fid, bitsPerSample)) break;
    fBitsPerSample = (unsigned char)bitsPerSample;
    if (fBitsPerSample == 0) {
      env.setResultMsg("Bad bits-per-sample: 0");
      break;
    }
    // Skip any extra format bytes beyond the standard 16:
    if (!skipBytes(fid, formatLength - 16)) break;

    // FACT chunk (optional):
    int c = nextc;
    if (c == 'f') {
      if (nextc != 'a' || nextc != 'c' || nextc != 't') break;
      unsigned factLength;
      if (!get4Bytes(fid, factLength)) break;
      if (!skipBytes(fid, factLength)) break;
      c = nextc;
    }

    // EYRE chunk (optional):
    if (c == 'e') {
      if (nextc != 'y' || nextc != 'r' || nextc != 'e') break;
      unsigned eyreLength;
      if (!get4Bytes(fid, eyreLength)) break;
      if (!skipBytes(fid, eyreLength)) break;
      c = nextc;
    }

    // DATA Chunk:
    if (c != 'd' || nextc != 'a' || nextc != 't' || nextc != 'a') break;
    if (!skipBytes(fid, 4)) break; // the data-chunk length field

    // The header is good; the remaining data are the sample bytes.
    fWAVHeaderSize = (unsigned)TellFile64(fid);
    success = True;
  } while (0);

  if (!success) {
    env.setResultMsg("Bad WAV file format");
    // Set "fBitsPerSample" to zero, to indicate failure:
    fBitsPerSample = 0;
    return;
  }

  fPlayTimePerSample = 1e6/(double)fSamplingFrequency; // in microseconds

  // Although PCM is a sample-based format, we group samples into
  // 'frames' for efficient delivery to clients. Set up our preferred
  // frame size to be close to 20 ms, if possible, but always no greater
  // than 1400 bytes (to ensure that it will fit in a single RTP packet)
  unsigned maxSamplesPerFrame = (1400*8)/(fNumChannels*fBitsPerSample);
  unsigned desiredSamplesPerFrame = (unsigned)(0.02*fSamplingFrequency);
  unsigned samplesPerFrame = desiredSamplesPerFrame < maxSamplesPerFrame ? desiredSamplesPerFrame : maxSamplesPerFrame;
  fPreferredFrameSize = (samplesPerFrame*fNumChannels*fBitsPerSample)/8;

  fFidIsSeekable = FileIsSeekable(fFid);
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
  // Now that we've finished reading the WAV header, all future reads (of audio samples) from the file will be asynchronous:
  makeSocketNonBlocking(fileno(fFid));
#endif
}
+
WAVAudioFileSource::~WAVAudioFileSource() {
  if (fFid == NULL) return; // nothing to clean up if there's no open file

#ifndef READ_FROM_FILES_SYNCHRONOUSLY
  // Stop watching the file descriptor before closing it:
  envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
#endif

  CloseInputFile(fFid);
}
+
// Called when the downstream reader wants the next frame of samples.
void WAVAudioFileSource::doGetNextFrame() {
  // Signal end-of-stream at EOF, on a file error, or once the requested
  // byte limit (from limitNumBytesToStream()) has been exhausted:
  if (feof(fFid) || ferror(fFid) || (fLimitNumBytesToStream && fNumBytesToStream == 0)) {
    handleClosure();
    return;
  }

  fFrameSize = 0; // until it's set later
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
  doReadFromFile();
#else
  if (!fHaveStartedReading) {
    // Await readable data from the file:
    envir().taskScheduler().turnOnBackgroundReadHandling(fileno(fFid),
	       (TaskScheduler::BackgroundHandlerProc*)&fileReadableHandler, this);
    fHaveStartedReading = True;
  }
#endif
}
+
void WAVAudioFileSource::doStopGettingFrames() {
  // Cancel any pending 'deliver data' task:
  envir().taskScheduler().unscheduleDelayedTask(nextTask());
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
  // Also stop watching the file for readability; doGetNextFrame() will
  // re-register the handler if reading resumes:
  envir().taskScheduler().turnOffBackgroundReadHandling(fileno(fFid));
  fHaveStartedReading = False;
#endif
}
+
+void WAVAudioFileSource::fileReadableHandler(WAVAudioFileSource* source, int /*mask*/) {
+ if (!source->isCurrentlyAwaitingData()) {
+ source->doStopGettingFrames(); // we're not ready for the data yet
+ return;
+ }
+ source->doReadFromFile();
+}
+
// Reads sample bytes from the file into the reader's buffer ("fTo"),
// sets the frame's presentation time and duration, and then delivers
// the frame.  For 'trick play' (fScaleFactor != 1) it reads one sample
// at a time, seeking between samples.
void WAVAudioFileSource::doReadFromFile() {
  // Try to read as many bytes as will fit in the buffer provided (or "fPreferredFrameSize" if less)
  if (fLimitNumBytesToStream && fNumBytesToStream < fMaxSize) {
    fMaxSize = fNumBytesToStream;
  }
  if (fPreferredFrameSize < fMaxSize) {
    fMaxSize = fPreferredFrameSize;
  }
  unsigned bytesPerSample = (fNumChannels*fBitsPerSample)/8;
  if (bytesPerSample == 0) bytesPerSample = 1; // because we can't read less than a byte at a time

  // For 'trick play', read one sample at a time; otherwise (normal case) read samples in bulk:
  unsigned bytesToRead = fScaleFactor == 1 ? fMaxSize - fMaxSize%bytesPerSample : bytesPerSample;
  unsigned numBytesRead;
  while (1) { // loop for 'trick play' only
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
    numBytesRead = fread(fTo, 1, bytesToRead, fFid);
#else
    if (fFidIsSeekable) {
      numBytesRead = fread(fTo, 1, bytesToRead, fFid);
    } else {
      // For non-seekable files (e.g., pipes), call "read()" rather than "fread()", to ensure that the read doesn't block:
      numBytesRead = read(fileno(fFid), fTo, bytesToRead);
    }
#endif
    if (numBytesRead == 0) {
      // End-of-file (or a failed read): signal end-of-stream:
      handleClosure();
      return;
    }
    fFrameSize += numBytesRead;
    fTo += numBytesRead;
    fMaxSize -= numBytesRead;
    fNumBytesToStream -= numBytesRead;

    // If we did an asynchronous read, and didn't read an integral number of samples, then we need to wait for another read:
#ifndef READ_FROM_FILES_SYNCHRONOUSLY
    if (fFrameSize%bytesPerSample > 0) return;
#endif

    // If we're doing 'trick play', then seek to the appropriate place for reading the next sample,
    // and keep reading until we fill the provided buffer:
    if (fScaleFactor != 1) {
      // (fScaleFactor-1) skips forward; a negative scale seeks backwards:
      SeekFile64(fFid, (fScaleFactor-1)*bytesPerSample, SEEK_CUR);
      if (fMaxSize < bytesPerSample) break;
    } else {
      break; // from the loop (normal case)
    }
  }

  // Set the 'presentation time' and 'duration' of this frame:
  if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
    // This is the first frame, so use the current time:
    gettimeofday(&fPresentationTime, NULL);
  } else {
    // Increment by the play time of the previous data:
    unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
    fPresentationTime.tv_sec += uSeconds/1000000;
    fPresentationTime.tv_usec = uSeconds%1000000;
  }

  // Remember the play time of this data (in microseconds):
  fDurationInMicroseconds = fLastPlayTime
    = (unsigned)((fPlayTimePerSample*fFrameSize)/bytesPerSample);

  // Inform the reader that he has data:
#ifdef READ_FROM_FILES_SYNCHRONOUSLY
  // To avoid possible infinite recursion, we need to return to the event loop to do this:
  nextTask() = envir().taskScheduler().scheduleDelayedTask(0,
				(TaskFunc*)FramedSource::afterGetting, this);
#else
  // Because the file read was done from the event loop, we can call the
  // 'after getting' function directly, without risk of infinite recursion:
  FramedSource::afterGetting(this);
#endif
}
+
Boolean WAVAudioFileSource::setInputPort(int /*portIndex*/) {
  // A file source has no selectable input ports; report success unconditionally.
  return True;
}
+
double WAVAudioFileSource::getAverageLevel() const {
  // Audio level metering is not implemented for file sources:
  return 0.0;//##### fix this later
}
diff --git a/liveMedia/include/AC3AudioFileServerMediaSubsession.hh b/liveMedia/include/AC3AudioFileServerMediaSubsession.hh
new file mode 100644
index 0000000..52afbc1
--- /dev/null
+++ b/liveMedia/include/AC3AudioFileServerMediaSubsession.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an AC3 audio file.
+// C++ header
+
+#ifndef _AC3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _AC3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
// A 'ServerMediaSubsession' that streams AC3 audio, on demand, from a file.
class AC3AudioFileServerMediaSubsession: public FileServerMediaSubsession{
public:
  static AC3AudioFileServerMediaSubsession*
  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);

private:
  AC3AudioFileServerMediaSubsession(UsageEnvironment& env,
				    char const* fileName, Boolean reuseFirstSource);
      // called only by createNew();
  virtual ~AC3AudioFileServerMediaSubsession();

private: // redefined virtual functions
  // Creates the framed source chain that reads AC3 frames from the file:
  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
					      unsigned& estBitrate);
  // Creates the RTP sink that packetizes those frames:
  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
                                    unsigned char rtpPayloadTypeIfDynamic,
				    FramedSource* inputSource);
};
+
+#endif
diff --git a/liveMedia/include/AC3AudioRTPSink.hh b/liveMedia/include/AC3AudioRTPSink.hh
new file mode 100644
index 0000000..81332c1
--- /dev/null
+++ b/liveMedia/include/AC3AudioRTPSink.hh
@@ -0,0 +1,57 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for AC3 audio
+// C++ header
+
+#ifndef _AC3_AUDIO_RTP_SINK_HH
+#define _AC3_AUDIO_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
// An RTP sink that packetizes AC3 audio frames.
class AC3AudioRTPSink: public AudioRTPSink {
public:
  static AC3AudioRTPSink* createNew(UsageEnvironment& env,
				    Groupsock* RTPgs,
				    u_int8_t rtpPayloadFormat,
				    u_int32_t rtpTimestampFrequency);

protected:
  AC3AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
		  u_int8_t rtpPayloadFormat,
		  u_int32_t rtpTimestampFrequency);
	// called only by createNew()

  virtual ~AC3AudioRTPSink();

private: // redefined virtual functions:
  // Controls how frames are packed together into (or fragmented across) RTP packets:
  virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
						 unsigned numBytesInFrame) const;
  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
                                      unsigned char* frameStart,
                                      unsigned numBytesInFrame,
                                      struct timeval framePresentationTime,
                                      unsigned numRemainingBytes);
  virtual unsigned specialHeaderSize() const;

private:
  unsigned char fTotNumFragmentsUsed; // used only if a frame gets fragmented across multiple packets
};
+
+#endif
diff --git a/liveMedia/include/AC3AudioRTPSource.hh b/liveMedia/include/AC3AudioRTPSource.hh
new file mode 100644
index 0000000..7d962d1
--- /dev/null
+++ b/liveMedia/include/AC3AudioRTPSource.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// AC3 Audio RTP Sources
+// C++ header
+
+#ifndef _AC3_AUDIO_RTP_SOURCE_HH
+#define _AC3_AUDIO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class AC3AudioRTPSource: public MultiFramedRTPSource {
+public:
+ // Creates a source for receiving AC-3 audio carried in RTP packets
+ // read from "RTPgs".
+ static AC3AudioRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+
+protected:
+ virtual ~AC3AudioRTPSource();
+
+private:
+ AC3AudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/AC3AudioStreamFramer.hh b/liveMedia/include/AC3AudioStreamFramer.hh
new file mode 100644
index 0000000..5674989
--- /dev/null
+++ b/liveMedia/include/AC3AudioStreamFramer.hh
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an AC3 audio elementary stream into frames
+// C++ header
+
+#ifndef _AC3_AUDIO_STREAM_FRAMER_HH
+#define _AC3_AUDIO_STREAM_FRAMER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class AC3AudioStreamFramer: public FramedFilter {
+public:
+ static AC3AudioStreamFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ unsigned char streamCode = 0);
+ // If "streamCode" != 0, then we assume that there's a 1-byte code at the beginning of each chunk of data that we read from
+ // our source. If that code is not the value we want, we discard the chunk of data.
+ // However, if "streamCode" == 0 (the default), then we don't expect this 1-byte code.
+
+ // Sampling rate of the AC-3 stream. NOTE(review): presumably determined
+ // by the internal parser once frames have been read — confirm with the .cpp.
+ unsigned samplingRate();
+
+ void flushInput(); // called if there is a discontinuity (seeking) in the input
+
+private:
+ AC3AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+ unsigned char streamCode);
+ // called only by createNew()
+ virtual ~AC3AudioStreamFramer();
+
+ // Static trampoline: "clientData" is the AC3AudioStreamFramer instance;
+ // forwards to the non-static overload below.
+ static void handleNewData(void* clientData,
+ unsigned char* ptr, unsigned size,
+ struct timeval presentationTime);
+ void handleNewData(unsigned char* ptr, unsigned size);
+
+ void parseNextFrame();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ struct timeval currentFramePlayTime() const;
+
+private:
+ struct timeval fNextFramePresentationTime;
+
+private: // parsing state
+ class AC3AudioStreamParser* fParser;
+ unsigned char fOurStreamCode; // the "streamCode" passed to createNew()
+ friend class AC3AudioStreamParser; // hack
+};
+
+#endif
diff --git a/liveMedia/include/ADTSAudioFileServerMediaSubsession.hh b/liveMedia/include/ADTSAudioFileServerMediaSubsession.hh
new file mode 100644
index 0000000..47a769c
--- /dev/null
+++ b/liveMedia/include/ADTSAudioFileServerMediaSubsession.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an AAC audio file in ADTS format
+// C++ header
+
+#ifndef _ADTS_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _ADTS_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class ADTSAudioFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ // Creates a subsession that serves the ADTS (AAC) file named "fileName";
+ // "reuseFirstSource" is passed through to FileServerMediaSubsession.
+ static ADTSAudioFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+protected:
+ ADTSAudioFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew();
+ virtual ~ADTSAudioFileServerMediaSubsession();
+
+protected: // redefined virtual functions
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+};
+
+#endif
diff --git a/liveMedia/include/ADTSAudioFileSource.hh b/liveMedia/include/ADTSAudioFileSource.hh
new file mode 100644
index 0000000..365d6cc
--- /dev/null
+++ b/liveMedia/include/ADTSAudioFileSource.hh
@@ -0,0 +1,56 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source object for AAC audio files in ADTS format
+// C++ header
+
+#ifndef _ADTS_AUDIO_FILE_SOURCE_HH
+#define _ADTS_AUDIO_FILE_SOURCE_HH
+
+#ifndef _FRAMED_FILE_SOURCE_HH
+#include "FramedFileSource.hh"
+#endif
+
+class ADTSAudioFileSource: public FramedFileSource {
+public:
+ // Opens "fileName" and creates a source that delivers the AAC frames it
+ // contains (returns NULL on failure — NOTE(review): presumed from the
+ // usual createNew() convention in this library; confirm with the .cpp).
+ static ADTSAudioFileSource* createNew(UsageEnvironment& env,
+ char const* fileName);
+
+ unsigned samplingFrequency() const { return fSamplingFrequency; }
+ unsigned numChannels() const { return fNumChannels; }
+ char const* configStr() const { return fConfigStr; }
+ // returns the 'AudioSpecificConfig' for this stream (in ASCII form)
+
+private:
+ ADTSAudioFileSource(UsageEnvironment& env, FILE* fid, u_int8_t profile,
+ u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration);
+ // called only by createNew()
+
+ virtual ~ADTSAudioFileSource();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ unsigned fSamplingFrequency;
+ unsigned fNumChannels;
+ unsigned fuSecsPerFrame; // frame duration, in microseconds
+ char fConfigStr[5]; // room for 4 ASCII hex digits + NUL — NOTE(review): presumed; confirm
+};
+
+#endif
diff --git a/liveMedia/include/AMRAudioFileServerMediaSubsession.hh b/liveMedia/include/AMRAudioFileServerMediaSubsession.hh
new file mode 100644
index 0000000..de81d28
--- /dev/null
+++ b/liveMedia/include/AMRAudioFileServerMediaSubsession.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an AMR audio file.
+// C++ header
+
+#ifndef _AMR_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _AMR_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class AMRAudioFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ // Creates a subsession that serves the AMR audio file named "fileName";
+ // "reuseFirstSource" is passed through to FileServerMediaSubsession.
+ static AMRAudioFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+private:
+ AMRAudioFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew();
+ virtual ~AMRAudioFileServerMediaSubsession();
+
+private: // redefined virtual functions
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+};
+
+#endif
diff --git a/liveMedia/include/AMRAudioFileSink.hh b/liveMedia/include/AMRAudioFileSink.hh
new file mode 100644
index 0000000..cb0c173
--- /dev/null
+++ b/liveMedia/include/AMRAudioFileSink.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// AMR Audio File Sinks
+// C++ header
+
+#ifndef _AMR_AUDIO_FILE_SINK_HH
+#define _AMR_AUDIO_FILE_SINK_HH
+
+#ifndef _FILE_SINK_HH
+#include "FileSink.hh"
+#endif
+
+class AMRAudioFileSink: public FileSink {
+public:
+ static AMRAudioFileSink* createNew(UsageEnvironment& env, char const* fileName,
+ unsigned bufferSize = 10000,
+ Boolean oneFilePerFrame = False);
+ // (See "FileSink.hh" for a description of these parameters.)
+
+protected:
+ AMRAudioFileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize,
+ char const* perFrameFileNamePrefix);
+ // called only by createNew()
+ virtual ~AMRAudioFileSink();
+
+protected: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+ virtual void afterGettingFrame(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime);
+
+protected:
+ // True once the AMR file header has been written to the output file —
+ // NOTE(review): presumed from the name; confirm against the .cpp.
+ Boolean fHaveWrittenHeader;
+};
+
+#endif
diff --git a/liveMedia/include/AMRAudioFileSource.hh b/liveMedia/include/AMRAudioFileSource.hh
new file mode 100644
index 0000000..8ab4938
--- /dev/null
+++ b/liveMedia/include/AMRAudioFileSource.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source object for AMR audio files (as defined in RFC 4867, section 5)
+// C++ header
+
+#ifndef _AMR_AUDIO_FILE_SOURCE_HH
+#define _AMR_AUDIO_FILE_SOURCE_HH
+
+#ifndef _AMR_AUDIO_SOURCE_HH
+#include "AMRAudioSource.hh"
+#endif
+
+class AMRAudioFileSource: public AMRAudioSource {
+public:
+ // Opens "fileName" and creates a source that delivers the AMR frames it
+ // contains (file format per RFC 4867, section 5 — see the file header).
+ static AMRAudioFileSource* createNew(UsageEnvironment& env,
+ char const* fileName);
+
+private:
+ AMRAudioFileSource(UsageEnvironment& env, FILE* fid,
+ Boolean isWideband, unsigned numChannels);
+ // called only by createNew()
+
+ virtual ~AMRAudioFileSource();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ FILE* fFid; // the open input file being read
+};
+
+#endif
diff --git a/liveMedia/include/AMRAudioRTPSink.hh b/liveMedia/include/AMRAudioRTPSink.hh
new file mode 100644
index 0000000..e23da76
--- /dev/null
+++ b/liveMedia/include/AMRAudioRTPSink.hh
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for AMR audio (RFC 4867)
+// C++ header
+
+#ifndef _AMR_AUDIO_RTP_SINK_HH
+#define _AMR_AUDIO_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
+class AMRAudioRTPSink: public AudioRTPSink {
+public:
+ // Creates a sink that sends AMR audio over RTP (RFC 4867) via "RTPgs".
+ // "sourceIsWideband" selects AMR-WB; "numChannelsInSource" is the
+ // channel count of the input source.
+ static AMRAudioRTPSink* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ Boolean sourceIsWideband = False,
+ unsigned numChannelsInSource = 1);
+
+ Boolean sourceIsWideband() const { return fSourceIsWideband; }
+
+protected:
+ AMRAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ Boolean sourceIsWideband, unsigned numChannelsInSource);
+ // called only by createNew()
+
+ virtual ~AMRAudioRTPSink();
+
+private: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean
+ frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+
+ virtual unsigned specialHeaderSize() const;
+ virtual char const* auxSDPLine();
+
+private:
+ Boolean fSourceIsWideband;
+ // SDP "a=fmtp:" line — NOTE(review): presumably built/owned by
+ // auxSDPLine(); confirm ownership and lifetime in the .cpp.
+ char* fFmtpSDPLine;
+};
+
+#endif
diff --git a/liveMedia/include/AMRAudioRTPSource.hh b/liveMedia/include/AMRAudioRTPSource.hh
new file mode 100644
index 0000000..3ab9cd8
--- /dev/null
+++ b/liveMedia/include/AMRAudioRTPSource.hh
@@ -0,0 +1,53 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// AMR Audio RTP Sources (RFC 4867)
+// C++ header
+
+#ifndef _AMR_AUDIO_RTP_SOURCE_HH
+#define _AMR_AUDIO_RTP_SOURCE_HH
+
+#ifndef _RTP_SOURCE_HH
+#include "RTPSource.hh"
+#endif
+#ifndef _AMR_AUDIO_SOURCE_HH
+#include "AMRAudioSource.hh"
+#endif
+
+// Note: this class declares only a static factory function; the "AMRAudioSource"
+// that it returns is the object callers actually read from.
+class AMRAudioRTPSource {
+public:
+ static AMRAudioSource* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs,
+ RTPSource*& resultRTPSource,
+ unsigned char rtpPayloadFormat,
+ Boolean isWideband = False,
+ unsigned numChannels = 1,
+ Boolean isOctetAligned = True,
+ unsigned interleaving = 0,
+ // relevant only if "isOctetAligned"
+ // The maximum # of frame-blocks in a group
+ // 0 means: no interleaving
+ Boolean robustSortingOrder = False,
+ // relevant only if "isOctetAligned"
+ Boolean CRCsArePresent = False
+ // relevant only if "isOctetAligned"
+ );
+ // This returns a source to read from, but "resultRTPSource" will
+ // point to RTP-related state.
+};
+
+#endif
diff --git a/liveMedia/include/AMRAudioSource.hh b/liveMedia/include/AMRAudioSource.hh
new file mode 100644
index 0000000..6bc131e
--- /dev/null
+++ b/liveMedia/include/AMRAudioSource.hh
@@ -0,0 +1,52 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source object for AMR audio sources
+// C++ header
+
+#ifndef _AMR_AUDIO_SOURCE_HH
+#define _AMR_AUDIO_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class AMRAudioSource: public FramedSource {
+public:
+ Boolean isWideband() const { return fIsWideband; }
+ unsigned numChannels() const { return fNumChannels; }
+
+ u_int8_t lastFrameHeader() const { return fLastFrameHeader; }
+ // The frame header for the most recently read frame (RFC 4867, sec. 5.3)
+
+protected:
+ AMRAudioSource(UsageEnvironment& env, Boolean isWideband, unsigned numChannels);
+ // virtual base class
+ virtual ~AMRAudioSource();
+
+private:
+ // redefined virtual functions:
+ virtual char const* MIMEtype() const;
+ virtual Boolean isAMRAudioSource() const;
+
+protected:
+ Boolean fIsWideband; // True for AMR-WB, False for narrowband AMR
+ unsigned fNumChannels;
+ u_int8_t fLastFrameHeader; // updated as frames are read; see lastFrameHeader()
+};
+
+#endif
diff --git a/liveMedia/include/AVIFileSink.hh b/liveMedia/include/AVIFileSink.hh
new file mode 100644
index 0000000..04cbced
--- /dev/null
+++ b/liveMedia/include/AVIFileSink.hh
@@ -0,0 +1,115 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A sink that generates an AVI file from a composite media session
+// C++ header
+
+#ifndef _AVI_FILE_SINK_HH
+#define _AVI_FILE_SINK_HH
+
+#ifndef _MEDIA_SESSION_HH
+#include "MediaSession.hh"
+#endif
+
+class AVIFileSink: public Medium {
+public:
+ // Creates a sink that records all subsessions of "inputSession" into the
+ // AVI file "outputFileName". The movie geometry/FPS parameters are
+ // written into the AVI headers.
+ static AVIFileSink* createNew(UsageEnvironment& env,
+ MediaSession& inputSession,
+ char const* outputFileName,
+ unsigned bufferSize = 20000,
+ unsigned short movieWidth = 240,
+ unsigned short movieHeight = 180,
+ unsigned movieFPS = 15,
+ Boolean packetLossCompensate = False);
+
+ typedef void (afterPlayingFunc)(void* clientData);
+ // Starts recording; "afterFunc(afterClientData)" is invoked when playing ends.
+ Boolean startPlaying(afterPlayingFunc* afterFunc,
+ void* afterClientData);
+
+ unsigned numActiveSubsessions() const { return fNumSubsessions; }
+
+private:
+ AVIFileSink(UsageEnvironment& env, MediaSession& inputSession,
+ char const* outputFileName, unsigned bufferSize,
+ unsigned short movieWidth, unsigned short movieHeight,
+ unsigned movieFPS, Boolean packetLossCompensate);
+ // called only by createNew()
+ virtual ~AVIFileSink();
+
+ Boolean continuePlaying();
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ static void onSourceClosure(void* clientData);
+ void onSourceClosure1();
+ static void onRTCPBye(void* clientData);
+ void addIndexRecord(class AVIIndexRecord* newIndexRecord);
+ void completeOutputFile();
+
+private:
+ friend class AVISubsessionIOState;
+ MediaSession& fInputSession;
+ FILE* fOutFid; // the open output AVI file
+ // List of AVI index records, maintained by addIndexRecord():
+ class AVIIndexRecord *fIndexRecordsHead, *fIndexRecordsTail;
+ unsigned fNumIndexRecords;
+ unsigned fBufferSize;
+ Boolean fPacketLossCompensate;
+ Boolean fAreCurrentlyBeingPlayed;
+ afterPlayingFunc* fAfterFunc;
+ void* fAfterClientData;
+ unsigned fNumSubsessions;
+ unsigned fNumBytesWritten;
+ struct timeval fStartTime;
+ Boolean fHaveCompletedOutputFile;
+
+private:
+ ///// Definitions specific to the AVI file format:
+
+ unsigned addWord(unsigned word); // outputs "word" in little-endian order
+ unsigned addHalfWord(unsigned short halfWord);
+ unsigned addByte(unsigned char byte) {
+ putc(byte, fOutFid);
+ return 1;
+ }
+ unsigned addZeroWords(unsigned numWords);
+ unsigned add4ByteString(char const* str);
+ void setWord(unsigned filePosn, unsigned size);
+
+ // Define member functions for outputting various types of file header:
+#define _header(name) unsigned addFileHeader_##name()
+ _header(AVI);
+ _header(hdrl);
+ _header(avih);
+ _header(strl);
+ _header(strh);
+ _header(strf);
+ _header(JUNK);
+// _header(JUNK);
+ _header(movi);
+private:
+ unsigned short fMovieWidth, fMovieHeight;
+ unsigned fMovieFPS;
+ // File positions/values of size fields that are back-patched (via
+ // setWord()) once the true sizes are known:
+ unsigned fRIFFSizePosition, fRIFFSizeValue;
+ unsigned fAVIHMaxBytesPerSecondPosition;
+ unsigned fAVIHFrameCountPosition;
+ unsigned fMoviSizePosition, fMoviSizeValue;
+ class AVISubsessionIOState* fCurrentIOState;
+ unsigned fJunkNumber;
+};
+
+#endif
diff --git a/liveMedia/include/AudioInputDevice.hh b/liveMedia/include/AudioInputDevice.hh
new file mode 100644
index 0000000..fd689f4
--- /dev/null
+++ b/liveMedia/include/AudioInputDevice.hh
@@ -0,0 +1,71 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Generic audio input device (such as a microphone, or an input sound card)
+// C++ header
+
+#ifndef _AUDIO_INPUT_DEVICE_HH
+#define _AUDIO_INPUT_DEVICE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+// A simple holder for the list of audio input port names returned by
+// AudioInputDevice::getPortNames().
+class AudioPortNames {
+public:
+ AudioPortNames();
+ virtual ~AudioPortNames();
+
+ unsigned numPorts; // number of entries in the "portName" array
+ char** portName;
+};
+
+class AudioInputDevice: public FramedSource {
+public:
+ unsigned char bitsPerSample() const { return fBitsPerSample; }
+ unsigned char numChannels() const { return fNumChannels; }
+ unsigned samplingFrequency() const { return fSamplingFrequency; }
+
+ // Pure virtual: subclasses select the hardware input port / report level.
+ virtual Boolean setInputPort(int portIndex) = 0;
+ virtual double getAverageLevel() const = 0;
+
+ static AudioInputDevice*
+ createNew(UsageEnvironment& env, int inputPortNumber,
+ unsigned char bitsPerSample, unsigned char numChannels,
+ unsigned samplingFrequency, unsigned granularityInMS = 20);
+ static AudioPortNames* getPortNames();
+
+ static char** allowedDeviceNames;
+ // If this is set to non-NULL, then it's a NULL-terminated array of strings
+ // of device names that we are allowed to access.
+
+protected:
+ AudioInputDevice(UsageEnvironment& env,
+ unsigned char bitsPerSample,
+ unsigned char numChannels,
+ unsigned samplingFrequency,
+ unsigned granularityInMS);
+ // we're an abstract base class
+
+ virtual ~AudioInputDevice();
+
+protected:
+ unsigned char fBitsPerSample, fNumChannels;
+ unsigned fSamplingFrequency;
+ unsigned fGranularityInMS; // delivery granularity, in milliseconds
+};
+
+#endif
diff --git a/liveMedia/include/AudioRTPSink.hh b/liveMedia/include/AudioRTPSink.hh
new file mode 100644
index 0000000..b1c5280
--- /dev/null
+++ b/liveMedia/include/AudioRTPSink.hh
@@ -0,0 +1,42 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTP sink for audio codecs (abstract base class)
+// C++ header
+
+#ifndef _AUDIO_RTP_SINK_HH
+#define _AUDIO_RTP_SINK_HH
+
+#ifndef _MULTI_FRAMED_RTP_SINK_HH
+#include "MultiFramedRTPSink.hh"
+#endif
+
+class AudioRTPSink: public MultiFramedRTPSink {
+protected:
+ AudioRTPSink(UsageEnvironment& env,
+ Groupsock* rtpgs, unsigned char rtpPayloadType,
+ unsigned rtpTimestampFrequency,
+ char const* rtpPayloadFormatName,
+ unsigned numChannels = 1);
+ // (we're an abstract base class)
+ virtual ~AudioRTPSink();
+
+private: // redefined virtual functions:
+ // Returns the SDP media type for audio sinks (the "m=" line's media field).
+ virtual char const* sdpMediaType() const;
+};
+
+#endif
diff --git a/liveMedia/include/Base64.hh b/liveMedia/include/Base64.hh
new file mode 100644
index 0000000..17c068c
--- /dev/null
+++ b/liveMedia/include/Base64.hh
@@ -0,0 +1,43 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Base64 encoding and decoding
+// C++ header
+
+#ifndef _BASE64_HH
+#define _BASE64_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+// Decodes the NUL-terminated Base64 string "in".
+unsigned char* base64Decode(char const* in, unsigned& resultSize,
+ Boolean trimTrailingZeros = True);
+ // returns a newly allocated array - of size "resultSize" - that
+ // the caller is responsible for delete[]ing.
+
+unsigned char* base64Decode(char const* in, unsigned inSize,
+ unsigned& resultSize,
+ Boolean trimTrailingZeros = True);
+ // As above, but includes the size of the input string (i.e., the number of bytes to decode) as a parameter.
+ // This saves an extra call to "strlen()" if we already know the length of the input string.
+
+// Base64-encodes the "origLength" bytes starting at "orig".
+char* base64Encode(char const* orig, unsigned origLength);
+ // returns a 0-terminated string that
+ // the caller is responsible for delete[]ing.
+
+#endif
diff --git a/liveMedia/include/BasicUDPSink.hh b/liveMedia/include/BasicUDPSink.hh
new file mode 100644
index 0000000..4766019
--- /dev/null
+++ b/liveMedia/include/BasicUDPSink.hh
@@ -0,0 +1,62 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simple UDP sink (i.e., without RTP or other headers added); one frame per packet
+// C++ header
+
+#ifndef _BASIC_UDP_SINK_HH
+#define _BASIC_UDP_SINK_HH
+
+#ifndef _MEDIA_SINK_HH
+#include "MediaSink.hh"
+#endif
+#ifndef _GROUPSOCK_HH
+#include <Groupsock.hh>
+#endif
+
+class BasicUDPSink: public MediaSink {
+public:
+ static BasicUDPSink* createNew(UsageEnvironment& env, Groupsock* gs,
+ unsigned maxPayloadSize = 1450);
+protected:
+ BasicUDPSink(UsageEnvironment& env, Groupsock* gs, unsigned maxPayloadSize);
+ // called only by createNew()
+ virtual ~BasicUDPSink();
+
+private: // redefined virtual functions:
+ virtual Boolean continuePlaying();
+
+private:
+ void continuePlaying1();
+
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+ unsigned durationInMicroseconds);
+
+ static void sendNext(void* firstArg);
+
+private:
+ Groupsock* fGS;
+ unsigned fMaxPayloadSize;
+ unsigned char* fOutputBuffer;
+ struct timeval fNextSendTime;
+};
+
+#endif
diff --git a/liveMedia/include/BasicUDPSource.hh b/liveMedia/include/BasicUDPSource.hh
new file mode 100644
index 0000000..e8db203
--- /dev/null
+++ b/liveMedia/include/BasicUDPSource.hh
@@ -0,0 +1,55 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simple UDP source, where every UDP payload is a complete frame
+// C++ header
+
+#ifndef _BASIC_UDP_SOURCE_HH
+#define _BASIC_UDP_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+#ifndef _GROUPSOCK_HH
+#include "Groupsock.hh"
+#endif
+
+class BasicUDPSource: public FramedSource {
+public:
+ static BasicUDPSource* createNew(UsageEnvironment& env, Groupsock* inputGS);
+
+ virtual ~BasicUDPSource();
+
+ Groupsock* gs() const { return fInputGS; }
+
+private:
+ BasicUDPSource(UsageEnvironment& env, Groupsock* inputGS);
+ // called only by createNew()
+
+ static void incomingPacketHandler(BasicUDPSource* source, int mask);
+ void incomingPacketHandler1();
+
+private: // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ Groupsock* fInputGS;
+ Boolean fHaveStartedReading;
+};
+
+#endif
diff --git a/liveMedia/include/BitVector.hh b/liveMedia/include/BitVector.hh
new file mode 100644
index 0000000..4026a08
--- /dev/null
+++ b/liveMedia/include/BitVector.hh
@@ -0,0 +1,67 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Bit Vector data structure
+// C++ header
+
+#ifndef _BIT_VECTOR_HH
+#define _BIT_VECTOR_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+class BitVector {
+public:
+ BitVector(unsigned char* baseBytePtr,
+ unsigned baseBitOffset,
+ unsigned totNumBits);
+
+ void setup(unsigned char* baseBytePtr,
+ unsigned baseBitOffset,
+ unsigned totNumBits);
+
+ void putBits(unsigned from, unsigned numBits); // "numBits" <= 32
+ void put1Bit(unsigned bit);
+
+ unsigned getBits(unsigned numBits); // "numBits" <= 32
+ unsigned get1Bit();
+ Boolean get1BitBoolean() { return get1Bit() != 0; }
+
+ void skipBits(unsigned numBits);
+
+ unsigned curBitIndex() const { return fCurBitIndex; }
+ unsigned totNumBits() const { return fTotNumBits; }
+ unsigned numBitsRemaining() const { return fTotNumBits - fCurBitIndex; }
+
+ unsigned get_expGolomb();
+ // Returns the value of the next bits, assuming that they were encoded using an exponential-Golomb code of order 0
+ int get_expGolombSigned(); // signed version of the above
+
+private:
+ unsigned char* fBaseBytePtr;
+ unsigned fBaseBitOffset;
+ unsigned fTotNumBits;
+ unsigned fCurBitIndex;
+};
+
+// A general bit copy operation:
+void shiftBits(unsigned char* toBasePtr, unsigned toBitOffset,
+ unsigned char const* fromBasePtr, unsigned fromBitOffset,
+ unsigned numBits);
+
+#endif
diff --git a/liveMedia/include/ByteStreamFileSource.hh b/liveMedia/include/ByteStreamFileSource.hh
new file mode 100644
index 0000000..b1e813a
--- /dev/null
+++ b/liveMedia/include/ByteStreamFileSource.hh
@@ -0,0 +1,82 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A file source that is a plain byte stream (rather than frames)
+// C++ header
+
+#ifndef _BYTE_STREAM_FILE_SOURCE_HH
+#define _BYTE_STREAM_FILE_SOURCE_HH
+
+#ifndef _FRAMED_FILE_SOURCE_HH
+#include "FramedFileSource.hh"
+#endif
+
+class ByteStreamFileSource: public FramedFileSource {
+public:
+ static ByteStreamFileSource* createNew(UsageEnvironment& env,
+ char const* fileName,
+ unsigned preferredFrameSize = 0,
+ unsigned playTimePerFrame = 0);
+ // "preferredFrameSize" == 0 means 'no preference'
+ // "playTimePerFrame" is in microseconds
+
+ static ByteStreamFileSource* createNew(UsageEnvironment& env,
+ FILE* fid,
+ unsigned preferredFrameSize = 0,
+ unsigned playTimePerFrame = 0);
+ // an alternative version of "createNew()" that's used if you already have
+ // an open file.
+
+ u_int64_t fileSize() const { return fFileSize; }
+ // 0 means zero-length, unbounded, or unknown
+
+ void seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream = 0);
+ // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF
+ void seekToByteRelative(int64_t offset, u_int64_t numBytesToStream = 0);
+ void seekToEnd(); // to force EOF handling on the next read
+
+protected:
+ ByteStreamFileSource(UsageEnvironment& env,
+ FILE* fid,
+ unsigned preferredFrameSize,
+ unsigned playTimePerFrame);
+ // called only by createNew()
+
+ virtual ~ByteStreamFileSource();
+
+ static void fileReadableHandler(ByteStreamFileSource* source, int mask);
+ void doReadFromFile();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+protected:
+ u_int64_t fFileSize;
+
+private:
+ unsigned fPreferredFrameSize;
+ unsigned fPlayTimePerFrame;
+ Boolean fFidIsSeekable;
+ unsigned fLastPlayTime;
+ Boolean fHaveStartedReading;
+ Boolean fLimitNumBytesToStream;
+ u_int64_t fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True
+};
+
+#endif
diff --git a/liveMedia/include/ByteStreamMemoryBufferSource.hh b/liveMedia/include/ByteStreamMemoryBufferSource.hh
new file mode 100644
index 0000000..a908813
--- /dev/null
+++ b/liveMedia/include/ByteStreamMemoryBufferSource.hh
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class for streaming data from a (static) memory buffer, as if it were a file.
+// C++ header
+
+#ifndef _BYTE_STREAM_MEMORY_BUFFER_SOURCE_HH
+#define _BYTE_STREAM_MEMORY_BUFFER_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class ByteStreamMemoryBufferSource: public FramedSource {
+public:
+ static ByteStreamMemoryBufferSource* createNew(UsageEnvironment& env,
+ u_int8_t* buffer, u_int64_t bufferSize,
+ Boolean deleteBufferOnClose = True,
+ unsigned preferredFrameSize = 0,
+ unsigned playTimePerFrame = 0);
+ // "preferredFrameSize" == 0 means 'no preference'
+ // "playTimePerFrame" is in microseconds
+
+ u_int64_t bufferSize() const { return fBufferSize; }
+
+ void seekToByteAbsolute(u_int64_t byteNumber, u_int64_t numBytesToStream = 0);
+ // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF
+ void seekToByteRelative(int64_t offset, u_int64_t numBytesToStream = 0);
+
+protected:
+ ByteStreamMemoryBufferSource(UsageEnvironment& env,
+ u_int8_t* buffer, u_int64_t bufferSize,
+ Boolean deleteBufferOnClose,
+ unsigned preferredFrameSize,
+ unsigned playTimePerFrame);
+ // called only by createNew()
+
+ virtual ~ByteStreamMemoryBufferSource();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ u_int8_t* fBuffer;
+ u_int64_t fBufferSize;
+ u_int64_t fCurIndex;
+ Boolean fDeleteBufferOnClose;
+ unsigned fPreferredFrameSize;
+ unsigned fPlayTimePerFrame;
+ unsigned fLastPlayTime;
+ Boolean fLimitNumBytesToStream;
+ u_int64_t fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True
+};
+
+#endif
diff --git a/liveMedia/include/ByteStreamMultiFileSource.hh b/liveMedia/include/ByteStreamMultiFileSource.hh
new file mode 100644
index 0000000..470720e
--- /dev/null
+++ b/liveMedia/include/ByteStreamMultiFileSource.hh
@@ -0,0 +1,69 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A source that consists of multiple byte-stream files, read sequentially.
+// (The input is an array of file names, with a terminating 'file name' of NULL.)
+// C++ header
+
+#ifndef _BYTE_STREAM_MULTI_FILE_SOURCE_HH
+#define _BYTE_STREAM_MULTI_FILE_SOURCE_HH
+
+#ifndef _BYTE_STREAM_FILE_SOURCE_HH
+#include "ByteStreamFileSource.hh"
+#endif
+
+class ByteStreamMultiFileSource: public FramedSource {
+public:
+ static ByteStreamMultiFileSource*
+ createNew(UsageEnvironment& env, char const** fileNameArray,
+ unsigned preferredFrameSize = 0, unsigned playTimePerFrame = 0);
+ // "fileNameArray" is a pointer to an array of (char const*) file names, with
+ // a 'file name' of NULL indicating the end of the array.
+
+ Boolean haveStartedNewFile() const { return fHaveStartedNewFile; }
+ // True iff the most recently delivered frame was the first from a newly-opened file
+
+protected:
+ ByteStreamMultiFileSource(UsageEnvironment& env, char const** fileNameArray,
+ unsigned preferredFrameSize, unsigned playTimePerFrame);
+ // called only by createNew()
+
+ virtual ~ByteStreamMultiFileSource();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void onSourceClosure(void* clientData);
+ void onSourceClosure1();
+ static void afterGettingFrame(void* clientData,
+ unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ unsigned fPreferredFrameSize;
+ unsigned fPlayTimePerFrame;
+ unsigned fNumSources;
+ unsigned fCurrentlyReadSourceNumber;
+ Boolean fHaveStartedNewFile;
+ char const** fFileNameArray;
+ ByteStreamFileSource** fSourceArray;
+};
+
+#endif
diff --git a/liveMedia/include/DVVideoFileServerMediaSubsession.hh b/liveMedia/include/DVVideoFileServerMediaSubsession.hh
new file mode 100644
index 0000000..d0d4eae
--- /dev/null
+++ b/liveMedia/include/DVVideoFileServerMediaSubsession.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a DV video file.
+// C++ header
+
+#ifndef _DV_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _DV_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class DVVideoFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ static DVVideoFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+private:
+ DVVideoFileServerMediaSubsession(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew()
+ virtual ~DVVideoFileServerMediaSubsession();
+
+private: // redefined virtual functions
+ virtual char const* getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource);
+ virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+ virtual void setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes);
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId, unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* inputSource);
+ virtual float duration() const;
+
+private:
+ float fFileDuration; // in seconds
+};
+
+#endif
diff --git a/liveMedia/include/DVVideoRTPSink.hh b/liveMedia/include/DVVideoRTPSink.hh
new file mode 100644
index 0000000..74e2f39
--- /dev/null
+++ b/liveMedia/include/DVVideoRTPSink.hh
@@ -0,0 +1,57 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for DV video (RFC 3189)
+// (Thanks to Ben Hutchings for prototyping this.)
+// C++ header
+
+#ifndef _DV_VIDEO_RTP_SINK_HH
+#define _DV_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+#ifndef _DV_VIDEO_STREAM_FRAMER_HH
+#include "DVVideoStreamFramer.hh"
+#endif
+
+class DVVideoRTPSink: public VideoRTPSink {
+public:
+ static DVVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ char const* auxSDPLineFromFramer(DVVideoStreamFramer* framerSource);
+
+protected:
+ DVVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ // called only by createNew()
+
+ virtual ~DVVideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual unsigned computeOverflowForNewFrame(unsigned newFrameSize) const;
+ virtual char const* auxSDPLine();
+
+private:
+ char* fFmtpSDPLine;
+};
+
+#endif
diff --git a/liveMedia/include/DVVideoRTPSource.hh b/liveMedia/include/DVVideoRTPSource.hh
new file mode 100644
index 0000000..c5afa14
--- /dev/null
+++ b/liveMedia/include/DVVideoRTPSource.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// DV Video RTP Sources
+// C++ header
+
+#ifndef _DV_VIDEO_RTP_SOURCE_HH
+#define _DV_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class DVVideoRTPSource: public MultiFramedRTPSource {
+public:
+ static DVVideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+
+protected:
+ virtual ~DVVideoRTPSource();
+
+private:
+ DVVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/DVVideoStreamFramer.hh b/liveMedia/include/DVVideoStreamFramer.hh
new file mode 100644
index 0000000..738f893
--- /dev/null
+++ b/liveMedia/include/DVVideoStreamFramer.hh
@@ -0,0 +1,72 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that parses a DV input stream into DV frames to deliver to the downstream object
+// C++ header
+
+#ifndef _DV_VIDEO_STREAM_FRAMER_HH
+#define _DV_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+#define DV_DIF_BLOCK_SIZE 80
+#define DV_NUM_BLOCKS_PER_SEQUENCE 150
+#define DV_SAVED_INITIAL_BLOCKS_SIZE ((DV_NUM_BLOCKS_PER_SEQUENCE+6-1)*DV_DIF_BLOCK_SIZE)
+ /* enough data to ensure that it contains an intact 6-block header (which occurs at the start of a 150-block sequence) */
+
+class DVVideoStreamFramer: public FramedFilter {
+public:
+ static DVVideoStreamFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean sourceIsSeekable = False, Boolean leavePresentationTimesUnmodified = False);
+ // Set "sourceIsSeekable" to True if the input source is a seekable object (e.g. a file), and the server that uses us
+ // does a seek-to-zero on the source before reading from it. (Our RTSP server implementation does this.)
+ char const* profileName();
+ Boolean getFrameParameters(unsigned& frameSize/*bytes*/, double& frameDuration/*microseconds*/);
+
+protected:
+ DVVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean sourceIsSeekable, Boolean leavePresentationTimesUnmodified);
+ // called only by createNew(), or by subclass constructors
+ virtual ~DVVideoStreamFramer();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean isDVVideoStreamFramer() const;
+ virtual void doGetNextFrame();
+
+protected:
+ void getAndDeliverData(); // used to implement "doGetNextFrame()"
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime);
+ void getProfile();
+
+protected:
+ Boolean fLeavePresentationTimesUnmodified;
+ void const* fOurProfile;
+ struct timeval fNextFramePresentationTime;
+ unsigned char fSavedInitialBlocks[DV_SAVED_INITIAL_BLOCKS_SIZE];
+ char fInitialBlocksPresent;
+ Boolean fSourceIsSeekable;
+};
+
+#endif
diff --git a/liveMedia/include/DeviceSource.hh b/liveMedia/include/DeviceSource.hh
new file mode 100644
index 0000000..d4713d7
--- /dev/null
+++ b/liveMedia/include/DeviceSource.hh
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A template for a MediaSource encapsulating an audio/video input device
+//
+// NOTE: Sections of this code labeled "%%% TO BE WRITTEN %%%" are incomplete, and need to be written by the programmer
+// (depending on the features of the particular device).
+// C++ header
+
+#ifndef _DEVICE_SOURCE_HH
+#define _DEVICE_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+// The following class can be used to define specific encoder parameters
+class DeviceParameters {
+ //%%% TO BE WRITTEN %%%
+};
+
+class DeviceSource: public FramedSource {
+public:
+ static DeviceSource* createNew(UsageEnvironment& env,
+ DeviceParameters params);
+
+public:
+ static EventTriggerId eventTriggerId;
+ // Note that this is defined here to be a static class variable, because this code is intended to illustrate how to
+ // encapsulate a *single* device - not a set of devices.
+ // You can, however, redefine this to be a non-static member variable.
+
+protected:
+ DeviceSource(UsageEnvironment& env, DeviceParameters params);
+ // called only by createNew(), or by subclass constructors
+ virtual ~DeviceSource();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ //virtual void doStopGettingFrames(); // optional
+
+private:
+ static void deliverFrame0(void* clientData);
+ void deliverFrame();
+
+private:
+ static unsigned referenceCount; // used to count how many instances of this class currently exist
+ DeviceParameters fParams;
+};
+
+#endif
diff --git a/liveMedia/include/DigestAuthentication.hh b/liveMedia/include/DigestAuthentication.hh
new file mode 100644
index 0000000..7a1dc80
--- /dev/null
+++ b/liveMedia/include/DigestAuthentication.hh
@@ -0,0 +1,75 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class used for digest authentication.
+// C++ header
+
+#ifndef _DIGEST_AUTHENTICATION_HH
+#define _DIGEST_AUTHENTICATION_HH
+
+#ifndef _BOOLEAN_HH
+#include <Boolean.hh>
+#endif
+
+// A class used for digest authentication.
+// The "realm", and "nonce" fields are supplied by the server
+// (in a "401 Unauthorized" response).
+// The "username" and "password" fields are supplied by the client.
+class Authenticator {
+public:
+ Authenticator();
+ Authenticator(char const* username, char const* password, Boolean passwordIsMD5 = False);
+ // If "passwordIsMD5" is True, then "password" is actually the value computed
+ // by md5(<username>:<realm>:<actual-password>)
+ Authenticator(const Authenticator& orig);
+ Authenticator& operator=(const Authenticator& rightSide);
+ Boolean operator<(const Authenticator* rightSide);
+ virtual ~Authenticator();
+
+ void reset();
+ void setRealmAndNonce(char const* realm, char const* nonce);
+ void setRealmAndRandomNonce(char const* realm);
+ // as above, except that the nonce is created randomly.
+ // (This is used by servers.)
+ void setUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5 = False);
+ // If "passwordIsMD5" is True, then "password" is actually the value computed
+ // by md5(<username>:<realm>:<actual-password>)
+
+ char const* realm() const { return fRealm; }
+ char const* nonce() const { return fNonce; }
+ char const* username() const { return fUsername; }
+ char const* password() const { return fPassword; }
+
+ char const* computeDigestResponse(char const* cmd, char const* url) const;
+ // The returned string from this function must later be freed by calling:
+ void reclaimDigestResponse(char const* responseStr) const;
+
+private:
+ void resetRealmAndNonce();
+ void resetUsernameAndPassword();
+ void assignRealmAndNonce(char const* realm, char const* nonce);
+ void assignUsernameAndPassword(char const* username, char const* password, Boolean passwordIsMD5);
+ void assign(char const* realm, char const* nonce,
+ char const* username, char const* password, Boolean passwordIsMD5);
+
+private:
+ char* fRealm; char* fNonce;
+ char* fUsername; char* fPassword;
+ Boolean fPasswordIsMD5;
+};
+
+#endif
diff --git a/liveMedia/include/FileServerMediaSubsession.hh b/liveMedia/include/FileServerMediaSubsession.hh
new file mode 100644
index 0000000..80da682
--- /dev/null
+++ b/liveMedia/include/FileServerMediaSubsession.hh
@@ -0,0 +1,43 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a file.
+// C++ header
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+#ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
+#include "OnDemandServerMediaSubsession.hh"
+#endif
+
+class FileServerMediaSubsession: public OnDemandServerMediaSubsession {
+protected: // we're a virtual base class; concrete, per-codec subsessions derive from us
+ FileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
+ Boolean reuseFirstSource);
+ virtual ~FileServerMediaSubsession();
+
+protected:
+ char const* fFileName; // name of the file that we stream from
+ u_int64_t fFileSize; // if known (otherwise presumably 0 — TODO confirm in the .cpp)
+};
+
+#endif
diff --git a/liveMedia/include/FileSink.hh b/liveMedia/include/FileSink.hh
new file mode 100644
index 0000000..bad86e7
--- /dev/null
+++ b/liveMedia/include/FileSink.hh
@@ -0,0 +1,71 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// File Sinks
+// C++ header
+
+#ifndef _FILE_SINK_HH
+#define _FILE_SINK_HH
+
+#ifndef _MEDIA_SINK_HH
+#include "MediaSink.hh"
+#endif
+
+class FileSink: public MediaSink {
+public:
+ static FileSink* createNew(UsageEnvironment& env, char const* fileName,
+ unsigned bufferSize = 20000,
+ Boolean oneFilePerFrame = False);
+ // "bufferSize" should be at least as large as the largest expected
+ // input frame.
+ // "oneFilePerFrame" - if True - specifies that each input frame will
+ // be written to a separate file (using the presentation time as a
+ // file name suffix). The default behavior ("oneFilePerFrame" == False)
+ // is to output all incoming data into a single file.
+
+ virtual void addData(unsigned char const* data, unsigned dataSize,
+ struct timeval presentationTime);
+ // (Available in case a client wants to add extra data to the output file)
+
+protected:
+ FileSink(UsageEnvironment& env, FILE* fid, unsigned bufferSize,
+ char const* perFrameFileNamePrefix);
+ // called only by createNew()
+ virtual ~FileSink();
+
+protected: // redefined virtual functions:
+ virtual Boolean continuePlaying();
+
+protected:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds); // static 'trampoline' — presumably forwards to the virtual overload below; confirm in the .cpp
+ virtual void afterGettingFrame(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime);
+
+ FILE* fOutFid; // the output file (single-file mode)
+ unsigned char* fBuffer; // frame buffer, "fBufferSize" bytes long
+ unsigned fBufferSize;
+ char* fPerFrameFileNamePrefix; // used if "oneFilePerFrame" is True
+ char* fPerFrameFileNameBuffer; // used if "oneFilePerFrame" is True
+ struct timeval fPrevPresentationTime; // presentation time of the previously-written frame
+ unsigned fSamePresentationTimeCounter; // counts consecutive frames with equal presentation times — presumably to keep per-frame file names unique; verify in the .cpp
+};
+
+#endif
diff --git a/liveMedia/include/FramedFileSource.hh b/liveMedia/include/FramedFileSource.hh
new file mode 100644
index 0000000..f24adf6
--- /dev/null
+++ b/liveMedia/include/FramedFileSource.hh
@@ -0,0 +1,37 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Framed File Sources
+// C++ header
+
+#ifndef _FRAMED_FILE_SOURCE_HH
+#define _FRAMED_FILE_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class FramedFileSource: public FramedSource {
+protected:
+ FramedFileSource(UsageEnvironment& env, FILE* fid); // abstract base class
+ virtual ~FramedFileSource();
+
+protected:
+ FILE* fFid; // the open file that subclasses read frames from
+};
+
+#endif
diff --git a/liveMedia/include/FramedFilter.hh b/liveMedia/include/FramedFilter.hh
new file mode 100644
index 0000000..b3a4de7
--- /dev/null
+++ b/liveMedia/include/FramedFilter.hh
@@ -0,0 +1,52 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Framed Filters
+// C++ header
+
+#ifndef _FRAMED_FILTER_HH
+#define _FRAMED_FILTER_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class FramedFilter: public FramedSource {
+public:
+ FramedSource* inputSource() const { return fInputSource; }
+
+ void reassignInputSource(FramedSource* newInputSource) { fInputSource = newInputSource; } // note: just reassigns the pointer; the old input source is NOT closed here
+
+ // Call before destruction if you want to prevent the destructor from closing the input source
+ void detachInputSource();
+
+protected:
+ FramedFilter(UsageEnvironment& env, FramedSource* inputSource);
+ // abstract base class
+ virtual ~FramedFilter();
+
+protected:
+ // Redefined virtual functions (with default 'null' implementations):
+ virtual char const* MIMEtype() const;
+ virtual void getAttributes() const;
+ virtual void doStopGettingFrames();
+
+protected:
+ FramedSource* fInputSource; // the upstream source whose output we filter
+};
+
+#endif
diff --git a/liveMedia/include/FramedSource.hh b/liveMedia/include/FramedSource.hh
new file mode 100644
index 0000000..0e017ab
--- /dev/null
+++ b/liveMedia/include/FramedSource.hh
@@ -0,0 +1,95 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Framed Sources
+// C++ header
+
+#ifndef _FRAMED_SOURCE_HH
+#define _FRAMED_SOURCE_HH
+
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+#ifndef _MEDIA_SOURCE_HH
+#include "MediaSource.hh"
+#endif
+
+class FramedSource: public MediaSource {
+public:
+ static Boolean lookupByName(UsageEnvironment& env, char const* sourceName,
+ FramedSource*& resultSource);
+
+ typedef void (afterGettingFunc)(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds); // signature of the 'frame delivered' callback
+ typedef void (onCloseFunc)(void* clientData); // signature of the 'source closed' callback
+ void getNextFrame(unsigned char* to, unsigned maxSize,
+ afterGettingFunc* afterGettingFunc,
+ void* afterGettingClientData,
+ onCloseFunc* onCloseFunc,
+ void* onCloseClientData); // requests delivery of (up to) one frame into "to" (at most "maxSize" bytes)
+
+ static void handleClosure(void* clientData);
+ void handleClosure();
+ // This should be called (on ourself) if the source is discovered
+ // to be closed (i.e., no longer readable)
+
+ void stopGettingFrames();
+
+ virtual unsigned maxFrameSize() const;
+ // size of the largest possible frame that we may serve, or 0
+ // if no such maximum is known (default)
+
+ virtual void doGetNextFrame() = 0;
+ // called by getNextFrame()
+
+ Boolean isCurrentlyAwaitingData() const {return fIsCurrentlyAwaitingData;}
+
+ static void afterGetting(FramedSource* source);
+ // doGetNextFrame() should arrange for this to be called after the
+ // frame has been read (*iff* it is read successfully)
+
+protected:
+ FramedSource(UsageEnvironment& env); // abstract base class
+ virtual ~FramedSource();
+
+ virtual void doStopGettingFrames();
+
+protected:
+ // The following variables are typically accessed/set by doGetNextFrame()
+ unsigned char* fTo; // in: where the frame is to be delivered
+ unsigned fMaxSize; // in: capacity of "fTo"
+ unsigned fFrameSize; // out: number of bytes actually delivered
+ unsigned fNumTruncatedBytes; // out: bytes dropped because the frame exceeded "fMaxSize"
+ struct timeval fPresentationTime; // out: the frame's presentation time
+ unsigned fDurationInMicroseconds; // out: the frame's duration
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isFramedSource() const;
+
+private:
+ afterGettingFunc* fAfterGettingFunc; // saved from the getNextFrame() parameters
+ void* fAfterGettingClientData; // ditto
+ onCloseFunc* fOnCloseFunc; // ditto
+ void* fOnCloseClientData; // ditto
+
+ Boolean fIsCurrentlyAwaitingData; // presumably True between getNextFrame() and the frame's delivery — confirm in the .cpp
+};
+
+#endif
diff --git a/liveMedia/include/GSMAudioRTPSink.hh b/liveMedia/include/GSMAudioRTPSink.hh
new file mode 100644
index 0000000..faa1175
--- /dev/null
+++ b/liveMedia/include/GSMAudioRTPSink.hh
@@ -0,0 +1,44 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for GSM audio
+// C++ header
+
+#ifndef _GSM_AUDIO_RTP_SINK_HH
+#define _GSM_AUDIO_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
+class GSMAudioRTPSink: public AudioRTPSink {
+public:
+ static GSMAudioRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs); // note: no payload-format parameter — GSM presumably uses its static RTP payload type (3, per RFC 3551); confirm in the .cpp
+
+protected:
+ GSMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs);
+ // called only by createNew()
+
+ virtual ~GSMAudioRTPSink();
+
+private: // redefined virtual functions:
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const; // controls whether multiple GSM frames may share one RTP packet
+};
+
+#endif
diff --git a/liveMedia/include/GenericMediaServer.hh b/liveMedia/include/GenericMediaServer.hh
new file mode 100644
index 0000000..5f51a76
--- /dev/null
+++ b/liveMedia/include/GenericMediaServer.hh
@@ -0,0 +1,195 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic media server class, used to implement a RTSP server, and any other server that uses
+// "ServerMediaSession" objects to describe media to be served.
+// C++ header
+
+#ifndef _GENERIC_MEDIA_SERVER_HH
+#define _GENERIC_MEDIA_SERVER_HH
+
+#ifndef _MEDIA_HH
+#include "Media.hh"
+#endif
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+
+#ifndef REQUEST_BUFFER_SIZE
+#define REQUEST_BUFFER_SIZE 20000 // for incoming requests
+#endif
+#ifndef RESPONSE_BUFFER_SIZE
+#define RESPONSE_BUFFER_SIZE 20000
+#endif
+
+class GenericMediaServer: public Medium {
+public:
+ void addServerMediaSession(ServerMediaSession* serverMediaSession);
+
+ virtual ServerMediaSession*
+ lookupServerMediaSession(char const* streamName, Boolean isFirstLookupInSession = True);
+
+ void removeServerMediaSession(ServerMediaSession* serverMediaSession);
+ // Removes the "ServerMediaSession" object from our lookup table, so it will no longer be accessible by new clients.
+ // (However, any *existing* client sessions that use this "ServerMediaSession" object will continue streaming.
+ // The "ServerMediaSession" object will not get deleted until all of these client sessions have closed.)
+ // (To both delete the "ServerMediaSession" object *and* close all client sessions that use it,
+ // call "deleteServerMediaSession(serverMediaSession)" instead.)
+ virtual void removeServerMediaSession(char const* streamName);
+ // ditto
+
+ void closeAllClientSessionsForServerMediaSession(ServerMediaSession* serverMediaSession);
+ // Closes (from the server) all client sessions that are currently using this "ServerMediaSession" object.
+ // Note, however, that the "ServerMediaSession" object remains accessible by new clients.
+ virtual void closeAllClientSessionsForServerMediaSession(char const* streamName);
+ // ditto
+
+ void deleteServerMediaSession(ServerMediaSession* serverMediaSession);
+ // Equivalent to:
+ // "closeAllClientSessionsForServerMediaSession(serverMediaSession); removeServerMediaSession(serverMediaSession);"
+ virtual void deleteServerMediaSession(char const* streamName);
+ // Equivalent to:
+ // "closeAllClientSessionsForServerMediaSession(streamName); removeServerMediaSession(streamName);"
+
+ unsigned numClientSessions() const { return fClientSessions->numEntries(); }
+
+protected:
+ GenericMediaServer(UsageEnvironment& env, int ourSocket, Port ourPort,
+ unsigned reclamationSeconds);
+ // If "reclamationSeconds" > 0, then the "ClientSession" state for each client will get
+ // reclaimed if no activity from the client is detected in at least "reclamationSeconds".
+ // we're an abstract base class
+ virtual ~GenericMediaServer();
+ void cleanup(); // MUST be called in the destructor of any subclass of us
+
+ static int setUpOurSocket(UsageEnvironment& env, Port& ourPort);
+
+ static void incomingConnectionHandler(void*, int /*mask*/);
+ void incomingConnectionHandler();
+ void incomingConnectionHandlerOnSocket(int serverSocket);
+
+public: // should be protected, but some old compilers complain otherwise
+ // The state of a TCP connection used by a client:
+ class ClientConnection {
+ protected:
+ ClientConnection(GenericMediaServer& ourServer, int clientSocket, struct sockaddr_in clientAddr);
+ virtual ~ClientConnection();
+
+ UsageEnvironment& envir() { return fOurServer.envir(); }
+ void closeSockets();
+
+ static void incomingRequestHandler(void*, int /*mask*/);
+ void incomingRequestHandler();
+ virtual void handleRequestBytes(int newBytesRead) = 0; // subclasses parse the protocol-specific request here
+ void resetRequestBuffer();
+
+ protected:
+ friend class GenericMediaServer;
+ friend class ClientSession;
+ friend class RTSPServer; // needed to make some broken Windows compilers work; remove this in the future when we end support for Windows
+ GenericMediaServer& fOurServer; // the server that created us
+ int fOurSocket; // the TCP socket for this client connection
+ struct sockaddr_in fClientAddr; // the client's address
+ unsigned char fRequestBuffer[REQUEST_BUFFER_SIZE]; // accumulates incoming request bytes
+ unsigned char fResponseBuffer[RESPONSE_BUFFER_SIZE]; // holds the outgoing response
+ unsigned fRequestBytesAlreadySeen, fRequestBufferBytesLeft; // fill level of, and space remaining in, "fRequestBuffer"
+ };
+
+ // The state of an individual client session (using one or more sequential TCP connections) handled by a server:
+ class ClientSession {
+ protected:
+ ClientSession(GenericMediaServer& ourServer, u_int32_t sessionId);
+ virtual ~ClientSession();
+
+ UsageEnvironment& envir() { return fOurServer.envir(); }
+ void noteLiveness();
+ static void noteClientLiveness(ClientSession* clientSession);
+ static void livenessTimeoutTask(ClientSession* clientSession);
+
+ protected:
+ friend class GenericMediaServer;
+ friend class ClientConnection;
+ GenericMediaServer& fOurServer; // the server that created us
+ u_int32_t fOurSessionId; // our (unique) session id
+ ServerMediaSession* fOurServerMediaSession; // the media that this session streams — presumably set by a subclass; confirm in the .cpp
+ TaskToken fLivenessCheckTask; // scheduled task for "livenessTimeoutTask()"
+ };
+
+protected:
+ virtual ClientConnection* createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr) = 0;
+ virtual ClientSession* createNewClientSession(u_int32_t sessionId) = 0;
+
+ ClientSession* createNewClientSessionWithId();
+ // Generates a new (unused) random session id, and calls the "createNewClientSession()"
+ // virtual function with this session id as parameter.
+
+ // Lookup a "ClientSession" object by sessionId (integer, and string):
+ ClientSession* lookupClientSession(u_int32_t sessionId);
+ ClientSession* lookupClientSession(char const* sessionIdStr);
+
+ // An iterator over our "ServerMediaSession" objects:
+ class ServerMediaSessionIterator {
+ public:
+ ServerMediaSessionIterator(GenericMediaServer& server);
+ virtual ~ServerMediaSessionIterator();
+ ServerMediaSession* next(); // returns each "ServerMediaSession" in turn — presumably NULL when exhausted; confirm in the .cpp
+ private:
+ HashTable::Iterator* fOurIterator; // iterates over the server's "fServerMediaSessions" table
+ };
+
+protected:
+ friend class ClientConnection;
+ friend class ClientSession;
+ friend class ServerMediaSessionIterator;
+ int fServerSocket; // our listening socket
+ Port fServerPort; // the port that we listen on
+ unsigned fReclamationSeconds; // client-inactivity timeout (see the constructor comment above)
+
+private:
+ HashTable* fServerMediaSessions; // maps 'stream name' strings to "ServerMediaSession" objects
+ HashTable* fClientConnections; // the "ClientConnection" objects that we're using
+ HashTable* fClientSessions; // maps 'session id' strings to "ClientSession" objects
+ u_int32_t fPreviousClientSessionId; // used when generating a fresh session id — TODO confirm in the .cpp
+};
+
+// A data structure used for optional user/password authentication:
+
+class UserAuthenticationDatabase {
+public:
+ UserAuthenticationDatabase(char const* realm = NULL,
+ Boolean passwordsAreMD5 = False);
+ // If "passwordsAreMD5" is True, then each password stored into, or removed from,
+ // the database is actually the value computed
+ // by md5(<username>:<realm>:<actual-password>)
+ virtual ~UserAuthenticationDatabase();
+
+ virtual void addUserRecord(char const* username, char const* password);
+ virtual void removeUserRecord(char const* username);
+
+ virtual char const* lookupPassword(char const* username);
+ // returns NULL if the user name was not present
+
+ char const* realm() { return fRealm; }
+ Boolean passwordsAreMD5() { return fPasswordsAreMD5; }
+
+protected:
+ HashTable* fTable; // maps username strings to password strings
+ char* fRealm; // the authentication realm (see constructor)
+ Boolean fPasswordsAreMD5; // see the constructor comment above
+};
+
+#endif
diff --git a/liveMedia/include/H261VideoRTPSource.hh b/liveMedia/include/H261VideoRTPSource.hh
new file mode 100644
index 0000000..6f1b62b
--- /dev/null
+++ b/liveMedia/include/H261VideoRTPSource.hh
@@ -0,0 +1,56 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.261 Video RTP Sources
+// C++ header
+
+#ifndef _H261_VIDEO_RTP_SOURCE_HH
+#define _H261_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class H261VideoRTPSource: public MultiFramedRTPSource {
+public:
+ static H261VideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat = 31, // 31 is the static RTP payload type for H.261 (RFC 3551)
+ unsigned rtpTimestampFrequency = 90000); // 90 kHz, the standard video timestamp frequency
+
+ u_int32_t lastSpecialHeader() const {return fLastSpecialHeader;}
+
+protected:
+ virtual ~H261VideoRTPSource();
+
+private:
+ H261VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ u_int32_t fLastSpecialHeader; // the most recently-seen H.261-specific header — presumably saved by processSpecialHeader(); confirm in the .cpp
+};
+
+#endif
diff --git a/liveMedia/include/H263plusVideoFileServerMediaSubsession.hh b/liveMedia/include/H263plusVideoFileServerMediaSubsession.hh
new file mode 100644
index 0000000..0ed5963
--- /dev/null
+++ b/liveMedia/include/H263plusVideoFileServerMediaSubsession.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a H.263 video file.
+// C++ header
+
+#ifndef _H263PLUS_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _H263PLUS_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class H263plusVideoFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ static H263plusVideoFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+private:
+ H263plusVideoFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew()
+ virtual ~H263plusVideoFileServerMediaSubsession();
+
+private: // redefined virtual functions
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate); // "estBitrate" is an output: the stream's estimated bitrate — TODO confirm units in the .cpp
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+};
+
+#endif
diff --git a/liveMedia/include/H263plusVideoRTPSink.hh b/liveMedia/include/H263plusVideoRTPSink.hh
new file mode 100644
index 0000000..34e6be1
--- /dev/null
+++ b/liveMedia/include/H263plusVideoRTPSink.hh
@@ -0,0 +1,54 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.263+ video (RFC 4629)
+// C++ header
+
+#ifndef _H263_PLUS_VIDEO_RTP_SINK_HH
+#define _H263_PLUS_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class H263plusVideoRTPSink: public VideoRTPSink {
+public:
+ static H263plusVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency = 90000); // 90 kHz, the standard video timestamp frequency
+
+protected:
+ H263plusVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency);
+ // called only by createNew()
+
+ virtual ~H263plusVideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes); // builds the H.263+ payload header for each outgoing packet — TODO confirm in the .cpp
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual unsigned specialHeaderSize() const; // size of the RFC 4629 payload header
+};
+
+#endif
diff --git a/liveMedia/include/H263plusVideoRTPSource.hh b/liveMedia/include/H263plusVideoRTPSource.hh
new file mode 100644
index 0000000..cb1b4d3
--- /dev/null
+++ b/liveMedia/include/H263plusVideoRTPSource.hh
@@ -0,0 +1,60 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.263+ Video RTP Sources
+// C++ header
+
+#ifndef _H263_PLUS_VIDEO_RTP_SOURCE_HH
+#define _H263_PLUS_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+#define SPECIAL_HEADER_BUFFER_SIZE 1000
+
+class H263plusVideoRTPSource: public MultiFramedRTPSource {
+public:
+ static H263plusVideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency = 90000); // 90 kHz, the standard video timestamp frequency
+
+ // A data structure that stores copies of the special header bytes
+ // from the most recent frame's RTP packets:
+ unsigned char fNumSpecialHeaders; // how many special headers are currently stored
+ unsigned fSpecialHeaderBytesLength; // number of valid bytes in "fSpecialHeaderBytes"
+ unsigned char fSpecialHeaderBytes[SPECIAL_HEADER_BUFFER_SIZE]; // the stored special-header bytes themselves
+ unsigned fPacketSizes[256]; // presumably the sizes of the frame's constituent packets — confirm indexing in the .cpp
+
+protected:
+ virtual ~H263plusVideoRTPSource();
+
+private:
+ H263plusVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/H263plusVideoStreamFramer.hh b/liveMedia/include/H263plusVideoStreamFramer.hh
new file mode 100644
index 0000000..171ff0b
--- /dev/null
+++ b/liveMedia/include/H263plusVideoStreamFramer.hh
@@ -0,0 +1,64 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an H263 video elementary stream into frames.
+// Author: Bernhard Feiten
+
+#ifndef _H263PLUS_VIDEO_STREAM_FRAMER_HH
+#define _H263PLUS_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+
+class H263plusVideoStreamFramer: public FramedFilter {
+public:
+
+ static H263plusVideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+ Boolean& pictureEndMarker() { return fPictureEndMarker; } // a hack for implementing the RTP 'M' bit (returns a mutable reference)
+
+protected:
+ // Constructor called only by createNew(), or by subclass constructors
+ H263plusVideoStreamFramer(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean createParser = True);
+ virtual ~H263plusVideoStreamFramer();
+
+
+public:
+ static void continueReadProcessing(void* clientData,
+ unsigned char* ptr, unsigned size,
+ struct timeval presentationTime); // static 'trampoline' — presumably forwards to the member function below; confirm in the .cpp
+ void continueReadProcessing();
+
+private:
+ virtual void doGetNextFrame();
+ virtual Boolean isH263plusVideoStreamFramer() const;
+
+protected:
+ double fFrameRate; // in frames per second — presumably derived by the parser; confirm in the .cpp
+ unsigned fPictureCount; // hack used to implement doGetNextFrame() ??
+ Boolean fPictureEndMarker; // exposed (writably) via pictureEndMarker() above
+
+private:
+ class H263plusVideoStreamParser* fParser; // forward-declared here; defined in the .cpp file
+ struct timeval fPresentationTimeBase; // base time for computing frame presentation times — TODO confirm in the .cpp
+};
+
+#endif
diff --git a/liveMedia/include/H264VideoFileServerMediaSubsession.hh b/liveMedia/include/H264VideoFileServerMediaSubsession.hh
new file mode 100644
index 0000000..ca1fd65
--- /dev/null
+++ b/liveMedia/include/H264VideoFileServerMediaSubsession.hh
@@ -0,0 +1,61 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a H264 Elementary Stream video file.
+// C++ header
+
+#ifndef _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _H264_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class H264VideoFileServerMediaSubsession: public FileServerMediaSubsession {
+public:
+ static H264VideoFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+ // Used to implement "getAuxSDPLine()":
+ void checkForAuxSDPLine1();
+ void afterPlayingDummy1();
+
+protected:
+ H264VideoFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew();
+ virtual ~H264VideoFileServerMediaSubsession();
+
+ void setDoneFlag() { fDoneFlag = ~0; }
+
+protected: // redefined virtual functions
+ virtual char const* getAuxSDPLine(RTPSink* rtpSink,
+ FramedSource* inputSource);
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+
+private:
+ char* fAuxSDPLine;
+ char fDoneFlag; // used when setting up "fAuxSDPLine"
+ RTPSink* fDummyRTPSink; // ditto
+};
+
+#endif
diff --git a/liveMedia/include/H264VideoFileSink.hh b/liveMedia/include/H264VideoFileSink.hh
new file mode 100644
index 0000000..881288a
--- /dev/null
+++ b/liveMedia/include/H264VideoFileSink.hh
@@ -0,0 +1,47 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.264 Video File Sinks
+// C++ header
+
+#ifndef _H264_VIDEO_FILE_SINK_HH
+#define _H264_VIDEO_FILE_SINK_HH
+
+#ifndef _H264_OR_5_VIDEO_FILE_SINK_HH
+#include "H264or5VideoFileSink.hh"
+#endif
+
+class H264VideoFileSink: public H264or5VideoFileSink {
+public:
+ static H264VideoFileSink* createNew(UsageEnvironment& env, char const* fileName,
+ char const* sPropParameterSetsStr = NULL,
+ // "sPropParameterSetsStr" is an optional 'SDP format' string
+ // (comma-separated Base64-encoded) representing SPS and/or PPS NAL-units
+ // to prepend to the output
+ unsigned bufferSize = 100000,
+ Boolean oneFilePerFrame = False);
+ // See "FileSink.hh" for a description of these parameters.
+
+protected:
+ H264VideoFileSink(UsageEnvironment& env, FILE* fid,
+ char const* sPropParameterSetsStr,
+ unsigned bufferSize, char const* perFrameFileNamePrefix);
+ // called only by createNew()
+ virtual ~H264VideoFileSink();
+};
+
+#endif
diff --git a/liveMedia/include/H264VideoRTPSink.hh b/liveMedia/include/H264VideoRTPSink.hh
new file mode 100644
index 0000000..6ed8cbb
--- /dev/null
+++ b/liveMedia/include/H264VideoRTPSink.hh
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.264 video (RFC 3984)
+// C++ header
+
+#ifndef _H264_VIDEO_RTP_SINK_HH
+#define _H264_VIDEO_RTP_SINK_HH
+
+#ifndef _H264_OR_5_VIDEO_RTP_SINK_HH
+#include "H264or5VideoRTPSink.hh"
+#endif
+
+class H264VideoRTPSink: public H264or5VideoRTPSink {
+public:
+ static H264VideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ static H264VideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* sps, unsigned spsSize, u_int8_t const* pps, unsigned ppsSize);
+ // an optional variant of "createNew()", useful if we know, in advance,
+ // the stream's SPS and PPS NAL units.
+ // This avoids us having to 'pre-read' from the input source in order to get these values.
+ static H264VideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ char const* sPropParameterSetsStr);
+ // an optional variant of "createNew()", useful if we know, in advance,
+ // the stream's SPS and PPS NAL units.
+ // This avoids us having to 'pre-read' from the input source in order to get these values.
+
+protected:
+ H264VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* sps = NULL, unsigned spsSize = 0,
+ u_int8_t const* pps = NULL, unsigned ppsSize = 0);
+ // called only by createNew()
+ virtual ~H264VideoRTPSink();
+
+protected: // redefined virtual functions:
+ virtual char const* auxSDPLine();
+
+private: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+};
+
+#endif
diff --git a/liveMedia/include/H264VideoRTPSource.hh b/liveMedia/include/H264VideoRTPSource.hh
new file mode 100644
index 0000000..22f9ce1
--- /dev/null
+++ b/liveMedia/include/H264VideoRTPSource.hh
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.264 Video RTP Sources
+// C++ header
+
+#ifndef _H264_VIDEO_RTP_SOURCE_HH
+#define _H264_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class H264VideoRTPSource: public MultiFramedRTPSource {
+public:
+ static H264VideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency = 90000);
+
+protected:
+ H264VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+ virtual ~H264VideoRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ friend class H264BufferedPacket;
+ unsigned char fCurPacketNALUnitType;
+};
+
+class SPropRecord {
+public:
+ ~SPropRecord() { delete[] sPropBytes; }
+
+ unsigned sPropLength; // in bytes
+ unsigned char* sPropBytes;
+};
+
+SPropRecord* parseSPropParameterSets(char const* sPropParameterSetsStr,
+ // result parameter:
+ unsigned& numSPropRecords);
+ // Returns the binary value of each 'parameter set' specified in a
+ // "sprop-parameter-sets" string (in the SDP description for a H.264/RTP stream).
+ // The value is returned as an array (length "numSPropRecords") of "SPropRecord"s.
+ // This array is dynamically allocated by this routine, and must be delete[]d by the caller.
+
+#endif
diff --git a/liveMedia/include/H264VideoStreamDiscreteFramer.hh b/liveMedia/include/H264VideoStreamDiscreteFramer.hh
new file mode 100644
index 0000000..6895c25
--- /dev/null
+++ b/liveMedia/include/H264VideoStreamDiscreteFramer.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "H264VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "H264VideoStreamFramer".
+// C++ header
+
+#ifndef _H264_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#define _H264_VIDEO_STREAM_DISCRETE_FRAMER_HH
+
+#ifndef _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#include "H264or5VideoStreamDiscreteFramer.hh"
+#endif
+
+class H264VideoStreamDiscreteFramer: public H264or5VideoStreamDiscreteFramer {
+public:
+ static H264VideoStreamDiscreteFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput = False, Boolean insertAccessUnitDelimiters = False);
+
+protected:
+ H264VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters);
+ // called only by createNew()
+ virtual ~H264VideoStreamDiscreteFramer();
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isH264VideoStreamFramer() const;
+};
+
+#endif
diff --git a/liveMedia/include/H264VideoStreamFramer.hh b/liveMedia/include/H264VideoStreamFramer.hh
new file mode 100644
index 0000000..36273e2
--- /dev/null
+++ b/liveMedia/include/H264VideoStreamFramer.hh
@@ -0,0 +1,45 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up a H.264 Video Elementary Stream into NAL units.
+// C++ header
+
+#ifndef _H264_VIDEO_STREAM_FRAMER_HH
+#define _H264_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH
+#include "H264or5VideoStreamFramer.hh"
+#endif
+
+class H264VideoStreamFramer: public H264or5VideoStreamFramer {
+public:
+ static H264VideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput = False,
+ Boolean insertAccessUnitDelimiters = False);
+
+protected:
+ H264VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean createParser,
+ Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters);
+ // called only by "createNew()"
+ virtual ~H264VideoStreamFramer();
+
+ // redefined virtual functions:
+ virtual Boolean isH264VideoStreamFramer() const;
+};
+
+#endif
diff --git a/liveMedia/include/H264or5VideoFileSink.hh b/liveMedia/include/H264or5VideoFileSink.hh
new file mode 100644
index 0000000..e9dc860
--- /dev/null
+++ b/liveMedia/include/H264or5VideoFileSink.hh
@@ -0,0 +1,46 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.264 or H.265 Video File Sinks
+// C++ header
+
+#ifndef _H264_OR_5_VIDEO_FILE_SINK_HH
+#define _H264_OR_5_VIDEO_FILE_SINK_HH
+
+#ifndef _FILE_SINK_HH
+#include "FileSink.hh"
+#endif
+
+class H264or5VideoFileSink: public FileSink {
+protected:
+ H264or5VideoFileSink(UsageEnvironment& env, FILE* fid,
+ unsigned bufferSize, char const* perFrameFileNamePrefix,
+ char const* sPropParameterSetsStr1,
+ char const* sPropParameterSetsStr2 = NULL,
+ char const* sPropParameterSetsStr3 = NULL);
+ // we're an abstract base class
+ virtual ~H264or5VideoFileSink();
+
+protected: // redefined virtual functions:
+ virtual void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes, struct timeval presentationTime);
+
+private:
+ char const* fSPropParameterSetsStr[3];
+ Boolean fHaveWrittenFirstFrame;
+};
+
+#endif
diff --git a/liveMedia/include/H264or5VideoRTPSink.hh b/liveMedia/include/H264or5VideoRTPSink.hh
new file mode 100644
index 0000000..3a979ef
--- /dev/null
+++ b/liveMedia/include/H264or5VideoRTPSink.hh
@@ -0,0 +1,60 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.264 or H.265 video
+// C++ header
+
+#ifndef _H264_OR_5_VIDEO_RTP_SINK_HH
+#define _H264_OR_5_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class H264or5VideoRTPSink: public VideoRTPSink {
+protected:
+ H264or5VideoRTPSink(int hNumber, // 264 or 265
+ UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* vps = NULL, unsigned vpsSize = 0,
+ u_int8_t const* sps = NULL, unsigned spsSize = 0,
+ u_int8_t const* pps = NULL, unsigned ppsSize = 0);
+ // we're an abstract base class
+ virtual ~H264or5VideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual Boolean continuePlaying();
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+
+protected:
+ int fHNumber;
+ FramedFilter* fOurFragmenter;
+ char* fFmtpSDPLine;
+ u_int8_t* fVPS; unsigned fVPSSize;
+ u_int8_t* fSPS; unsigned fSPSSize;
+ u_int8_t* fPPS; unsigned fPPSSize;
+};
+
+#endif
diff --git a/liveMedia/include/H264or5VideoStreamDiscreteFramer.hh b/liveMedia/include/H264or5VideoStreamDiscreteFramer.hh
new file mode 100644
index 0000000..444fea4
--- /dev/null
+++ b/liveMedia/include/H264or5VideoStreamDiscreteFramer.hh
@@ -0,0 +1,56 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "H264or5VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "H264or5VideoStreamFramer".
+// C++ header
+
+#ifndef _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#define _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH
+
+#ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH
+#include "H264or5VideoStreamFramer.hh"
+#endif
+
+class H264or5VideoStreamDiscreteFramer: public H264or5VideoStreamFramer {
+protected:
+ H264or5VideoStreamDiscreteFramer(int hNumber, UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput,
+ Boolean insertAccessUnitDelimiters);
+ // we're an abstract base class
+ virtual ~H264or5VideoStreamDiscreteFramer();
+
+protected:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+protected:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+ virtual Boolean nalUnitEndsAccessUnit(u_int8_t nal_unit_type);
+};
+
+#endif
diff --git a/liveMedia/include/H264or5VideoStreamFramer.hh b/liveMedia/include/H264or5VideoStreamFramer.hh
new file mode 100644
index 0000000..5813f84
--- /dev/null
+++ b/liveMedia/include/H264or5VideoStreamFramer.hh
@@ -0,0 +1,92 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up a H.264 or H.265 Video Elementary Stream into NAL units.
+// C++ header
+
+#ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH
+#define _H264_OR_5_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
+#include "MPEGVideoStreamFramer.hh"
+#endif
+
+class H264or5VideoStreamFramer: public MPEGVideoStreamFramer {
+public:
+ void getVPSandSPSandPPS(u_int8_t*& vps, unsigned& vpsSize,
+ u_int8_t*& sps, unsigned& spsSize,
+ u_int8_t*& pps, unsigned& ppsSize) const {
+ // Returns pointers to copies of the most recently seen VPS (video parameter set)
+ // SPS (sequence parameter set) and PPS (picture parameter set) NAL units.
+ // (NULL pointers are returned if the NAL units have not yet been seen.)
+ vps = fLastSeenVPS; vpsSize = fLastSeenVPSSize;
+ sps = fLastSeenSPS; spsSize = fLastSeenSPSSize;
+ pps = fLastSeenPPS; ppsSize = fLastSeenPPSSize;
+ }
+
+ void setVPSandSPSandPPS(u_int8_t* vps, unsigned vpsSize,
+ u_int8_t* sps, unsigned spsSize,
+ u_int8_t* pps, unsigned ppsSize) {
+ // Assigns copies of the VPS, SPS and PPS NAL units. If this function is not called,
+ // then these NAL units are assigned only if/when they appear in the input stream.
+ saveCopyOfVPS(vps, vpsSize);
+ saveCopyOfSPS(sps, spsSize);
+ saveCopyOfPPS(pps, ppsSize);
+ }
+
+protected:
+ H264or5VideoStreamFramer(int hNumber, // 264 or 265
+ UsageEnvironment& env, FramedSource* inputSource,
+ Boolean createParser,
+ Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters);
+ // We're an abstract base class.
+ virtual ~H264or5VideoStreamFramer();
+
+ void saveCopyOfVPS(u_int8_t* from, unsigned size);
+ void saveCopyOfSPS(u_int8_t* from, unsigned size);
+ void saveCopyOfPPS(u_int8_t* from, unsigned size);
+
+ void setPresentationTime();
+
+ Boolean isVPS(u_int8_t nal_unit_type);
+ Boolean isSPS(u_int8_t nal_unit_type);
+ Boolean isPPS(u_int8_t nal_unit_type);
+ Boolean isVCL(u_int8_t nal_unit_type);
+
+protected: // redefined virtual functions
+ virtual void doGetNextFrame();
+
+protected:
+ int fHNumber;
+ Boolean fIncludeStartCodeInOutput, fInsertAccessUnitDelimiters;
+ u_int8_t* fLastSeenVPS;
+ unsigned fLastSeenVPSSize;
+ u_int8_t* fLastSeenSPS;
+ unsigned fLastSeenSPSSize;
+ u_int8_t* fLastSeenPPS;
+ unsigned fLastSeenPPSSize;
+ struct timeval fNextPresentationTime; // the presentation time to be used for the next NAL unit to be parsed/delivered after this
+ friend class H264or5VideoStreamParser; // hack
+};
+
+// A general routine for making a copy of a (H.264 or H.265) NAL unit,
+// removing 'emulation' bytes from the copy:
+unsigned removeH264or5EmulationBytes(u_int8_t* to, unsigned toMaxSize,
+ u_int8_t const* from, unsigned fromSize);
+ // returns the size of the copy; it will be <= min(toMaxSize,fromSize)
+
+#endif
diff --git a/liveMedia/include/H265VideoFileServerMediaSubsession.hh b/liveMedia/include/H265VideoFileServerMediaSubsession.hh
new file mode 100644
index 0000000..388258c
--- /dev/null
+++ b/liveMedia/include/H265VideoFileServerMediaSubsession.hh
@@ -0,0 +1,61 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a H265 Elementary Stream video file.
+// C++ header
+
+#ifndef _H265_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _H265_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class H265VideoFileServerMediaSubsession: public FileServerMediaSubsession {
+public:
+ static H265VideoFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+ // Used to implement "getAuxSDPLine()":
+ void checkForAuxSDPLine1();
+ void afterPlayingDummy1();
+
+protected:
+ H265VideoFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew();
+ virtual ~H265VideoFileServerMediaSubsession();
+
+ void setDoneFlag() { fDoneFlag = ~0; }
+
+protected: // redefined virtual functions
+ virtual char const* getAuxSDPLine(RTPSink* rtpSink,
+ FramedSource* inputSource);
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+
+private:
+ char* fAuxSDPLine;
+ char fDoneFlag; // used when setting up "fAuxSDPLine"
+ RTPSink* fDummyRTPSink; // ditto
+};
+
+#endif
diff --git a/liveMedia/include/H265VideoFileSink.hh b/liveMedia/include/H265VideoFileSink.hh
new file mode 100644
index 0000000..1316d7d
--- /dev/null
+++ b/liveMedia/include/H265VideoFileSink.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.265 Video File Sinks
+// C++ header
+
+#ifndef _H265_VIDEO_FILE_SINK_HH
+#define _H265_VIDEO_FILE_SINK_HH
+
+#ifndef _H264_OR_5_VIDEO_FILE_SINK_HH
+#include "H264or5VideoFileSink.hh"
+#endif
+
+class H265VideoFileSink: public H264or5VideoFileSink {
+public:
+ static H265VideoFileSink* createNew(UsageEnvironment& env, char const* fileName,
+ char const* sPropVPSStr = NULL,
+ char const* sPropSPSStr = NULL,
+ char const* sPropPPSStr = NULL,
+ // The "sProp*Str" parameters are optional 'SDP format' strings
+ // (comma-separated Base64-encoded) representing VPS, SPS, and/or PPS NAL-units
+ // to prepend to the output
+ unsigned bufferSize = 100000,
+ Boolean oneFilePerFrame = False);
+ // See "FileSink.hh" for a description of these parameters.
+
+protected:
+ H265VideoFileSink(UsageEnvironment& env, FILE* fid,
+ char const* sPropVPSStr,
+ char const* sPropSPSStr,
+ char const* sPropPPSStr,
+ unsigned bufferSize, char const* perFrameFileNamePrefix);
+ // called only by createNew()
+ virtual ~H265VideoFileSink();
+};
+
+#endif
diff --git a/liveMedia/include/H265VideoRTPSink.hh b/liveMedia/include/H265VideoRTPSink.hh
new file mode 100644
index 0000000..ad01160
--- /dev/null
+++ b/liveMedia/include/H265VideoRTPSink.hh
@@ -0,0 +1,62 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for H.265 video
+// C++ header
+
+#ifndef _H265_VIDEO_RTP_SINK_HH
+#define _H265_VIDEO_RTP_SINK_HH
+
+#ifndef _H264_OR_5_VIDEO_RTP_SINK_HH
+#include "H264or5VideoRTPSink.hh"
+#endif
+
+class H265VideoRTPSink: public H264or5VideoRTPSink {
+public:
+ static H265VideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ static H265VideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* vps, unsigned vpsSize,
+ u_int8_t const* sps, unsigned spsSize,
+ u_int8_t const* pps, unsigned ppsSize);
+ // an optional variant of "createNew()", useful if we know, in advance,
+ // the stream's VPS, SPS and PPS NAL units.
+ // This avoids us having to 'pre-read' from the input source in order to get these values.
+ static H265VideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ char const* sPropVPSStr, char const* sPropSPSStr, char const* sPropPPSStr);
+ // an optional variant of "createNew()", useful if we know, in advance,
+ // the stream's VPS, SPS and PPS NAL units.
+ // This avoids us having to 'pre-read' from the input source in order to get these values.
+
+protected:
+ H265VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int8_t const* vps = NULL, unsigned vpsSize = 0,
+ u_int8_t const* sps = NULL, unsigned spsSize = 0,
+ u_int8_t const* pps = NULL, unsigned ppsSize = 0);
+ // called only by createNew()
+ virtual ~H265VideoRTPSink();
+
+protected: // redefined virtual functions:
+ virtual char const* auxSDPLine();
+
+private: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+};
+
+#endif
diff --git a/liveMedia/include/H265VideoRTPSource.hh b/liveMedia/include/H265VideoRTPSource.hh
new file mode 100644
index 0000000..3131e7b
--- /dev/null
+++ b/liveMedia/include/H265VideoRTPSource.hh
@@ -0,0 +1,67 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// H.265 Video RTP Sources
+// C++ header
+
+#ifndef _H265_VIDEO_RTP_SOURCE_HH
+#define _H265_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class H265VideoRTPSource: public MultiFramedRTPSource {
+public:
+ static H265VideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ Boolean expectDONFields = False,
+ unsigned rtpTimestampFrequency = 90000);
+ // "expectDONFields" is True iff we expect incoming H.265/RTP packets to contain
+ // DONL and DOND fields. I.e., if "tx-mode" == "MST", or "sprop-depack-buf-nalus" > 0.
+
+ u_int64_t currentNALUnitAbsDon() const { return fCurrentNALUnitAbsDon; }
+ // the 'absolute decoding order number (AbsDon)' for the most-recently delivered NAL unit
+
+protected:
+ H265VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ Boolean expectDONFields,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+ virtual ~H265VideoRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ void computeAbsDonFromDON(u_int16_t DON);
+ // presumably updates "fCurrentNALUnitAbsDon" from a newly-seen 16-bit DON — see the .cpp
+
+private:
+ friend class H265BufferedPacket;
+ Boolean fExpectDONFields; // True iff incoming packets are expected to carry DONL/DOND fields
+ unsigned char fCurPacketNALUnitType;
+ u_int16_t fPreviousNALUnitDON; // the (16-bit) DON of the previously-seen NAL unit
+ u_int64_t fCurrentNALUnitAbsDon; // the (64-bit) 'absolute' DON of the most recent NAL unit
+};
+
+#endif
diff --git a/liveMedia/include/H265VideoStreamDiscreteFramer.hh b/liveMedia/include/H265VideoStreamDiscreteFramer.hh
new file mode 100644
index 0000000..b621cc9
--- /dev/null
+++ b/liveMedia/include/H265VideoStreamDiscreteFramer.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "H265VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "H265VideoStreamFramer".
+// C++ header
+
+#ifndef _H265_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#define _H265_VIDEO_STREAM_DISCRETE_FRAMER_HH
+
+#ifndef _H264_OR_5_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#include "H264or5VideoStreamDiscreteFramer.hh"
+#endif
+
+class H265VideoStreamDiscreteFramer: public H264or5VideoStreamDiscreteFramer {
+public:
+ static H265VideoStreamDiscreteFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput = False, Boolean insertAccessUnitDelimiters = False);
+
+protected:
+ H265VideoStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters);
+ // called only by createNew()
+ virtual ~H265VideoStreamDiscreteFramer();
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isH265VideoStreamFramer() const; // identifies this class as an H.265 framer
+};
+
+#endif
diff --git a/liveMedia/include/H265VideoStreamFramer.hh b/liveMedia/include/H265VideoStreamFramer.hh
new file mode 100644
index 0000000..122f390
--- /dev/null
+++ b/liveMedia/include/H265VideoStreamFramer.hh
@@ -0,0 +1,45 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up a H.265 Video Elementary Stream into NAL units.
+// C++ header
+
+#ifndef _H265_VIDEO_STREAM_FRAMER_HH
+#define _H265_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _H264_OR_5_VIDEO_STREAM_FRAMER_HH
+#include "H264or5VideoStreamFramer.hh"
+#endif
+
+class H265VideoStreamFramer: public H264or5VideoStreamFramer {
+public:
+ static H265VideoStreamFramer* createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean includeStartCodeInOutput = False,
+ Boolean insertAccessUnitDelimiters = False);
+
+protected:
+ // Note: unlike "createNew()", the constructor also takes a "createParser" flag,
+ // allowing subclasses (e.g., a 'discrete' framer) to suppress parser creation:
+ H265VideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean createParser,
+ Boolean includeStartCodeInOutput, Boolean insertAccessUnitDelimiters);
+ // called only by "createNew()"
+ virtual ~H265VideoStreamFramer();
+
+ // redefined virtual functions:
+ virtual Boolean isH265VideoStreamFramer() const; // identifies this class as an H.265 framer
+};
+
+#endif
diff --git a/liveMedia/include/HLSSegmenter.hh b/liveMedia/include/HLSSegmenter.hh
new file mode 100644
index 0000000..a6fa2bc
--- /dev/null
+++ b/liveMedia/include/HLSSegmenter.hh
@@ -0,0 +1,76 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A media sink that takes - as input - a MPEG Transport Stream, and outputs a series
+// of MPEG Transport Stream files, each representing a segment of the input stream,
+// suitable for HLS (Apple's "HTTP Live Streaming").
+// C++ header
+
+#ifndef _HLS_SEGMENTER_HH
+#define _HLS_SEGMENTER_HH
+
+#ifndef _MEDIA_SINK_HH
+#include "MediaSink.hh"
+#endif
+
+class HLSSegmenter: public MediaSink {
+public:
+ // Callback type: invoked with the client data, the name of a completed segment file,
+ // and that segment's duration:
+ typedef void (onEndOfSegmentFunc)(void* clientData,
+ char const* segmentFileName, double segmentDuration);
+ static HLSSegmenter* createNew(UsageEnvironment& env,
+ unsigned segmentationDuration, char const* fileNamePrefix,
+ onEndOfSegmentFunc* onEndOfSegmentFunc = NULL,
+ void* onEndOfSegmentClientData = NULL);
+
+private:
+ HLSSegmenter(UsageEnvironment& env, unsigned segmentationDuration, char const* fileNamePrefix,
+ onEndOfSegmentFunc* onEndOfSegmentFunc, void* onEndOfSegmentClientData);
+ // called only by createNew()
+ virtual ~HLSSegmenter();
+
+ static void ourEndOfSegmentHandler(void* clientData, double segmentDuration);
+ void ourEndOfSegmentHandler(double segmentDuration); // per-object form of the handler above
+
+ Boolean openNextOutputSegment(); // presumably opens the next segment's output file — see the .cpp
+
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ virtual void afterGettingFrame(unsigned frameSize,
+ unsigned numTruncatedBytes);
+
+ static void ourOnSourceClosure(void* clientData);
+ void ourOnSourceClosure(); // per-object form of the closure handler above
+
+private: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+ virtual Boolean continuePlaying();
+
+private:
+ unsigned fSegmentationDuration; // from the "segmentationDuration" constructor parameter
+ char const* fFileNamePrefix; // prefix used to form each output segment's file name
+ onEndOfSegmentFunc* fOnEndOfSegmentFunc; // optional client callback (may be NULL)
+ void* fOnEndOfSegmentClientData;
+ Boolean fHaveConfiguredUpstreamSource;
+ unsigned fCurrentSegmentCounter;
+ char* fOutputSegmentFileName; // name of the segment file currently being written
+ FILE* fOutFid; // the currently-open output segment file
+ unsigned char* fOutputFileBuffer;
+};
+
+#endif
diff --git a/liveMedia/include/HMAC_SHA1.hh b/liveMedia/include/HMAC_SHA1.hh
new file mode 100644
index 0000000..0b10dd7
--- /dev/null
+++ b/liveMedia/include/HMAC_SHA1.hh
@@ -0,0 +1,19 @@
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// This code may not be copied or used in any form without permission from Live Networks, Inc.
+//
+// A function for computing the HMAC_SHA1 digest
+// Definition
+
+#ifndef _HMAC_SHA1_HH
+#define _HMAC_SHA1_HH
+
+// Note: everything here is compiled out if "NO_OPENSSL" is defined:
+#ifndef NO_OPENSSL
+#ifndef _HMAC_HASH_HH
+#include "HMAC_hash.hh"
+#endif
+
+// The size (in bytes) of a SHA-1 digest:
+#define SHA1_DIGEST_LEN 20
+
+// Declares a function named "HMAC_SHA1", using the "HMAC_hash" function typedef:
+// (key, keyLength, text, textLength, resultDigest).
+// "resultDigest" must point to at least SHA1_DIGEST_LEN bytes.
+HMAC_hash HMAC_SHA1;
+#endif
+#endif
diff --git a/liveMedia/include/HMAC_hash.hh b/liveMedia/include/HMAC_hash.hh
new file mode 100644
index 0000000..5cef9c3
--- /dev/null
+++ b/liveMedia/include/HMAC_hash.hh
@@ -0,0 +1,22 @@
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// This code may not be copied or used in any form without permission from Live Networks, Inc.
+//
+// Generic HMAC hash functions
+// Definition
+
+#ifndef _HMAC_HASH_HH
+#define _HMAC_HASH_HH
+
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+
+// All HMAC hash functions have the following signature:
+typedef void HMAC_hash(u_int8_t const* key, unsigned keyLength,
+ u_int8_t const* text, unsigned textLength,
+ u_int8_t* resultDigest);
+ // "resultDigest" must point to an array of sufficient size to hold the digest
+ // (the required size depends on the particular hash function being used)
+
+#define HMAC_BLOCK_SIZE 64 // the HMAC input block size, in bytes
+
+#endif
diff --git a/liveMedia/include/InputFile.hh b/liveMedia/include/InputFile.hh
new file mode 100644
index 0000000..176c5ed
--- /dev/null
+++ b/liveMedia/include/InputFile.hh
@@ -0,0 +1,67 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Common routines for opening/closing named input files
+// C++ header
+
+#ifndef _INPUT_FILE_HH
+#define _INPUT_FILE_HH
+
+#include <UsageEnvironment.hh>
+#include <stdio.h>
+
+#if (defined(__WIN32__) || defined(_WIN32) || defined(_WIN32_WCE))
+#ifndef _WIN32_WCE
+// Include header files that might be needed by Windows (in code that uses this header file):
+#include <io.h>
+#include <fcntl.h>
+#endif
+
+#define READ_FROM_FILES_SYNCHRONOUSLY 1
+ // Because Windows is a silly toy operating system that doesn't (reliably) treat
+ // open files as being readable sockets (which can be handled within the default
+ // "BasicTaskScheduler" event loop, using "select()"), we implement file reading
+ // in Windows using synchronous, rather than asynchronous, I/O. This can severely
+ // limit the scalability of servers using this code that run on Windows.
+ // If this is a problem for you, then either use a better operating system,
+ // or else write your own Windows-specific event loop ("TaskScheduler" subclass)
+ // that can handle readable data in Windows open files as an event.
+#endif
+
+#ifndef _WIN32_WCE
+#include <sys/stat.h>
+#endif
+
+FILE* OpenInputFile(UsageEnvironment& env, char const* fileName);
+ // Opens the named file for reading.
+ // (Presumably reports failures via "env" — see the implementation.)
+
+void CloseInputFile(FILE* fid);
+ // Closes a file previously opened with "OpenInputFile()"
+
+#undef GetFileSize // because some platforms already define this as a macro
+u_int64_t GetFileSize(char const* fileName, FILE* fid);
+ // 0 means zero-length, unbounded, or unknown
+
+int64_t SeekFile64(FILE *fid, int64_t offset, int whence);
+ // A platform-independent routine for seeking within (possibly) large files
+
+int64_t TellFile64(FILE *fid);
+ // A platform-independent routine for reporting the position within
+ // (possibly) large files
+
+Boolean FileIsSeekable(FILE *fid);
+ // Tests whether "fid" is seekable, by trying to seek within it.
+
+#endif
diff --git a/liveMedia/include/JPEG2000VideoRTPSink.hh b/liveMedia/include/JPEG2000VideoRTPSink.hh
new file mode 100644
index 0000000..c726fd3
--- /dev/null
+++ b/liveMedia/include/JPEG2000VideoRTPSink.hh
@@ -0,0 +1,46 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for JPEG 2000 video
+// C++ header
+#ifndef _JPEG2000_VIDEO_RTP_SINK_HH
+#define _JPEG2000_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class JPEG2000VideoRTPSink: public VideoRTPSink {
+public:
+ static JPEG2000VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs);
+
+protected:
+ JPEG2000VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs);
+ // called only by createNew()
+
+ virtual ~JPEG2000VideoRTPSink();
+
+private: // redefined virtual functions:
+ // Packs each (possibly fragmented) frame into outgoing RTP packets:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual unsigned specialHeaderSize() const; // size of the payload-specific RTP header
+};
+
+#endif
diff --git a/liveMedia/include/JPEG2000VideoRTPSource.hh b/liveMedia/include/JPEG2000VideoRTPSource.hh
new file mode 100644
index 0000000..ca243d2
--- /dev/null
+++ b/liveMedia/include/JPEG2000VideoRTPSource.hh
@@ -0,0 +1,53 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+#ifndef _JPEG2000_VIDEO_RTP_SOURCE_HH
+#define _JPEG2000_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class JPEG2000VideoRTPSource: public MultiFramedRTPSource {
+public:
+ static JPEG2000VideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* sampling);
+
+protected:
+ virtual ~JPEG2000VideoRTPSource();
+
+protected:
+ JPEG2000VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* sampling);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ char* fSampling; // presumably our own copy of the "sampling" parameter — see the .cpp
+};
+
+#endif
diff --git a/liveMedia/include/JPEGVideoRTPSink.hh b/liveMedia/include/JPEGVideoRTPSink.hh
new file mode 100644
index 0000000..9464be4
--- /dev/null
+++ b/liveMedia/include/JPEGVideoRTPSink.hh
@@ -0,0 +1,52 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for JPEG video (RFC 2435)
+// C++ header
+
+#ifndef _JPEG_VIDEO_RTP_SINK_HH
+#define _JPEG_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class JPEGVideoRTPSink: public VideoRTPSink {
+public:
+ static JPEGVideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs);
+
+protected:
+ JPEGVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs);
+ // called only by createNew()
+
+ virtual ~JPEGVideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+
+ // Packs each (possibly fragmented) frame into outgoing RTP packets:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ // Whether a new frame may be appended to an already partially-filled packet:
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual unsigned specialHeaderSize() const; // size of the payload-specific RTP header
+};
+
+#endif
diff --git a/liveMedia/include/JPEGVideoRTPSource.hh b/liveMedia/include/JPEGVideoRTPSource.hh
new file mode 100644
index 0000000..d6a0fcb
--- /dev/null
+++ b/liveMedia/include/JPEGVideoRTPSource.hh
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// JPEG Video (RFC 2435) RTP Sources
+// C++ header
+
+#ifndef _JPEG_VIDEO_RTP_SOURCE_HH
+#define _JPEG_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+#define MAX_JPEG_HEADER_SIZE 1024
+
+class JPEGVideoRTPSource: public MultiFramedRTPSource {
+public:
+ static JPEGVideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat = 26,
+ unsigned rtpPayloadFrequency = 90000,
+ unsigned defaultWidth = 0, unsigned defaultHeight = 0);
+ // Note: 26 is the static RTP payload type for JPEG, and 90000 its timestamp frequency.
+
+protected:
+ virtual ~JPEGVideoRTPSource();
+
+private:
+ JPEGVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ unsigned defaultWidth, unsigned defaultHeight);
+ // called only by createNew()
+
+ // Image dimensions from the SDP description, if any
+ unsigned fDefaultWidth, fDefaultHeight;
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/JPEGVideoSource.hh b/liveMedia/include/JPEGVideoSource.hh
new file mode 100644
index 0000000..7a7084d
--- /dev/null
+++ b/liveMedia/include/JPEGVideoSource.hh
@@ -0,0 +1,55 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// JPEG video sources
+// C++ header
+
+#ifndef _JPEG_VIDEO_SOURCE_HH
+#define _JPEG_VIDEO_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class JPEGVideoSource: public FramedSource {
+public:
+ // Pure-virtual accessors that each concrete subclass must implement:
+ virtual u_int8_t type() = 0;
+ virtual u_int8_t qFactor() = 0;
+ virtual u_int8_t width() = 0; // # pixels/8 (or 0 for 2048 pixels)
+ virtual u_int8_t height() = 0; // # pixels/8 (or 0 for 2048 pixels)
+
+ virtual u_int8_t const* quantizationTables(u_int8_t& precision,
+ u_int16_t& length);
+ // If "qFactor()" returns a value >= 128, then this function is called
+ // to tell us the quantization tables that are being used.
+ // (The default implementation of this function just returns NULL.)
+ // "precision" and "length" are as defined in RFC 2435, section 3.1.8.
+
+ virtual u_int16_t restartInterval();
+ // If restart intervals are being used (i.e., 64 <= type() <= 127), then this function must be
+ // redefined - by a subclass - to return a non-zero value.
+
+protected:
+ JPEGVideoSource(UsageEnvironment& env); // abstract base class
+ virtual ~JPEGVideoSource();
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isJPEGVideoSource() const; // identifies this class as a JPEG video source
+};
+
+#endif
diff --git a/liveMedia/include/Locale.hh b/liveMedia/include/Locale.hh
new file mode 100644
index 0000000..a3ab3aa
--- /dev/null
+++ b/liveMedia/include/Locale.hh
@@ -0,0 +1,75 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Support for temporarily setting the locale (e.g., to "C" or "POSIX") for (e.g.) parsing or printing
+// floating-point numbers in protocol headers, or calling toupper()/tolower() on human-input strings.
+// C++ header
+
+#ifndef _LOCALE_HH
+#define _LOCALE_HH
+
+// If you're on a system that (for whatever reason) doesn't have either the "setlocale()" or the "newlocale()" function, then
+// add "-DLOCALE_NOT_USED" to your "config.*" file.
+
+// If you're on a system that (for whatever reason) has "setlocale()" but not "newlocale()", then
+// add "-DNEWLOCALE_NOT_USED" to your "config.*" file.
+// (Note that -DLOCALE_NOT_USED implies -DNEWLOCALE_NOT_USED; you do not need both.)
+// Also, for Windows systems, we define "NEWLOCALE_NOT_USED" by default, because at least some Windows systems
+// (or their development environments) don't have "newlocale()". If, however, your Windows system *does* have "newlocale()",
+// then you can override this by defining "NEWLOCALE_USED" before #including this file.
+
+// Finally, some old development environments need a header file "xlocale.h" to use "newlocale()".
+// Should you need this header file, add "-DNEED_XLOCALE_H" to your "config.*" file.
+
+#ifdef NEWLOCALE_USED
+#undef LOCALE_NOT_USED
+#undef NEWLOCALE_NOT_USED
+#else
+#if defined(__WIN32__) || defined(_WIN32)
+#define NEWLOCALE_NOT_USED 1
+#endif
+#endif
+
+#ifndef LOCALE_NOT_USED
+#include <locale.h>
+#ifndef NEWLOCALE_NOT_USED
+#ifdef NEED_XLOCALE_H
+#include <xlocale.h>
+#endif
+#endif
+#endif
+
+
+enum LocaleCategory { All, Numeric }; // define and implement more categories later, as needed
+
+// A scoped (RAII-style) locale switch: constructing a "Locale" changes the locale
+// for the given category to "newLocale"; the destructor presumably restores the
+// saved previous locale ("fPrevLocale") — see the implementation.
+class Locale {
+public:
+ Locale(char const* newLocale, LocaleCategory category = All);
+ virtual ~Locale();
+
+private:
+#ifndef LOCALE_NOT_USED
+#ifndef NEWLOCALE_NOT_USED
+ locale_t fLocale, fPrevLocale; // used when "newlocale()" is available
+#else
+ int fCategoryNum; // the "setlocale()" category number corresponding to "category"
+ char* fPrevLocale; // the locale string to restore on destruction
+#endif
+#endif
+};
+
+#endif
diff --git a/liveMedia/include/MIKEY.hh b/liveMedia/include/MIKEY.hh
new file mode 100644
index 0000000..7eca0c6
--- /dev/null
+++ b/liveMedia/include/MIKEY.hh
@@ -0,0 +1,77 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A data structure that implements a MIKEY message (RFC 3830)
+// C++ header
+
+#ifndef _MIKEY_HH
+#define _MIKEY_HH
+
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+class MIKEYState {
+public:
+ // A "MIKEYState" is created either with default parameters (public constructor),
+ // or by parsing a received binary MIKEY message ("createNew()"):
+ MIKEYState(); // initialize with default parameters
+ virtual ~MIKEYState();
+
+ static MIKEYState* createNew(u_int8_t* messageToParse, unsigned messageSize);
+ // (Attempts to) parse a binary MIKEY message, returning a new "MIKEYState" if successful
+ // (or NULL if unsuccessful).
+ // ("messageToParse" is assumed to have been dynamically allocated;
+ // this function will delete[] it.)
+
+ u_int8_t* generateMessage(unsigned& messageSize) const;
+ // Returns a binary message representing the current MIKEY state, of size "messageSize" bytes.
+ // This array is dynamically allocated by this routine, and must be delete[]d by the caller.
+
+ // Accessors for the encryption/authentication parameters:
+ Boolean encryptSRTP() const { return fEncryptSRTP; }
+ Boolean encryptSRTCP() const { return fEncryptSRTCP; }
+ u_int8_t const* keyData() const { return fKeyData; }
+ u_int32_t MKI() const { return fMKI; }
+ Boolean useAuthentication() const { return fUseAuthentication; }
+
+private:
+ MIKEYState(u_int8_t const* messageToParse, unsigned messageSize, Boolean& parsedOK);
+ // called only by "createNew()"
+
+ // Helpers for building/parsing the internal payload list:
+ void addNewPayload(class MIKEYPayload* newPayload);
+ Boolean parseHDRPayload(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType);
+ Boolean parseNonHDRPayload(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType);
+
+private:
+ // Encryption/authentication parameters, either set by default
+ // (if the first (parameterless) constructor is used), or set by parsing an input message
+ // (if the second constructor is used):
+ Boolean fEncryptSRTP;
+ Boolean fEncryptSRTCP;
+ u_int8_t fKeyData[16+14]; // encryption key + salt
+ u_int32_t fMKI; // used only if encryption is used. (We assume a MKI length of 4.)
+ Boolean fUseAuthentication;
+
+ // Our internal binary representation of the MIKEY payloads:
+ class MIKEYPayload* fHeaderPayload;
+ class MIKEYPayload* fTailPayload;
+ unsigned fTotalPayloadByteCount;
+};
+
+#endif
diff --git a/liveMedia/include/MP3ADU.hh b/liveMedia/include/MP3ADU.hh
new file mode 100644
index 0000000..8f55b17
--- /dev/null
+++ b/liveMedia/include/MP3ADU.hh
@@ -0,0 +1,94 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// 'ADU' MP3 streams (for improved loss-tolerance)
+// C++ header
+
+#ifndef _MP3_ADU_HH
+#define _MP3_ADU_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class ADUFromMP3Source: public FramedFilter {
+public:
+ static ADUFromMP3Source* createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors = True);
+
+ void resetInput();
+ // This is called whenever there's a discontinuity in the input MP3 source
+ // (e.g., due to seeking within the source). It causes any still-unprocessed
+ // MP3 frame data within our queue to be discarded, so that it does not
+ // erroneously get used by backpointers from the new MP3 frames.
+
+ Boolean setScaleFactor(int scale);
+
+protected:
+ ADUFromMP3Source(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors);
+ // called only by createNew()
+ virtual ~ADUFromMP3Source();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual char const* MIMEtype() const;
+
+private:
+ Boolean doGetNextFrame1();
+
+private:
+ Boolean fAreEnqueueingMP3Frame;
+ class SegmentQueue* fSegments;
+ Boolean fIncludeADUdescriptors;
+ unsigned fTotalDataSizeBeforePreviousRead;
+ int fScale;
+ unsigned fFrameCounter;
+};
+
+class MP3FromADUSource: public FramedFilter {
+public:
+ static MP3FromADUSource* createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors = True);
+
+protected:
+ MP3FromADUSource(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean includeADUdescriptors);
+ // called only by createNew()
+ virtual ~MP3FromADUSource();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual char const* MIMEtype() const;
+
+private:
+ Boolean needToGetAnADU();
+ void insertDummyADUsIfNecessary();
+ Boolean generateFrameFromHeadADU();
+
+private:
+ Boolean fAreEnqueueingADU;
+ class SegmentQueue* fSegments;
+};
+
+#endif
diff --git a/liveMedia/include/MP3ADURTPSink.hh b/liveMedia/include/MP3ADURTPSink.hh
new file mode 100644
index 0000000..d3faede
--- /dev/null
+++ b/liveMedia/include/MP3ADURTPSink.hh
@@ -0,0 +1,55 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for 'ADUized' MP3 frames ("mpa-robust")
+// C++ header
+
+#ifndef _MP3_ADU_RTP_SINK_HH
+#define _MP3_ADU_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
+class MP3ADURTPSink: public AudioRTPSink {
+public:
+ static MP3ADURTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char RTPPayloadType);
+
+protected:
+ virtual ~MP3ADURTPSink();
+
+private:
+ MP3ADURTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char RTPPayloadType);
+ // called only by createNew()
+
+
+private:
+ // Redefined virtual functions:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual unsigned specialHeaderSize() const;
+
+private:
+ unsigned fCurADUSize; // used when fragmenting over multiple RTP packets
+};
+
+#endif
diff --git a/liveMedia/include/MP3ADURTPSource.hh b/liveMedia/include/MP3ADURTPSource.hh
new file mode 100644
index 0000000..c5b5125
--- /dev/null
+++ b/liveMedia/include/MP3ADURTPSource.hh
@@ -0,0 +1,49 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP source for 'ADUized' MP3 frames ("mpa-robust")
+// C++ header
+
+#ifndef _MP3_ADU_SOURCE_HH
+#define _MP3_ADU_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class MP3ADURTPSource: public MultiFramedRTPSource {
+public:
+ static MP3ADURTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency = 90000);
+
+protected:
+ virtual ~MP3ADURTPSource();
+
+private:
+ MP3ADURTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/MP3ADUTranscoder.hh b/liveMedia/include/MP3ADUTranscoder.hh
new file mode 100644
index 0000000..775ceef
--- /dev/null
+++ b/liveMedia/include/MP3ADUTranscoder.hh
@@ -0,0 +1,64 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Transcoder for ADUized MP3 frames
+// C++ header
+
+#ifndef _MP3_ADU_TRANSCODER_HH
+#define _MP3_ADU_TRANSCODER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class MP3ADUTranscoder: public FramedFilter {
+public:
+ static MP3ADUTranscoder* createNew(UsageEnvironment& env,
+ unsigned outBitrate /* in kbps */,
+ FramedSource* inputSource);
+
+ unsigned outBitrate() const { return fOutBitrate; }
+protected:
+ MP3ADUTranscoder(UsageEnvironment& env,
+ unsigned outBitrate /* in kbps */,
+ FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~MP3ADUTranscoder();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void getAttributes() const;
+
+private:
+ static void afterGettingFrame(void* clientData,
+ unsigned numBytesRead, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned numBytesRead, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ unsigned fOutBitrate; // in kbps
+ unsigned fAvailableBytesForBackpointer;
+
+ unsigned char* fOrigADU;
+ // used to store incoming ADU prior to transcoding
+};
+
+#endif
diff --git a/liveMedia/include/MP3ADUinterleaving.hh b/liveMedia/include/MP3ADUinterleaving.hh
new file mode 100644
index 0000000..6ab1f4b
--- /dev/null
+++ b/liveMedia/include/MP3ADUinterleaving.hh
@@ -0,0 +1,129 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Interleaving of MP3 ADUs
+// C++ header
+
+#ifndef _MP3_ADU_INTERLEAVING_HH
+#define _MP3_ADU_INTERLEAVING_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+// A data structure used to represent an interleaving
+#define MAX_CYCLE_SIZE 256
+class Interleaving {
+public:
+ Interleaving(unsigned cycleSize, unsigned char const* cycleArray);
+ virtual ~Interleaving();
+
+ unsigned cycleSize() const {return fCycleSize;}
+ unsigned char lookupInverseCycle(unsigned char index) const {
+ return fInverseCycle[index];
+ }
+
+private:
+ unsigned fCycleSize;
+ unsigned char fInverseCycle[MAX_CYCLE_SIZE];
+};
+
+// This class is used only as a base for the following two:
+
+class MP3ADUinterleaverBase: public FramedFilter {
+protected:
+ MP3ADUinterleaverBase(UsageEnvironment& env,
+ FramedSource* inputSource);
+ // abstract base class
+ virtual ~MP3ADUinterleaverBase();
+
+ static FramedSource* getInputSource(UsageEnvironment& env,
+ char const* inputSourceName);
+ static void afterGettingFrame(void* clientData,
+ unsigned numBytesRead,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ virtual void afterGettingFrame(unsigned numBytesRead,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) = 0;
+};
+
+// This class is used to convert an ADU sequence from non-interleaved
+// to interleaved form:
+
+class MP3ADUinterleaver: public MP3ADUinterleaverBase {
+public:
+ static MP3ADUinterleaver* createNew(UsageEnvironment& env,
+ Interleaving const& interleaving,
+ FramedSource* inputSource);
+
+protected:
+ MP3ADUinterleaver(UsageEnvironment& env,
+ Interleaving const& interleaving,
+ FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~MP3ADUinterleaver();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void afterGettingFrame(unsigned numBytesRead,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ void releaseOutgoingFrame();
+
+private:
+ Interleaving const fInterleaving;
+ class InterleavingFrames* fFrames;
+ unsigned char fPositionOfNextIncomingFrame;
+ unsigned fII, fICC;
+};
+
+// This class is used to convert an ADU sequence from interleaved
+// to non-interleaved form:
+
+class MP3ADUdeinterleaver: public MP3ADUinterleaverBase {
+public:
+ static MP3ADUdeinterleaver* createNew(UsageEnvironment& env,
+ FramedSource* inputSource);
+
+protected:
+ MP3ADUdeinterleaver(UsageEnvironment& env,
+ FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~MP3ADUdeinterleaver();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void afterGettingFrame(unsigned numBytesRead,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ void releaseOutgoingFrame();
+
+private:
+ class DeinterleavingFrames* fFrames;
+ unsigned fIIlastSeen, fICClastSeen;
+};
+
+#endif
+
diff --git a/liveMedia/include/MP3AudioFileServerMediaSubsession.hh b/liveMedia/include/MP3AudioFileServerMediaSubsession.hh
new file mode 100644
index 0000000..384428f
--- /dev/null
+++ b/liveMedia/include/MP3AudioFileServerMediaSubsession.hh
@@ -0,0 +1,73 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an MP3 audio file.
+// (Actually, any MPEG-1 or MPEG-2 audio file should work.)
+// C++ header
+
+#ifndef _MP3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _MP3_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+#ifndef _MP3_ADU_INTERLEAVING_HH
+#include "MP3ADUinterleaving.hh"
+#endif
+#ifndef _MP3_ADU_HH
+#include "MP3ADU.hh"
+#endif
+
+class MP3AudioFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ static MP3AudioFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
+ Boolean generateADUs, Interleaving* interleaving);
+ // Note: "interleaving" is used only if "generateADUs" is True,
+ // (and a value of NULL means 'no interleaving')
+
+protected:
+ MP3AudioFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource,
+ Boolean generateADUs,
+ Interleaving* interleaving);
+ // called only by createNew()
+ virtual ~MP3AudioFileServerMediaSubsession();
+
+ FramedSource* createNewStreamSourceCommon(FramedSource* baseMP3Source, unsigned mp3NumBytes, unsigned& estBitrate);
+ void getBaseStreams(FramedSource* frontStream,
+ FramedSource*& sourceMP3Stream, ADUFromMP3Source*& aduStream/*if any*/);
+
+protected: // redefined virtual functions
+ virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+ virtual void setStreamSourceScale(FramedSource* inputSource, float scale);
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+ virtual void testScaleFactor(float& scale);
+ virtual float duration() const;
+
+protected:
+ Boolean fGenerateADUs;
+ Interleaving* fInterleaving;
+ float fFileDuration;
+};
+
+#endif
diff --git a/liveMedia/include/MP3FileSource.hh b/liveMedia/include/MP3FileSource.hh
new file mode 100644
index 0000000..991d6c1
--- /dev/null
+++ b/liveMedia/include/MP3FileSource.hh
@@ -0,0 +1,69 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 File Sources
+// C++ header
+
+#ifndef _MP3_FILE_SOURCE_HH
+#define _MP3_FILE_SOURCE_HH
+
+#ifndef _FRAMED_FILE_SOURCE_HH
+#include "FramedFileSource.hh"
+#endif
+
+class MP3StreamState; // forward
+
+class MP3FileSource: public FramedFileSource {
+public:
+ static MP3FileSource* createNew(UsageEnvironment& env, char const* fileName);
+
+ float filePlayTime() const;
+ unsigned fileSize() const;
+ void setPresentationTimeScale(unsigned scale);
+ void seekWithinFile(double seekNPT, double streamDuration);
+ // if "streamDuration" is >0.0, then we limit the stream to that duration, before treating it as EOF
+
+protected:
+ MP3FileSource(UsageEnvironment& env, FILE* fid);
+ // called only by createNew()
+
+ virtual ~MP3FileSource();
+
+protected:
+ void assignStream(FILE* fid, unsigned filesize);
+ Boolean initializeStream();
+
+ MP3StreamState* streamState() {return fStreamState;}
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual char const* MIMEtype() const;
+ virtual void getAttributes() const;
+
+private:
+ virtual Boolean doGetNextFrame1();
+
+private:
+ MP3StreamState* fStreamState;
+ Boolean fHaveJustInitialized;
+ struct timeval fFirstFramePresentationTime; // set on stream init
+ Boolean fLimitNumBytesToStream;
+ unsigned fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True
+};
+
+#endif
diff --git a/liveMedia/include/MP3Transcoder.hh b/liveMedia/include/MP3Transcoder.hh
new file mode 100644
index 0000000..223f4f3
--- /dev/null
+++ b/liveMedia/include/MP3Transcoder.hh
@@ -0,0 +1,44 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP3 Transcoder
+// C++ header
+
+#ifndef _MP3_TRANSCODER_HH
+#define _MP3_TRANSCODER_HH
+
+#ifndef _MP3_ADU_HH
+#include "MP3ADU.hh"
+#endif
+#ifndef _MP3_ADU_TRANSCODER_HH
+#include "MP3ADUTranscoder.hh"
+#endif
+
+class MP3Transcoder: public MP3FromADUSource {
+public:
+ static MP3Transcoder* createNew(UsageEnvironment& env,
+ unsigned outBitrate /* in kbps */,
+ FramedSource* inputSource);
+
+protected:
+ MP3Transcoder(UsageEnvironment& env,
+ MP3ADUTranscoder* aduTranscoder);
+ // called only by createNew()
+ virtual ~MP3Transcoder();
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2AudioRTPSink.hh b/liveMedia/include/MPEG1or2AudioRTPSink.hh
new file mode 100644
index 0000000..231a8fe
--- /dev/null
+++ b/liveMedia/include/MPEG1or2AudioRTPSink.hh
@@ -0,0 +1,48 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG audio (RFC 2250)
+// C++ header
+
+#ifndef _MPEG_1OR2_AUDIO_RTP_SINK_HH
+#define _MPEG_1OR2_AUDIO_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
+class MPEG1or2AudioRTPSink: public AudioRTPSink {
+public:
+ static MPEG1or2AudioRTPSink* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs);
+
+protected:
+ MPEG1or2AudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs);
+ // called only by createNew()
+
+ virtual ~MPEG1or2AudioRTPSink();
+
+private: // redefined virtual functions:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual unsigned specialHeaderSize() const;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2AudioRTPSource.hh b/liveMedia/include/MPEG1or2AudioRTPSource.hh
new file mode 100644
index 0000000..3e863ce
--- /dev/null
+++ b/liveMedia/include/MPEG1or2AudioRTPSource.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG-1 or MPEG-2 Audio RTP Sources
+// C++ header
+
+#ifndef _MPEG_1OR2_AUDIO_RTP_SOURCE_HH
+#define _MPEG_1OR2_AUDIO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class MPEG1or2AudioRTPSource: public MultiFramedRTPSource {
+public:
+ static MPEG1or2AudioRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat = 14,
+ unsigned rtpTimestampFrequency = 90000);
+
+protected:
+ virtual ~MPEG1or2AudioRTPSource();
+
+private:
+ MPEG1or2AudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2AudioStreamFramer.hh b/liveMedia/include/MPEG1or2AudioStreamFramer.hh
new file mode 100644
index 0000000..8cb23a2
--- /dev/null
+++ b/liveMedia/include/MPEG1or2AudioStreamFramer.hh
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG (1,2) audio elementary stream into frames
+// C++ header
+
+#ifndef _MPEG_1OR2_AUDIO_STREAM_FRAMER_HH
+#define _MPEG_1OR2_AUDIO_STREAM_FRAMER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class MPEG1or2AudioStreamFramer: public FramedFilter {
+public:
+ static MPEG1or2AudioStreamFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean syncWithInputSource = False);
+ // If "syncWithInputSource" is True, the stream's presentation time
+ // will be reset to that of the input source, whenever new data
+ // is read from it.
+
+ void flushInput(); // called if there is a discontinuity (seeking) in the input
+
+private:
+ MPEG1or2AudioStreamFramer(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean syncWithInputSource);
+ // called only by createNew()
+ virtual ~MPEG1or2AudioStreamFramer();
+
+ static void continueReadProcessing(void* clientData,
+ unsigned char* ptr, unsigned size,
+ struct timeval presentationTime);
+ void continueReadProcessing();
+
+ void resetPresentationTime(struct timeval newPresentationTime);
+ // useful if we're being synced with a separate (e.g., video) stream
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ void reset();
+ struct timeval currentFramePlayTime() const;
+
+private:
+ Boolean fSyncWithInputSource;
+ struct timeval fNextFramePresentationTime;
+
+private: // parsing state
+ class MPEG1or2AudioStreamParser* fParser;
+ friend class MPEG1or2AudioStreamParser; // hack
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2Demux.hh b/liveMedia/include/MPEG1or2Demux.hh
new file mode 100644
index 0000000..deb3643
--- /dev/null
+++ b/liveMedia/include/MPEG1or2Demux.hh
@@ -0,0 +1,150 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Demultiplexer for a MPEG 1 or 2 Program Stream
+// C++ header
+
+#ifndef _MPEG_1OR2_DEMUX_HH
+#define _MPEG_1OR2_DEMUX_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class MPEG1or2DemuxedElementaryStream; // forward
+
+class MPEG1or2Demux: public Medium {
+public:
+ static MPEG1or2Demux* createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean reclaimWhenLastESDies = False);
+ // If "reclaimWhenLastESDies" is True, then the demux is deleted when
+ // all "MPEG1or2DemuxedElementaryStream"s that we created get deleted.
+
+ MPEG1or2DemuxedElementaryStream* newElementaryStream(u_int8_t streamIdTag);
+
+ // Specialized versions of the above for audio and video:
+ MPEG1or2DemuxedElementaryStream* newAudioStream();
+ MPEG1or2DemuxedElementaryStream* newVideoStream();
+
+ // A hack for getting raw, undemuxed PES packets from the Program Stream:
+ MPEG1or2DemuxedElementaryStream* newRawPESStream();
+
+ void getNextFrame(u_int8_t streamIdTag,
+ unsigned char* to, unsigned maxSize,
+ FramedSource::afterGettingFunc* afterGettingFunc,
+ void* afterGettingClientData,
+ FramedSource::onCloseFunc* onCloseFunc,
+ void* onCloseClientData);
+ // similar to FramedSource::getNextFrame(), except that it also
+ // takes a stream id tag as parameter.
+
+ void stopGettingFrames(u_int8_t streamIdTag);
+ // similar to FramedSource::stopGettingFrames(), except that it also
+ // takes a stream id tag as parameter.
+
+ static void handleClosure(void* clientData);
+ // This should be called (on ourself) if the source is discovered
+ // to be closed (i.e., no longer readable)
+
+ FramedSource* inputSource() const { return fInputSource; }
+
+ class SCR {
+ public:
+ SCR();
+
+ u_int8_t highBit;
+ u_int32_t remainingBits;
+ u_int16_t extension;
+
+ Boolean isValid;
+ };
+ SCR& lastSeenSCR() { return fLastSeenSCR; }
+
+ unsigned char mpegVersion() const { return fMPEGversion; }
+
+ void flushInput(); // should be called before any 'seek' on the underlying source
+
+private:
+ MPEG1or2Demux(UsageEnvironment& env,
+ FramedSource* inputSource, Boolean reclaimWhenLastESDies);
+ // called only by createNew()
+ virtual ~MPEG1or2Demux();
+
+ void registerReadInterest(u_int8_t streamIdTag,
+ unsigned char* to, unsigned maxSize,
+ FramedSource::afterGettingFunc* afterGettingFunc,
+ void* afterGettingClientData,
+ FramedSource::onCloseFunc* onCloseFunc,
+ void* onCloseClientData);
+
+ Boolean useSavedData(u_int8_t streamIdTag,
+ unsigned char* to, unsigned maxSize,
+ FramedSource::afterGettingFunc* afterGettingFunc,
+ void* afterGettingClientData);
+
+ static void continueReadProcessing(void* clientData,
+ unsigned char* ptr, unsigned size,
+ struct timeval presentationTime);
+ void continueReadProcessing();
+
+private:
+ friend class MPEG1or2DemuxedElementaryStream;
+ void noteElementaryStreamDeletion(MPEG1or2DemuxedElementaryStream* es);
+
+private:
+ FramedSource* fInputSource;
+ SCR fLastSeenSCR;
+ unsigned char fMPEGversion;
+
+ unsigned char fNextAudioStreamNumber;
+ unsigned char fNextVideoStreamNumber;
+ Boolean fReclaimWhenLastESDies;
+ unsigned fNumOutstandingESs;
+
+ // A descriptor for each possible stream id tag:
+ typedef struct OutputDescriptor {
+ // input parameters
+ unsigned char* to; unsigned maxSize;
+ FramedSource::afterGettingFunc* fAfterGettingFunc;
+ void* afterGettingClientData;
+ FramedSource::onCloseFunc* fOnCloseFunc;
+ void* onCloseClientData;
+
+ // output parameters
+ unsigned frameSize; struct timeval presentationTime;
+ class SavedData; // forward
+ SavedData* savedDataHead;
+ SavedData* savedDataTail;
+ unsigned savedDataTotalSize;
+
+ // status parameters
+ Boolean isPotentiallyReadable;
+ Boolean isCurrentlyActive;
+ Boolean isCurrentlyAwaitingData;
+ } OutputDescriptor_t;
+ OutputDescriptor_t fOutput[256];
+
+ unsigned fNumPendingReads;
+ Boolean fHaveUndeliveredData;
+
+private: // parsing state
+ class MPEGProgramStreamParser* fParser;
+ friend class MPEGProgramStreamParser; // hack
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2DemuxedElementaryStream.hh b/liveMedia/include/MPEG1or2DemuxedElementaryStream.hh
new file mode 100644
index 0000000..9458049
--- /dev/null
+++ b/liveMedia/include/MPEG1or2DemuxedElementaryStream.hh
@@ -0,0 +1,69 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An MPEG-1 or MPEG-2 Elementary Stream, demultiplexed from a Program Stream
+// C++ header
+
+#ifndef _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH
+#define _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH
+
+#ifndef _MPEG_1OR2_DEMUX_HH
+#include "MPEG1or2Demux.hh"
+#endif
+
+// A single demultiplexed elementary stream, exposed as a "FramedSource".
+// Instances are created only by a parent "MPEG1or2Demux" (a friend, and the
+// only class with access to the constructor), which feeds it frames.
+class MPEG1or2DemuxedElementaryStream: public FramedSource {
+public:
+  MPEG1or2Demux::SCR lastSeenSCR() const { return fLastSeenSCR; }
+  // the most recently seen System Clock Reference for this stream
+
+  unsigned char mpegVersion() const { return fMPEGversion; }
+  // NOTE(review): presumably 1 or 2, set by the demux -- confirm in the .cpp
+
+  MPEG1or2Demux& sourceDemux() const { return fOurSourceDemux; }
+  // the demux that created (and delivers data to) this stream
+
+private: // We are created only by a MPEG1or2Demux (a friend)
+  MPEG1or2DemuxedElementaryStream(UsageEnvironment& env,
+ u_int8_t streamIdTag,
+ MPEG1or2Demux& sourceDemux);
+  virtual ~MPEG1or2DemuxedElementaryStream();
+
+private:
+  // redefined virtual functions:
+  virtual void doGetNextFrame();
+  virtual void doStopGettingFrames();
+  virtual char const* MIMEtype() const;
+  virtual unsigned maxFrameSize() const;
+
+private:
+  // Static callback; "clientData" presumably carries the instance, which then
+  // runs "afterGettingFrame1()" -- the usual pairing; confirm in the .cpp.
+  static void afterGettingFrame(void* clientData,
+ unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+  void afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+  u_int8_t fOurStreamIdTag; // the stream id tag passed at construction
+  MPEG1or2Demux& fOurSourceDemux;
+  char const* fMIMEtype;
+  MPEG1or2Demux::SCR fLastSeenSCR;
+  unsigned char fMPEGversion;
+
+  friend class MPEG1or2Demux;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2DemuxedServerMediaSubsession.hh b/liveMedia/include/MPEG1or2DemuxedServerMediaSubsession.hh
new file mode 100644
index 0000000..0ac18a1
--- /dev/null
+++ b/liveMedia/include/MPEG1or2DemuxedServerMediaSubsession.hh
@@ -0,0 +1,63 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-1 or 2 demuxer.
+// C++ header
+
+#ifndef _MPEG_1OR2_DEMUXED_SERVER_MEDIA_SUBSESSION_HH
+#define _MPEG_1OR2_DEMUXED_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
+#include "OnDemandServerMediaSubsession.hh"
+#endif
+#ifndef _MPEG_1OR2_FILE_SERVER_DEMUX_HH
+#include "MPEG1or2FileServerDemux.hh"
+#endif
+
+// A unicast, on-demand subsession whose media comes from one elementary
+// stream (identified by "streamIdTag") of a "MPEG1or2FileServerDemux".
+class MPEG1or2DemuxedServerMediaSubsession: public OnDemandServerMediaSubsession{
+public:
+  static MPEG1or2DemuxedServerMediaSubsession*
+  createNew(MPEG1or2FileServerDemux& demux, u_int8_t streamIdTag,
+ Boolean reuseFirstSource,
+ Boolean iFramesOnly = False, double vshPeriod = 5.0);
+  // The last two parameters are relevant for video streams only
+
+private:
+  MPEG1or2DemuxedServerMediaSubsession(MPEG1or2FileServerDemux& demux,
+ u_int8_t streamIdTag, Boolean reuseFirstSource,
+ Boolean iFramesOnly, double vshPeriod);
+  // called only by createNew();
+  virtual ~MPEG1or2DemuxedServerMediaSubsession();
+
+private: // redefined virtual functions
+  virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+  virtual float duration() const;
+
+private:
+  MPEG1or2FileServerDemux& fOurDemux; // the demux we were created from
+  u_int8_t fStreamIdTag;
+  Boolean fIFramesOnly; // for video streams
+  double fVSHPeriod; // for video streams
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2FileServerDemux.hh b/liveMedia/include/MPEG1or2FileServerDemux.hh
new file mode 100644
index 0000000..9728abe
--- /dev/null
+++ b/liveMedia/include/MPEG1or2FileServerDemux.hh
@@ -0,0 +1,67 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server demultiplexer for an MPEG-1 or MPEG-2 Program Stream
+// C++ header
+
+#ifndef _MPEG_1OR2_FILE_SERVER_DEMUX_HH
+#define _MPEG_1OR2_FILE_SERVER_DEMUX_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+#ifndef _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH
+#include "MPEG1or2DemuxedElementaryStream.hh"
+#endif
+
+// A server-side demultiplexer for an MPEG-1/2 Program Stream file.  It hands
+// out "ServerMediaSubsession"s for the file's audio, video, and AC-3 streams.
+class MPEG1or2FileServerDemux: public Medium {
+public:
+  static MPEG1or2FileServerDemux*
+  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+  ServerMediaSubsession* newAudioServerMediaSubsession(); // MPEG-1 or 2 audio
+  ServerMediaSubsession* newVideoServerMediaSubsession(Boolean iFramesOnly = False,
+ double vshPeriod = 5.0
+ /* how often (in seconds) to inject a Video_Sequence_Header,
+ if one doesn't already appear in the stream */);
+  ServerMediaSubsession* newAC3AudioServerMediaSubsession(); // AC-3 audio (from VOB)
+
+  unsigned fileSize() const { return fFileSize; }
+  float fileDuration() const { return fFileDuration; } // in seconds, presumably -- confirm in the .cpp
+
+private:
+  MPEG1or2FileServerDemux(UsageEnvironment& env, char const* fileName,
+ Boolean reuseFirstSource);
+  // called only by createNew();
+  virtual ~MPEG1or2FileServerDemux();
+
+private:
+  friend class MPEG1or2DemuxedServerMediaSubsession;
+  // Creates (or reuses) a demuxed elementary stream for one client session:
+  MPEG1or2DemuxedElementaryStream* newElementaryStream(unsigned clientSessionId,
+ u_int8_t streamIdTag);
+
+private:
+  char const* fFileName;
+  unsigned fFileSize;
+  float fFileDuration;
+  Boolean fReuseFirstSource;
+  // NOTE(review): these appear to cache per-session demuxes so that streams
+  // from the same client session share one demux -- confirm in the .cpp:
+  MPEG1or2Demux* fSession0Demux;
+  MPEG1or2Demux* fLastCreatedDemux;
+  unsigned fLastClientSessionId;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2VideoFileServerMediaSubsession.hh b/liveMedia/include/MPEG1or2VideoFileServerMediaSubsession.hh
new file mode 100644
index 0000000..1e178c6
--- /dev/null
+++ b/liveMedia/include/MPEG1or2VideoFileServerMediaSubsession.hh
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-1 or 2 Elementary Stream video file.
+// C++ header
+
+#ifndef _MPEG_1OR2_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _MPEG_1OR2_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+// A unicast, on-demand subsession that streams an MPEG-1/2 video Elementary
+// Stream file.
+class MPEG1or2VideoFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+  static MPEG1or2VideoFileServerMediaSubsession*
+  createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
+ Boolean iFramesOnly = False,
+ double vshPeriod = 5.0
+ /* how often (in seconds) to inject a Video_Sequence_Header,
+ if one doesn't already appear in the stream */);
+
+private:
+  MPEG1or2VideoFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName,
+ Boolean reuseFirstSource,
+ Boolean iFramesOnly,
+ double vshPeriod);
+  // called only by createNew();
+  virtual ~MPEG1or2VideoFileServerMediaSubsession();
+
+private: // redefined virtual functions
+  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+
+private:
+  // Saved "createNew()" arguments, presumably forwarded to the video framer
+  // when the stream source is created -- confirm in the .cpp:
+  Boolean fIFramesOnly;
+  double fVSHPeriod;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2VideoRTPSink.hh b/liveMedia/include/MPEG1or2VideoRTPSink.hh
new file mode 100644
index 0000000..e80f32c
--- /dev/null
+++ b/liveMedia/include/MPEG1or2VideoRTPSink.hh
@@ -0,0 +1,69 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG video (RFC 2250)
+// C++ header
+
+#ifndef _MPEG_1OR2_VIDEO_RTP_SINK_HH
+#define _MPEG_1OR2_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+// An RTP sink for MPEG-1/2 video, packetized per RFC 2250.
+class MPEG1or2VideoRTPSink: public VideoRTPSink {
+public:
+  static MPEG1or2VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs);
+
+protected:
+  MPEG1or2VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs);
+  // called only by createNew()
+
+  virtual ~MPEG1or2VideoRTPSink();
+
+private: // redefined virtual functions:
+  virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+
+  // Fills out the RFC 2250 video-specific header for each outgoing packet:
+  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+  virtual Boolean allowFragmentationAfterStart() const;
+  virtual
+  Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+  virtual unsigned specialHeaderSize() const;
+
+private:
+  // MPEG video-specific state, used to decide how to fill out the
+  // video-specific header, and when to include multiple 'frames' in a
+  // single outgoing RTP packet. Eventually we should somehow get this
+  // state from the source (MPEG1or2VideoStreamFramer) instead, as the source
+  // already has this info itself.
+  struct {
+    unsigned temporal_reference;
+    unsigned char picture_coding_type;
+    unsigned char vector_code_bits; // FBV,BFC,FFV,FFC from RFC 2250, sec. 3.4
+  } fPictureState;
+  Boolean fPreviousFrameWasSlice;
+  // used to implement frameCanAppearAfterPacketStart()
+  Boolean fSequenceHeaderPresent;
+  Boolean fPacketBeginsSlice, fPacketEndsSlice;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2VideoRTPSource.hh b/liveMedia/include/MPEG1or2VideoRTPSource.hh
new file mode 100644
index 0000000..b05800c
--- /dev/null
+++ b/liveMedia/include/MPEG1or2VideoRTPSource.hh
@@ -0,0 +1,53 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG-1 or MPEG-2 Video RTP Sources
+// C++ header
+
+#ifndef _MPEG_1OR2_VIDEO_RTP_SOURCE_HH
+#define _MPEG_1OR2_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+// An RTP source for MPEG-1/2 video.  The defaults (payload format 32,
+// 90000 Hz timestamps) match the IANA static RTP payload type "MPV"
+// defined for MPEG video (RFC 2250).
+class MPEG1or2VideoRTPSource: public MultiFramedRTPSource {
+public:
+  static MPEG1or2VideoRTPSource*
+  createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat = 32,
+ unsigned rtpPayloadFrequency = 90000);
+
+protected:
+  virtual ~MPEG1or2VideoRTPSource();
+
+private:
+  MPEG1or2VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+  // called only by createNew()
+
+private:
+  // redefined virtual functions:
+  // Parses the RFC 2250 video-specific header at the start of each packet:
+  virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+  virtual Boolean packetIsUsableInJitterCalculation(unsigned char* packet,
+ unsigned packetSize);
+  virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2VideoStreamDiscreteFramer.hh b/liveMedia/include/MPEG1or2VideoStreamDiscreteFramer.hh
new file mode 100644
index 0000000..abb556c
--- /dev/null
+++ b/liveMedia/include/MPEG1or2VideoStreamDiscreteFramer.hh
@@ -0,0 +1,76 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "MPEG1or2VideoStreamFramer" that takes only
+// complete, discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "MPEG1or2VideoStreamFramer".
+// C++ header
+
+#ifndef _MPEG1or2_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#define _MPEG1or2_VIDEO_STREAM_DISCRETE_FRAMER_HH
+
+#ifndef _MPEG1or2_VIDEO_STREAM_FRAMER_HH
+#include "MPEG1or2VideoStreamFramer.hh"
+#endif
+
+#define VSH_MAX_SIZE 1000
+
+// A framer for input that already arrives as complete, discrete MPEG-1/2
+// video frames; avoids the parsing/copying of "MPEG1or2VideoStreamFramer".
+class MPEG1or2VideoStreamDiscreteFramer: public MPEG1or2VideoStreamFramer {
+public:
+  static MPEG1or2VideoStreamDiscreteFramer*
+  createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean iFramesOnly = False, // see MPEG1or2VideoStreamFramer.hh
+ double vshPeriod = 5.0, // see MPEG1or2VideoStreamFramer.hh
+ Boolean leavePresentationTimesUnmodified = False);
+
+protected:
+  MPEG1or2VideoStreamDiscreteFramer(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean iFramesOnly, double vshPeriod, Boolean leavePresentationTimesUnmodified);
+  // called only by createNew()
+  virtual ~MPEG1or2VideoStreamDiscreteFramer();
+
+protected:
+  // redefined virtual functions:
+  virtual void doGetNextFrame();
+
+protected:
+  // Static callback; "clientData" presumably carries the instance, which
+  // then runs "afterGettingFrame1()" -- confirm in the .cpp:
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+  void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+protected:
+  Boolean fLeavePresentationTimesUnmodified;
+  struct timeval fLastNonBFramePresentationTime;
+  unsigned fLastNonBFrameTemporal_reference;
+
+  // A saved copy of the most recently seen 'video_sequence_header',
+  // in case we need to insert it into the stream periodically:
+  unsigned char fSavedVSHBuffer[VSH_MAX_SIZE];
+  unsigned fSavedVSHSize;
+  double fSavedVSHTimestamp;
+  Boolean fIFramesOnly;
+  double fVSHPeriod;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG1or2VideoStreamFramer.hh b/liveMedia/include/MPEG1or2VideoStreamFramer.hh
new file mode 100644
index 0000000..81a59ef
--- /dev/null
+++ b/liveMedia/include/MPEG1or2VideoStreamFramer.hh
@@ -0,0 +1,56 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG 1 or 2 video elementary stream into
+// frames for: Video_Sequence_Header, GOP_Header, Picture_Header
+// C++ header
+
+#ifndef _MPEG_1OR2_VIDEO_STREAM_FRAMER_HH
+#define _MPEG_1OR2_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
+#include "MPEGVideoStreamFramer.hh"
+#endif
+
+// A filter that breaks an MPEG-1/2 video elementary byte stream into frames
+// (Video_Sequence_Header, GOP_Header, Picture_Header).  The actual parsing is
+// done by a friend "MPEG1or2VideoStreamParser" (created unless a subclass
+// passes createParser = False).
+class MPEG1or2VideoStreamFramer: public MPEGVideoStreamFramer {
+public:
+  static MPEG1or2VideoStreamFramer*
+  createNew(UsageEnvironment& env, FramedSource* inputSource,
+ Boolean iFramesOnly = False,
+ double vshPeriod = 5.0
+ /* how often (in seconds) to inject a Video_Sequence_Header,
+ if one doesn't already appear in the stream */);
+
+protected:
+  MPEG1or2VideoStreamFramer(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean iFramesOnly, double vshPeriod,
+ Boolean createParser = True);
+  // called only by createNew(), or by subclass constructors
+  virtual ~MPEG1or2VideoStreamFramer();
+
+private:
+  // redefined virtual functions:
+  virtual Boolean isMPEG1or2VideoStreamFramer() const;
+
+private:
+  double getCurrentPTS() const;
+
+  friend class MPEG1or2VideoStreamParser; // hack
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2IndexFromTransportStream.hh b/liveMedia/include/MPEG2IndexFromTransportStream.hh
new file mode 100644
index 0000000..75b5a23
--- /dev/null
+++ b/liveMedia/include/MPEG2IndexFromTransportStream.hh
@@ -0,0 +1,95 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that produces a sequence of I-frame indices from a MPEG-2 Transport Stream
+// C++ header
+
+#ifndef _MPEG2_IFRAME_INDEX_FROM_TRANSPORT_STREAM_HH
+#define _MPEG2_IFRAME_INDEX_FROM_TRANSPORT_STREAM_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+#ifndef TRANSPORT_PACKET_SIZE
+#define TRANSPORT_PACKET_SIZE 188
+#endif
+
+#ifndef MAX_PES_PACKET_SIZE
+#define MAX_PES_PACKET_SIZE 65536
+#endif
+
+class IndexRecord; // forward
+
+// A filter that reads an MPEG-2 Transport Stream and emits a sequence of
+// I-frame index records (used for trick-play support).  Also handles H.264
+// and H.265 video carried in a Transport Stream (see fIsH264/fIsH265).
+class MPEG2IFrameIndexFromTransportStream: public FramedFilter {
+public:
+  static MPEG2IFrameIndexFromTransportStream*
+  createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+protected:
+  MPEG2IFrameIndexFromTransportStream(UsageEnvironment& env,
+ FramedSource* inputSource);
+  // called only by createNew()
+  virtual ~MPEG2IFrameIndexFromTransportStream();
+
+private:
+  // Redefined virtual functions:
+  virtual void doGetNextFrame();
+
+private:
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+  void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+  static void handleInputClosure(void* clientData);
+  void handleInputClosure1();
+
+  // PSI table handling (Program Association Table / Program Map Table):
+  void analyzePAT(unsigned char* pkt, unsigned size);
+  void analyzePMT(unsigned char* pkt, unsigned size);
+
+  Boolean deliverIndexRecord();
+  Boolean parseFrame();
+  Boolean parseToNextCode(unsigned char& nextCode);
+  void compactParseBuffer();
+  void addToTail(IndexRecord* newIndexRecord);
+
+private:
+  Boolean fIsH264; // True iff the video is H.264 (encapsulated in a Transport Stream)
+  Boolean fIsH265; // True iff the video is H.265 (encapsulated in a Transport Stream)
+  unsigned long fInputTransportPacketCounter;
+  unsigned fClosureNumber;
+  u_int8_t fLastContinuityCounter;
+  float fFirstPCR, fLastPCR; // Program Clock Reference range seen so far
+  Boolean fHaveSeenFirstPCR;
+  u_int16_t fPMT_PID, fVideo_PID;
+  // Note: We assume: 1 program per Transport Stream; 1 video stream per program
+  unsigned char fInputBuffer[TRANSPORT_PACKET_SIZE];
+  // Parse buffer bookkeeping: frame start / current parse position / end of
+  // valid data, all offsets into fParseBuffer:
+  unsigned char* fParseBuffer;
+  unsigned fParseBufferSize;
+  unsigned fParseBufferFrameStart;
+  unsigned fParseBufferParseEnd;
+  unsigned fParseBufferDataEnd;
+  // Singly-linked list of index records awaiting delivery:
+  IndexRecord* fHeadIndexRecord;
+  IndexRecord* fTailIndexRecord;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportFileServerMediaSubsession.hh b/liveMedia/include/MPEG2TransportFileServerMediaSubsession.hh
new file mode 100644
index 0000000..16b6ece
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportFileServerMediaSubsession.hh
@@ -0,0 +1,131 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-2 Transport Stream file.
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _MPEG2_TRANSPORT_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+#ifndef _MPEG2_TRANSPORT_STREAM_FRAMER_HH
+#include "MPEG2TransportStreamFramer.hh"
+#endif
+#ifndef _BYTE_STREAM_FILE_SOURCE_HH
+#include "ByteStreamFileSource.hh"
+#endif
+#ifndef _MPEG2_TRANSPORT_STREAM_TRICK_MODE_FILTER_HH
+#include "MPEG2TransportStreamTrickModeFilter.hh"
+#endif
+#ifndef _MPEG2_TRANSPORT_STREAM_FROM_ES_SOURCE_HH
+#include "MPEG2TransportStreamFromESSource.hh"
+#endif
+
+class ClientTrickPlayState; // forward
+
+// A unicast, on-demand subsession streaming an MPEG-2 Transport Stream file,
+// with trick-play support driven by a companion index file.
+class MPEG2TransportFileServerMediaSubsession: public FileServerMediaSubsession {
+public:
+  static MPEG2TransportFileServerMediaSubsession*
+  createNew(UsageEnvironment& env,
+ char const* dataFileName, char const* indexFileName,
+ Boolean reuseFirstSource);
+
+protected:
+  MPEG2TransportFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName,
+ MPEG2TransportStreamIndexFile* indexFile,
+ Boolean reuseFirstSource);
+  // called only by createNew();
+  virtual ~MPEG2TransportFileServerMediaSubsession();
+
+  // Factory hook so subclasses can supply an extended trick-play state:
+  virtual ClientTrickPlayState* newClientTrickPlayState();
+
+private: // redefined virtual functions
+  // Note that because - to implement 'trick play' operations - we're operating on
+  // more than just the input source, we reimplement some functions that are
+  // already implemented in "OnDemandServerMediaSubsession", rather than
+  // reimplementing "seekStreamSource()" and "setStreamSourceScale()":
+  virtual void startStream(unsigned clientSessionId, void* streamToken,
+ TaskFunc* rtcpRRHandler,
+ void* rtcpRRHandlerClientData,
+ unsigned short& rtpSeqNum,
+ unsigned& rtpTimestamp,
+ ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+ void* serverRequestAlternativeByteHandlerClientData);
+  virtual void pauseStream(unsigned clientSessionId, void* streamToken);
+  virtual void seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+  virtual void setStreamScale(unsigned clientSessionId, void* streamToken, float scale);
+  virtual void deleteStream(unsigned clientSessionId, void*& streamToken);
+
+  // The virtual functions that are usually implemented by "ServerMediaSubsession"s:
+  virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+  virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+
+  virtual void testScaleFactor(float& scale);
+  virtual float duration() const;
+
+private:
+  // Finds the trick-play state for a client session, if any:
+  ClientTrickPlayState* lookupClient(unsigned clientSessionId);
+
+private:
+  MPEG2TransportStreamIndexFile* fIndexFile;
+  float fDuration;
+  HashTable* fClientSessionHashTable; // indexed by client session id
+};
+
+
+// This class encapsulates the 'trick play' state for each current client (for
+// a given "MPEG2TransportFileServerMediaSubsession" - i.e., Transport Stream file).
+// It is used only within the implementation of "MPEG2TransportFileServerMediaSubsession", but is included here,
+// in case subclasses of "MPEG2TransportFileServerMediaSubsession" want to use it.
+
+class ClientTrickPlayState {
+public:
+  ClientTrickPlayState(MPEG2TransportStreamIndexFile* indexFile);
+
+  // Functions to bring "fNPT", "fTSRecordNum" and "fIxRecordNum" in sync:
+  unsigned long updateStateFromNPT(double npt, double seekDuration);
+  void updateStateOnScaleChange();
+  void updateStateOnPlayChange(Boolean reverseToPreviousVSH);
+
+  void handleStreamDeletion();
+  void setSource(MPEG2TransportStreamFramer* framer);
+
+  void setNextScale(float nextScale) { fNextScale = nextScale; }
+  Boolean areChangingScale() const { return fNextScale != fScale; }
+  // true while a scale change has been requested but not yet applied
+
+protected:
+  void updateTSRecordNum();
+  void reseekOriginalTransportStreamSource();
+
+protected:
+  MPEG2TransportStreamIndexFile* fIndexFile;
+  ByteStreamFileSource* fOriginalTransportStreamSource;
+  MPEG2TransportStreamTrickModeFilter* fTrickModeFilter;
+  MPEG2TransportStreamFromESSource* fTrickPlaySource;
+  MPEG2TransportStreamFramer* fFramer;
+  // fNPT: normal play time; fTSRecordNum/fIxRecordNum: positions in the
+  // Transport Stream file and its index file, kept in sync (see above):
+  float fScale, fNextScale, fNPT;
+  unsigned long fTSRecordNum, fIxRecordNum;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamAccumulator.hh b/liveMedia/include/MPEG2TransportStreamAccumulator.hh
new file mode 100644
index 0000000..b76e0ef
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamAccumulator.hh
@@ -0,0 +1,85 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Collects a stream of incoming MPEG Transport Stream packets into
+// a chunk sufficiently large to send in a single outgoing (RTP or UDP) packet.
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_ACCUMULATOR_HH
+#define _MPEG2_TRANSPORT_STREAM_ACCUMULATOR_HH // fixed: previously defined "_MPEG_TRANSPORT_STREAM_ACCUMULATOR_HH" (missing '2'), so the guard tested by #ifndef was never defined and the header was unprotected against multiple inclusion
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+// Gathers incoming Transport Stream packets until a chunk close to
+// "maxPacketSize" (default 1456 bytes) is ready to send in one outgoing
+// RTP/UDP packet.
+class MPEG2TransportStreamAccumulator: public FramedFilter {
+public:
+  static MPEG2TransportStreamAccumulator* createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ unsigned maxPacketSize = 1456);
+
+protected:
+  MPEG2TransportStreamAccumulator(UsageEnvironment& env,
+ FramedSource* inputSource, unsigned maxPacketSize);
+  // called only by createNew()
+  virtual ~MPEG2TransportStreamAccumulator();
+
+private:
+  // redefined virtual functions:
+  virtual void doGetNextFrame();
+
+private:
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+  void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+  unsigned const fDesiredPacketSize; // target size of each delivered chunk
+  unsigned fNumBytesGathered; // bytes accumulated so far toward that target
+};
+
+#endif
+
+#ifndef _MP3_TRANSCODER_HH
+#define _MP3_TRANSCODER_HH
+
+#ifndef _MP3_ADU_HH
+#include "MP3ADU.hh"
+#endif
+#ifndef _MP3_ADU_TRANSCODER_HH
+#include "MP3ADUTranscoder.hh"
+#endif
+
+// NOTE(review): this "MP3Transcoder" declaration appears inside the diff hunk
+// for MPEG2TransportStreamAccumulator.hh, after that header's #endif; it looks
+// like it belongs in a separate MP3Transcoder header -- verify the patch.
+// Transcodes an MP3 stream to a different output bitrate, via ADUs.
+class MP3Transcoder: public MP3FromADUSource {
+public:
+  static MP3Transcoder* createNew(UsageEnvironment& env,
+ unsigned outBitrate /* in kbps */,
+ FramedSource* inputSource);
+
+protected:
+  MP3Transcoder(UsageEnvironment& env,
+ MP3ADUTranscoder* aduTranscoder);
+  // called only by createNew()
+  virtual ~MP3Transcoder();
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamDemux.hh b/liveMedia/include/MPEG2TransportStreamDemux.hh
new file mode 100644
index 0000000..8d83d5d
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamDemux.hh
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Demultiplexer for a MPEG Transport Stream
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_DEMUX_HH
+#define _MPEG2_TRANSPORT_STREAM_DEMUX_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class MPEG2TransportStreamDemux: public Medium {
+public:
+ static MPEG2TransportStreamDemux* createNew(UsageEnvironment& env,
+ FramedSource* inputSource,
+ FramedSource::onCloseFunc* onCloseFunc,
+ void* onCloseClientData);
+
+private:
+ MPEG2TransportStreamDemux(UsageEnvironment& env, FramedSource* inputSource,
+ FramedSource::onCloseFunc* onCloseFunc, void* onCloseClientData);
+ // called only by createNew()
+ virtual ~MPEG2TransportStreamDemux();
+
+ static void handleEndOfFile(void* clientData); // trampoline to the member function below
+ void handleEndOfFile();
+
+private:
+ class MPEG2TransportStreamParser* fParser; // forward-declared; parses the input stream - see the .cpp file
+ FramedSource::onCloseFunc* fOnCloseFunc; // as passed to "createNew()"
+ void* fOnCloseClientData; // ditto
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamFramer.hh b/liveMedia/include/MPEG2TransportStreamFramer.hh
new file mode 100644
index 0000000..8114022
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamFramer.hh
@@ -0,0 +1,78 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that passes through (unchanged) chunks that contain an integral number
+// of MPEG-2 Transport Stream packets, but returning (in "fDurationInMicroseconds")
+// an updated estimate of the time gap between chunks.
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_FRAMER_HH
+#define _MPEG2_TRANSPORT_STREAM_FRAMER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+#ifndef _HASH_TABLE_HH
+#include "HashTable.hh"
+#endif
+
+class MPEG2TransportStreamFramer: public FramedFilter {
+public:
+ static MPEG2TransportStreamFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+ u_int64_t tsPacketCount() const { return fTSPacketCount; }
+
+ void changeInputSource(FramedSource* newInputSource) { fInputSource = newInputSource; }
+
+ void clearPIDStatusTable();
+ void setNumTSPacketsToStream(unsigned long numTSRecordsToStream);
+ void setPCRLimit(float pcrLimit);
+
+protected:
+ MPEG2TransportStreamFramer(UsageEnvironment& env, FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~MPEG2TransportStreamFramer();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize, // trampoline to "afterGettingFrame1()"
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ struct timeval presentationTime);
+
+ Boolean updateTSPacketDurationEstimate(unsigned char* pkt, double timeNow);
+
+private:
+ u_int64_t fTSPacketCount;
+ double fTSPacketDurationEstimate; // running estimate of the time gap between chunks (see the header comment above)
+ HashTable* fPIDStatusTable; // per-PID state; emptied by "clearPIDStatusTable()"
+ u_int64_t fTSPCRCount;
+ Boolean fLimitNumTSPacketsToStream;
+ unsigned long fNumTSPacketsToStream; // used iff "fLimitNumTSPacketsToStream" is True
+ Boolean fLimitTSPacketsToStreamByPCR;
+ float fPCRLimit; // used iff "fLimitTSPacketsToStreamByPCR" is True
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamFromESSource.hh b/liveMedia/include/MPEG2TransportStreamFromESSource.hh
new file mode 100644
index 0000000..5b1fc59
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamFromESSource.hh
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter for converting one or more MPEG Elementary Streams
+// to a MPEG-2 Transport Stream
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_FROM_ES_SOURCE_HH
+#define _MPEG2_TRANSPORT_STREAM_FROM_ES_SOURCE_HH
+
+#ifndef _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH
+#include "MPEG2TransportStreamMultiplexor.hh"
+#endif
+
+class MPEG2TransportStreamFromESSource: public MPEG2TransportStreamMultiplexor {
+public:
+ static MPEG2TransportStreamFromESSource* createNew(UsageEnvironment& env);
+
+ void addNewVideoSource(FramedSource* inputSource, int mpegVersion, int16_t PID = -1);
+ // Note: For MPEG-4 video, set "mpegVersion" to 4; for H.264 video, set "mpegVersion" to 5;
+ // for H.265 video, set "mpegVersion" to 6
+ void addNewAudioSource(FramedSource* inputSource, int mpegVersion, int16_t PID = -1);
+ // Note: For Opus audio, set "mpegVersion" to 3
+
+ // Note: In these functions, if "PID" is not -1, then it (currently, just the low 8 bits)
+ // is used as the stream's PID. Otherwise (if "PID" is -1) the 'stream_id' is used as
+ // the PID.
+
+ static unsigned maxInputESFrameSize; // class-wide (static) setting
+
+protected:
+ MPEG2TransportStreamFromESSource(UsageEnvironment& env);
+ // called only by createNew()
+ virtual ~MPEG2TransportStreamFromESSource();
+
+ void addNewInputSource(FramedSource* inputSource,
+ u_int8_t streamId, int mpegVersion, int16_t PID = -1);
+ // used to implement addNew*Source() above
+
+private:
+ // Redefined virtual functions:
+ virtual void doStopGettingFrames();
+ virtual void awaitNewBuffer(unsigned char* oldBuffer);
+
+private:
+ friend class InputESSourceRecord;
+ class InputESSourceRecord* fInputSources; // record(s) of the added input sources - see the .cpp file
+ unsigned fVideoSourceCounter, fAudioSourceCounter; // presumably updated by the "addNew*Source()" calls - confirm in the .cpp file
+ Boolean fAwaitingBackgroundDelivery;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamFromPESSource.hh b/liveMedia/include/MPEG2TransportStreamFromPESSource.hh
new file mode 100644
index 0000000..e4be2d7
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamFromPESSource.hh
@@ -0,0 +1,62 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter for converting a stream of MPEG PES packets to a MPEG-2 Transport Stream
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_FROM_PES_SOURCE_HH
+#define _MPEG2_TRANSPORT_STREAM_FROM_PES_SOURCE_HH
+
+#ifndef _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH
+#include "MPEG2TransportStreamMultiplexor.hh"
+#endif
+#ifndef _MPEG_1OR2_DEMUXED_ELEMENTARY_STREAM_HH
+#include "MPEG1or2DemuxedElementaryStream.hh"
+#endif
+
+class MPEG2TransportStreamFromPESSource: public MPEG2TransportStreamMultiplexor {
+public:
+ static MPEG2TransportStreamFromPESSource*
+ createNew(UsageEnvironment& env, MPEG1or2DemuxedElementaryStream* inputSource);
+
+protected:
+ MPEG2TransportStreamFromPESSource(UsageEnvironment& env,
+ MPEG1or2DemuxedElementaryStream* inputSource);
+ // called only by createNew()
+ virtual ~MPEG2TransportStreamFromPESSource();
+
+private:
+ // Redefined virtual functions:
+ virtual void doStopGettingFrames();
+ virtual void awaitNewBuffer(unsigned char* oldBuffer);
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize, // trampoline to "afterGettingFrame1()"
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ MPEG1or2DemuxedElementaryStream* fInputSource;
+ unsigned char* fInputBuffer; // buffer into which incoming PES data is read - see the .cpp file
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamIndexFile.hh b/liveMedia/include/MPEG2TransportStreamIndexFile.hh
new file mode 100644
index 0000000..03badfc
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamIndexFile.hh
@@ -0,0 +1,96 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that encapsulates MPEG-2 Transport Stream 'index files'.
+// These index files are used to implement 'trick play' operations
+// (seek-by-time, fast forward, reverse play) on Transport Stream files.
+//
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_INDEX_FILE_HH
+#define _MPEG2_TRANSPORT_STREAM_INDEX_FILE_HH
+
+#ifndef _MEDIA_HH
+#include "Media.hh"
+#endif
+
+#define INDEX_RECORD_SIZE 11
+
+class MPEG2TransportStreamIndexFile: public Medium {
+public:
+ static MPEG2TransportStreamIndexFile* createNew(UsageEnvironment& env,
+ char const* indexFileName);
+
+ virtual ~MPEG2TransportStreamIndexFile();
+
+ // Functions that map between a playing time and a Transport packet number
+ // in the original Transport Stream file:
+
+ void lookupTSPacketNumFromNPT(float& npt, unsigned long& tsPacketNumber,
+ unsigned long& indexRecordNumber);
+ // Looks up the Transport Stream Packet number corresponding to "npt".
+ // (This may modify "npt" to a more exact value.)
+ // (We also return the index record number that we looked up.)
+
+ void lookupPCRFromTSPacketNum(unsigned long& tsPacketNumber, Boolean reverseToPreviousCleanPoint,
+ float& pcr, unsigned long& indexRecordNumber);
+ // Looks up the PCR timestamp for the transport packet "tsPacketNumber".
+ // (Adjust "tsPacketNumber" only if "reverseToPreviousCleanPoint" is True.)
+ // (We also return the index record number that we looked up.)
+
+ // Miscellaneous functions used to implement 'trick play':
+ Boolean readIndexRecordValues(unsigned long indexRecordNum,
+ unsigned long& transportPacketNum, u_int8_t& offset,
+ u_int8_t& size, float& pcr, u_int8_t& recordType);
+ float getPlayingDuration();
+ void stopReading() { closeFid(); }
+
+ int mpegVersion();
+ // returns the best guess for the version of MPEG being used for data within the underlying Transport Stream file.
+ // (1,2,4, or 5 (representing H.264). 0 means 'don't know' (usually because the index file is empty))
+
+private:
+ MPEG2TransportStreamIndexFile(UsageEnvironment& env, char const* indexFileName);
+
+ Boolean openFid();
+ Boolean seekToIndexRecord(unsigned long indexRecordNumber);
+ Boolean readIndexRecord(unsigned long indexRecordNum); // into "fBuf"
+ Boolean readOneIndexRecord(unsigned long indexRecordNum); // closes "fFid" at end
+ void closeFid();
+
+ u_int8_t recordTypeFromBuf() { return fBuf[0]; }
+ u_int8_t offsetFromBuf() { return fBuf[1]; }
+ u_int8_t sizeFromBuf() { return fBuf[2]; }
+ float pcrFromBuf(); // after "fBuf" has been read
+ unsigned long tsPacketNumFromBuf();
+ void setMPEGVersionFromRecordType(u_int8_t recordType);
+
+ Boolean rewindToCleanPoint(unsigned long&ixFound);
+ // used to implement "lookupTSPacketNumFromNPT()"
+
+private:
+ char* fFileName;
+ FILE* fFid; // used internally when reading from the file
+ int fMPEGVersion;
+ unsigned long fCurrentIndexRecordNum; // within "fFid"
+ float fCachedPCR;
+ unsigned long fCachedTSPacketNumber, fCachedIndexRecordNumber;
+ unsigned long fNumIndexRecords;
+ unsigned char fBuf[INDEX_RECORD_SIZE]; // used for reading index records from file
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamMultiplexor.hh b/liveMedia/include/MPEG2TransportStreamMultiplexor.hh
new file mode 100644
index 0000000..7c7458b
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamMultiplexor.hh
@@ -0,0 +1,120 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class for generating MPEG-2 Transport Stream from one or more input
+// Elementary Stream data sources
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH
+#define _MPEG2_TRANSPORT_STREAM_MULTIPLEXOR_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+#ifndef _MPEG_1OR2_DEMUX_HH
+#include "MPEG1or2Demux.hh" // for SCR
+#endif
+
+#define PID_TABLE_SIZE 0x2000 // 2^13
+
+class MPEG2TransportStreamMultiplexor: public FramedSource {
+public:
+ typedef void (onEndOfSegmentFunc)(void* clientData, double segmentDuration);
+ void setTimedSegmentation(unsigned segmentationDuration,
+ onEndOfSegmentFunc* onEndOfSegmentFunc = NULL,
+ void* onEndOfSegmentClientData = NULL);
+ // Specifies that PAT and PMT packets should be output every "segmentationDuration" seconds.
+ // (If "segmentationDuration" is 0 (the default value), then PAT and PMT packets are output
+ // at a preset frequency.)
+ // The optional function "onEndOfSegmentFunc" is called after each segment is output.
+ double currentSegmentDuration() const { return fCurrentSegmentDuration; }
+ // Valid only if "setTimedSegmentation()" was previously called with "segmentationDuration" > 0
+
+ Boolean canDeliverNewFrameImmediately() const { return fInputBufferBytesUsed < fInputBufferSize; }
+ // Can be used by a downstream reader to test whether the next call to "doGetNextFrame()"
+ // will deliver data immediately.
+
+protected:
+ MPEG2TransportStreamMultiplexor(UsageEnvironment& env);
+ virtual ~MPEG2TransportStreamMultiplexor();
+
+ virtual void awaitNewBuffer(unsigned char* oldBuffer) = 0;
+ // implemented by subclasses
+
+ void handleNewBuffer(unsigned char* buffer, unsigned bufferSize,
+ int mpegVersion, MPEG1or2Demux::SCR scr, int16_t PID = -1);
+ // called by "awaitNewBuffer()"
+ // Note: For MPEG-4 video, set "mpegVersion" to 4; for H.264 video, set "mpegVersion" to 5;
+ // for H.265 video, set "mpegVersion" to 6.
+ // For Opus audio, set "mpegVersion" to 3.
+ // The buffer is assumed to be a PES packet, with a proper PES header.
+ // If "PID" is not -1, then it (currently, only the low 8 bits) is used as the stream's PID,
+ // otherwise the "stream_id" in the PES header is reused to be the stream's PID.
+
+private:
+ // Redefined virtual functions:
+ virtual Boolean isMPEG2TransportStreamMultiplexor() const;
+ virtual void doGetNextFrame();
+
+private:
+ void deliverDataToClient(u_int16_t pid, unsigned char* buffer, unsigned bufferSize,
+ unsigned& startPositionInBuffer);
+
+ void deliverPATPacket(); // PAT: Program Association Table
+ void deliverPMTPacket(Boolean hasChanged); // PMT: Program Map Table
+
+ void setProgramStreamMap(unsigned frameSize);
+
+protected:
+ Boolean fHaveVideoStreams;
+
+private:
+ unsigned fOutgoingPacketCounter;
+ unsigned fProgramMapVersion;
+ u_int8_t fPreviousInputProgramMapVersion, fCurrentInputProgramMapVersion;
+ // These two fields are used if we see "program_stream_map"s in the input.
+ struct {
+ unsigned counter;
+ u_int8_t streamType; // for use in Program Maps
+ } fPIDState[PID_TABLE_SIZE];
+ u_int16_t fPCR_PID, fCurrentPID; // only the low 13 bits are used
+ MPEG1or2Demux::SCR fPCR;
+ unsigned char* fInputBuffer;
+ unsigned fInputBufferSize, fInputBufferBytesUsed; // compared in "canDeliverNewFrameImmediately()" above
+ Boolean fIsFirstAdaptationField;
+ unsigned fSegmentationDuration;
+ // if nonzero, this is the number of seconds between successive 'segments'. Each 'segment'
+ // begins with a PAT, followed by a PMT.
+ // if zero (the default value), then the frequency of PATs and PMTs depends on the constants
+ // PAT_PERIOD_IF_UNTIMED and PMT_PERIOD_IF_UNTIMED, defined in the .cpp file.
+ Boolean segmentationIsTimed() const { return fSegmentationDuration > 0; }
+ u_int8_t fSegmentationIndication;
+ // used only if fSegmentationDuration > 0:
+ // 1 if a segment has just ended and the next packet is to be a PAT
+ // 2 if a segment has just ended and the following PAT has been sent; a PMT is next
+ // 0 otherwise
+ double fCurrentSegmentDuration, fPreviousPTS; // used only if fSegmentationDuration > 0
+ onEndOfSegmentFunc* fOnEndOfSegmentFunc; // used only if fSegmentationDuration > 0
+ void* fOnEndOfSegmentClientData; // ditto
+};
+
+
+// The CRC calculation function that Transport Streams use. We make this function public
+// here in case it's useful elsewhere:
+u_int32_t calculateCRC(u_int8_t const* data, unsigned dataLength, u_int32_t initialValue = 0xFFFFFFFF);
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportStreamTrickModeFilter.hh b/liveMedia/include/MPEG2TransportStreamTrickModeFilter.hh
new file mode 100644
index 0000000..85066c1
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportStreamTrickModeFilter.hh
@@ -0,0 +1,99 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. // A filter that converts a MPEG Transport Stream file - with corresponding index file
+// - to a corresponding Video Elementary Stream. It also uses a "scale" parameter
+// to implement 'trick mode' (fast forward or reverse play, using I-frames) on
+// the video stream.
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_STREAM_TRICK_MODE_FILTER_HH
+#define _MPEG2_TRANSPORT_STREAM_TRICK_MODE_FILTER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+#ifndef _MPEG2_TRANSPORT_STREAM_INDEX_FILE_HH
+#include "MPEG2TransportStreamIndexFile.hh"
+#endif
+
+#ifndef TRANSPORT_PACKET_SIZE
+#define TRANSPORT_PACKET_SIZE 188
+#endif
+
+class MPEG2TransportStreamTrickModeFilter: public FramedFilter {
+public:
+ static MPEG2TransportStreamTrickModeFilter*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ MPEG2TransportStreamIndexFile* indexFile, int scale);
+
+ Boolean seekTo(unsigned long tsPacketNumber, unsigned long indexRecordNumber);
+
+ unsigned long nextIndexRecordNum() const { return fNextIndexRecordNum; }
+
+ void forgetInputSource() { fInputSource = NULL; }
+ // this lets us delete this without also deleting the input Transport Stream
+
+protected:
+ MPEG2TransportStreamTrickModeFilter(UsageEnvironment& env, FramedSource* inputSource,
+ MPEG2TransportStreamIndexFile* indexFile, int scale);
+ // called only by createNew()
+ virtual ~MPEG2TransportStreamTrickModeFilter();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ void attemptDeliveryToClient();
+ void seekToTransportPacket(unsigned long tsPacketNum);
+ void readTransportPacket(unsigned long tsPacketNum); // asynchronously
+
+ static void afterGettingFrame(void* clientData, unsigned frameSize, // trampoline to "afterGettingFrame1()"
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize);
+
+ static void onSourceClosure(void* clientData); // trampoline to "onSourceClosure1()"
+ void onSourceClosure1();
+
+private:
+ Boolean fHaveStarted;
+ MPEG2TransportStreamIndexFile* fIndexFile;
+ int fScale; // absolute value
+ int fDirection; // 1 => forward; -1 => reverse
+ enum {
+ SKIPPING_FRAME,
+ DELIVERING_SAVED_FRAME,
+ SAVING_AND_DELIVERING_FRAME
+ } fState;
+ unsigned fFrameCount;
+ unsigned long fNextIndexRecordNum; // next to be read from the index file
+ unsigned long fNextTSPacketNum; // next to be read from the transport stream file
+ unsigned char fInputBuffer[TRANSPORT_PACKET_SIZE];
+ unsigned long fCurrentTSPacketNum; // corresponding to data currently in the buffer
+ unsigned long fDesiredTSPacketNum;
+ u_int8_t fDesiredDataOffset, fDesiredDataSize;
+ float fDesiredDataPCR, fFirstPCR;
+ unsigned long fSavedFrameIndexRecordStart;
+ unsigned long fSavedSequentialIndexRecordNum;
+ Boolean fUseSavedFrameNextTime;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG2TransportUDPServerMediaSubsession.hh b/liveMedia/include/MPEG2TransportUDPServerMediaSubsession.hh
new file mode 100644
index 0000000..bd4dc8a
--- /dev/null
+++ b/liveMedia/include/MPEG2TransportUDPServerMediaSubsession.hh
@@ -0,0 +1,55 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from an incoming UDP (or RTP/UDP) MPEG-2 Transport Stream
+// C++ header
+
+#ifndef _MPEG2_TRANSPORT_UDP_SERVER_MEDIA_SUBSESSION_HH
+#define _MPEG2_TRANSPORT_UDP_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
+#include "OnDemandServerMediaSubsession.hh"
+#endif
+
+class MPEG2TransportUDPServerMediaSubsession: public OnDemandServerMediaSubsession {
+public:
+ static MPEG2TransportUDPServerMediaSubsession*
+ createNew(UsageEnvironment& env,
+ char const* inputAddressStr, // An IP multicast address, or use "0.0.0.0" or NULL for unicast input
+ Port const& inputPort,
+ Boolean inputStreamIsRawUDP = False); // otherwise (default) the input stream is RTP/UDP
+protected:
+ MPEG2TransportUDPServerMediaSubsession(UsageEnvironment& env,
+ char const* inputAddressStr, Port const& inputPort, Boolean inputStreamIsRawUDP);
+ // called only by createNew()
+ virtual ~MPEG2TransportUDPServerMediaSubsession();
+
+protected: // redefined virtual functions
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+protected:
+ char const* fInputAddressStr; // as passed to "createNew()"
+ Port fInputPort; // ditto
+ Groupsock* fInputGroupsock; // presumably the socket for reading the input stream - see the .cpp file
+ Boolean fInputStreamIsRawUDP; // as passed to "createNew()"
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4ESVideoRTPSink.hh b/liveMedia/include/MPEG4ESVideoRTPSink.hh
new file mode 100644
index 0000000..5779bbb
--- /dev/null
+++ b/liveMedia/include/MPEG4ESVideoRTPSink.hh
@@ -0,0 +1,72 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG-4 Elementary Stream video (RFC 3016)
+// C++ header
+
+#ifndef _MPEG4ES_VIDEO_RTP_SINK_HH
+#define _MPEG4ES_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class MPEG4ESVideoRTPSink: public VideoRTPSink {
+public:
+ static MPEG4ESVideoRTPSink* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs, unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency = 90000);
+ static MPEG4ESVideoRTPSink* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
+ u_int8_t profileAndLevelIndication, char const* configStr);
+ // an optional variant of "createNew()", useful if we know, in advance, the stream's 'configuration' info.
+
+
+protected:
+ MPEG4ESVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
+ u_int8_t profileAndLevelIndication = 0, char const* configStr = NULL);
+ // called only by createNew()
+
+ virtual ~MPEG4ESVideoRTPSink();
+
+protected: // redefined virtual functions:
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean allowFragmentationAfterStart() const;
+ virtual Boolean
+ frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+
+ virtual char const* auxSDPLine();
+
+protected:
+ Boolean fVOPIsPresent;
+
+private:
+ u_int8_t fProfileAndLevelIndication; // as passed to "createNew()" (0 if not known in advance)
+ unsigned char* fConfigBytes; // presumably the binary form of "configStr" - see the .cpp file
+ unsigned fNumConfigBytes;
+
+ char* fFmtpSDPLine; // presumably built by "auxSDPLine()" - see the .cpp file
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4ESVideoRTPSource.hh b/liveMedia/include/MPEG4ESVideoRTPSource.hh
new file mode 100644
index 0000000..4d6f34e
--- /dev/null
+++ b/liveMedia/include/MPEG4ESVideoRTPSource.hh
@@ -0,0 +1,51 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MP4V-ES video RTP stream sources
+// C++ header
+
+#ifndef _MPEG4_ES_VIDEO_RTP_SOURCE_HH
+#define _MPEG4_ES_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class MPEG4ESVideoRTPSource: public MultiFramedRTPSource { // "MP4V-ES" payload format (cf. RFC 3016)
+public:
+ static MPEG4ESVideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+
+protected:
+ virtual ~MPEG4ESVideoRTPSource();
+
+private:
+ MPEG4ESVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4GenericRTPSink.hh b/liveMedia/include/MPEG4GenericRTPSink.hh
new file mode 100644
index 0000000..039aa66
--- /dev/null
+++ b/liveMedia/include/MPEG4GenericRTPSink.hh
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG4-GENERIC ("audio", "video", or "application") RTP stream sinks
+// C++ header
+
+#ifndef _MPEG4_GENERIC_RTP_SINK_HH
+#define _MPEG4_GENERIC_RTP_SINK_HH
+
+#ifndef _MULTI_FRAMED_RTP_SINK_HH
+#include "MultiFramedRTPSink.hh"
+#endif
+
+class MPEG4GenericRTPSink: public MultiFramedRTPSink {
+public:
+ static MPEG4GenericRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency,
+ char const* sdpMediaTypeString, char const* mpeg4Mode,
+ char const* configString,
+ unsigned numChannels = 1);
+
+protected:
+ MPEG4GenericRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency,
+ char const* sdpMediaTypeString,
+ char const* mpeg4Mode, char const* configString,
+ unsigned numChannels);
+ // called only by createNew()
+
+ virtual ~MPEG4GenericRTPSink();
+
+private: // redefined virtual functions:
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual unsigned specialHeaderSize() const;
+
+ virtual char const* sdpMediaType() const;
+
+ virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line
+
+private:
+ char const* fSDPMediaTypeString;
+ char const* fMPEG4Mode;
+ char const* fConfigString;
+ char* fFmtpSDPLine;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4GenericRTPSource.hh b/liveMedia/include/MPEG4GenericRTPSource.hh
new file mode 100644
index 0000000..c6ea4e7
--- /dev/null
+++ b/liveMedia/include/MPEG4GenericRTPSource.hh
@@ -0,0 +1,78 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG4-GENERIC ("audio", "video", or "application") RTP stream sources
+// C++ header
+
+#ifndef _MPEG4_GENERIC_RTP_SOURCE_HH
+#define _MPEG4_GENERIC_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class MPEG4GenericRTPSource: public MultiFramedRTPSource {
+public:
+ static MPEG4GenericRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mediumName,
+ char const* mode, unsigned sizeLength, unsigned indexLength,
+ unsigned indexDeltaLength
+ // add other parameters later
+ );
+ // mediumName is "audio", "video", or "application"
+ // it *cannot* be NULL
+
+protected:
+ MPEG4GenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mediumName,
+ char const* mode,
+ unsigned sizeLength, unsigned indexLength,
+ unsigned indexDeltaLength
+ );
+ // called only by createNew(), or by subclass constructors
+ virtual ~MPEG4GenericRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ char* fMIMEType;
+
+ char* fMode;
+ unsigned fSizeLength, fIndexLength, fIndexDeltaLength;
+ unsigned fNumAUHeaders; // in the most recently read packet
+ unsigned fNextAUHeader; // index of the next AU Header to read
+ struct AUHeader* fAUHeaders;
+
+ friend class MPEG4GenericBufferedPacket;
+};
+
+
+
+// A function that looks up the sampling frequency from an
+// "AudioSpecificConfig" string. (0 means 'unknown')
+unsigned samplingFrequencyFromAudioSpecificConfig(char const* configStr);
+
+#endif
diff --git a/liveMedia/include/MPEG4LATMAudioRTPSink.hh b/liveMedia/include/MPEG4LATMAudioRTPSink.hh
new file mode 100644
index 0000000..f3288cc
--- /dev/null
+++ b/liveMedia/include/MPEG4LATMAudioRTPSink.hh
@@ -0,0 +1,69 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for MPEG-4 audio, using LATM multiplexing (RFC 3016)
+// (Note that the initial 'size' field is assumed to be present at the start of
+// each frame.)
+// C++ header
+
+#ifndef _MPEG4_LATM_AUDIO_RTP_SINK_HH
+#define _MPEG4_LATM_AUDIO_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
+class MPEG4LATMAudioRTPSink: public AudioRTPSink {
+public:
+ static MPEG4LATMAudioRTPSink* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency,
+ char const* streamMuxConfigString,
+ unsigned numChannels,
+ Boolean allowMultipleFramesPerPacket = False);
+
+protected:
+ MPEG4LATMAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency,
+ char const* streamMuxConfigString,
+ unsigned numChannels,
+ Boolean allowMultipleFramesPerPacket);
+ // called only by createNew()
+
+ virtual ~MPEG4LATMAudioRTPSink();
+
+private: // redefined virtual functions:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean
+ frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+
+ virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line
+
+private:
+ char const* fStreamMuxConfigString;
+ char* fFmtpSDPLine;
+ Boolean fAllowMultipleFramesPerPacket;
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4LATMAudioRTPSource.hh b/liveMedia/include/MPEG4LATMAudioRTPSource.hh
new file mode 100644
index 0000000..39cbf06
--- /dev/null
+++ b/liveMedia/include/MPEG4LATMAudioRTPSource.hh
@@ -0,0 +1,101 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// MPEG-4 audio, using LATM multiplexing
+// C++ header
+
+#ifndef _MPEG4_LATM_AUDIO_RTP_SOURCE_HH
+#define _MPEG4_LATM_AUDIO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class MPEG4LATMAudioRTPSource: public MultiFramedRTPSource {
+public:
+ static MPEG4LATMAudioRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+
+ // By default, the LATM data length field is included at the beginning of each
+ // returned frame. To omit this field, call the following:
+ void omitLATMDataLengthField();
+
+ Boolean returnedFrameIncludesLATMDataLengthField() const { return fIncludeLATMDataLengthField; }
+
+protected:
+ virtual ~MPEG4LATMAudioRTPSource();
+
+private:
+ MPEG4LATMAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ Boolean fIncludeLATMDataLengthField;
+};
+
+
+// A utility for parsing a "StreamMuxConfig" string
+Boolean
+parseStreamMuxConfigStr(char const* configStr,
+ // result parameters:
+ Boolean& audioMuxVersion,
+ Boolean& allStreamsSameTimeFraming,
+ unsigned char& numSubFrames,
+ unsigned char& numProgram,
+ unsigned char& numLayer,
+ unsigned char*& audioSpecificConfig,
+ unsigned& audioSpecificConfigSize);
+ // Parses "configStr" as a sequence of hexadecimal digits, representing
+ // a "StreamMuxConfig" (as defined in ISO/IEC 14496-3, table 1.21).
+ // Returns, in "audioSpecificConfig", a binary representation of
+ // the enclosed "AudioSpecificConfig" structure (of size
+ // "audioSpecificConfigSize" bytes). The memory for this is allocated
+ // dynamically by this function; the caller is responsible for
+ // freeing it. Other values, that precede "AudioSpecificConfig",
+ // are returned in the other parameters.
+ // Returns True iff the parsing succeeds.
+ // IMPORTANT NOTE: The implementation of this function currently assumes
+ // that everything after the first "numLayer" field is an
+ // "AudioSpecificConfig". Therefore, it will not work properly if
+ // "audioMuxVersion" != 0, "numProgram" > 0, or "numLayer" > 0.
+ // Also, any 'other data' or CRC info will be included at
+ // the end of "audioSpecificConfig".
+
+unsigned char* parseStreamMuxConfigStr(char const* configStr,
+ // result parameter:
+ unsigned& audioSpecificConfigSize);
+ // A variant of the above that returns just the "AudioSpecificConfig" data
+ // (or NULL) if the parsing failed, without bothering with the other
+ // result parameters.
+
+unsigned char* parseGeneralConfigStr(char const* configStr,
+ // result parameter:
+ unsigned& configSize);
+ // A routine that parses an arbitrary config string, returning
+ // the result in binary form.
+
+#endif
diff --git a/liveMedia/include/MPEG4VideoFileServerMediaSubsession.hh b/liveMedia/include/MPEG4VideoFileServerMediaSubsession.hh
new file mode 100644
index 0000000..83dd060
--- /dev/null
+++ b/liveMedia/include/MPEG4VideoFileServerMediaSubsession.hh
@@ -0,0 +1,61 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a MPEG-4 video file.
+// C++ header
+
+#ifndef _MPEG4_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _MPEG4_VIDEO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class MPEG4VideoFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ static MPEG4VideoFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource);
+
+ // Used to implement "getAuxSDPLine()":
+ void checkForAuxSDPLine1();
+ void afterPlayingDummy1();
+
+protected:
+ MPEG4VideoFileServerMediaSubsession(UsageEnvironment& env,
+ char const* fileName, Boolean reuseFirstSource);
+ // called only by createNew();
+ virtual ~MPEG4VideoFileServerMediaSubsession();
+
+ void setDoneFlag() { fDoneFlag = ~0; }
+
+protected: // redefined virtual functions
+ virtual char const* getAuxSDPLine(RTPSink* rtpSink,
+ FramedSource* inputSource);
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+
+private:
+ char* fAuxSDPLine;
+ char fDoneFlag; // used when setting up "fAuxSDPLine"
+ RTPSink* fDummyRTPSink; // ditto
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4VideoStreamDiscreteFramer.hh b/liveMedia/include/MPEG4VideoStreamDiscreteFramer.hh
new file mode 100644
index 0000000..f201010
--- /dev/null
+++ b/liveMedia/include/MPEG4VideoStreamDiscreteFramer.hh
@@ -0,0 +1,73 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simplified version of "MPEG4VideoStreamFramer" that takes only complete,
+// discrete frames (rather than an arbitrary byte stream) as input.
+// This avoids the parsing and data copying overhead of the full
+// "MPEG4VideoStreamFramer".
+// C++ header
+
+#ifndef _MPEG4_VIDEO_STREAM_DISCRETE_FRAMER_HH
+#define _MPEG4_VIDEO_STREAM_DISCRETE_FRAMER_HH
+
+#ifndef _MPEG4_VIDEO_STREAM_FRAMER_HH
+#include "MPEG4VideoStreamFramer.hh"
+#endif
+
+class MPEG4VideoStreamDiscreteFramer: public MPEG4VideoStreamFramer {
+public:
+ static MPEG4VideoStreamDiscreteFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean leavePresentationTimesUnmodified = False);
+
+protected:
+ MPEG4VideoStreamDiscreteFramer(UsageEnvironment& env,
+ FramedSource* inputSource, Boolean leavePresentationTimesUnmodified);
+ // called only by createNew()
+ virtual ~MPEG4VideoStreamDiscreteFramer();
+
+protected:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+protected:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+ Boolean getNextFrameBit(u_int8_t& result);
+ Boolean getNextFrameBits(unsigned numBits, u_int32_t& result);
+ // Which are used by:
+ void analyzeVOLHeader();
+
+protected:
+ Boolean fLeavePresentationTimesUnmodified;
+ u_int32_t vop_time_increment_resolution;
+ unsigned fNumVTIRBits;
+ // # of bits needed to count to "vop_time_increment_resolution"
+ struct timeval fLastNonBFramePresentationTime;
+ unsigned fLastNonBFrameVop_time_increment;
+
+private:
+ unsigned fNumBitsSeenSoFar; // used by the getNextFrameBit*() routines
+};
+
+#endif
diff --git a/liveMedia/include/MPEG4VideoStreamFramer.hh b/liveMedia/include/MPEG4VideoStreamFramer.hh
new file mode 100644
index 0000000..d0cb351
--- /dev/null
+++ b/liveMedia/include/MPEG4VideoStreamFramer.hh
@@ -0,0 +1,75 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG-4 video elementary stream into
+// frames for:
+// - Visual Object Sequence (VS) Header + Visual Object (VO) Header
+// + Video Object Layer (VOL) Header
+// - Group of VOP (GOV) Header
+// - VOP frame
+// C++ header
+
+#ifndef _MPEG4_VIDEO_STREAM_FRAMER_HH
+#define _MPEG4_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
+#include "MPEGVideoStreamFramer.hh"
+#endif
+
+class MPEG4VideoStreamFramer: public MPEGVideoStreamFramer {
+public:
+ static MPEG4VideoStreamFramer*
+ createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+ u_int8_t profile_and_level_indication() const {
+ return fProfileAndLevelIndication;
+ }
+
+ unsigned char* getConfigBytes(unsigned& numBytes) const;
+
+ void setConfigInfo(u_int8_t profileAndLevelIndication, char const* configStr);
+ // Assigns the "profile_and_level_indication" number, and the 'config' bytes.
+ // If this function is not called, then this data is only assigned later, when it appears in the input stream.
+
+protected:
+ MPEG4VideoStreamFramer(UsageEnvironment& env,
+ FramedSource* inputSource,
+ Boolean createParser = True);
+ // called only by createNew(), or by subclass constructors
+ virtual ~MPEG4VideoStreamFramer();
+
+ void startNewConfig();
+ void appendToNewConfig(unsigned char* newConfigBytes,
+ unsigned numNewBytes);
+ void completeNewConfig();
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isMPEG4VideoStreamFramer() const;
+
+protected:
+ u_int8_t fProfileAndLevelIndication;
+ unsigned char* fConfigBytes;
+ unsigned fNumConfigBytes;
+
+private:
+ unsigned char* fNewConfigBytes;
+ unsigned fNumNewConfigBytes;
+ friend class MPEG4VideoStreamParser; // hack
+};
+
+#endif
diff --git a/liveMedia/include/MPEGVideoStreamFramer.hh b/liveMedia/include/MPEGVideoStreamFramer.hh
new file mode 100644
index 0000000..1afdd00
--- /dev/null
+++ b/liveMedia/include/MPEGVideoStreamFramer.hh
@@ -0,0 +1,85 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A filter that breaks up an MPEG video elementary stream into
+// headers and frames
+// C++ header
+
+#ifndef _MPEG_VIDEO_STREAM_FRAMER_HH
+#define _MPEG_VIDEO_STREAM_FRAMER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class TimeCode {
+public:
+ TimeCode();
+ virtual ~TimeCode();
+
+ int operator==(TimeCode const& arg2);
+ unsigned days, hours, minutes, seconds, pictures;
+};
+
+class MPEGVideoStreamFramer: public FramedFilter {
+public:
+ Boolean& pictureEndMarker() { return fPictureEndMarker; }
+ // a hack for implementing the RTP 'M' bit
+
+ void flushInput(); // called if there is a discontinuity (seeking) in the input
+
+protected:
+ MPEGVideoStreamFramer(UsageEnvironment& env, FramedSource* inputSource);
+ // we're an abstract base class
+ virtual ~MPEGVideoStreamFramer();
+
+ void computePresentationTime(unsigned numAdditionalPictures);
+ // sets "fPresentationTime"
+ void setTimeCode(unsigned hours, unsigned minutes, unsigned seconds,
+ unsigned pictures, unsigned picturesSinceLastGOP);
+
+protected: // redefined virtual functions
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ void reset();
+
+ static void continueReadProcessing(void* clientData,
+ unsigned char* ptr, unsigned size,
+ struct timeval presentationTime);
+ void continueReadProcessing();
+
+protected:
+ double fFrameRate; // Note: For MPEG-4, this is really a 'tick rate'
+ unsigned fPictureCount; // hack used to implement doGetNextFrame()
+ Boolean fPictureEndMarker;
+ struct timeval fPresentationTimeBase;
+
+ // parsing state
+ class MPEGVideoStreamParser* fParser;
+ friend class MPEGVideoStreamParser; // hack
+
+private:
+ TimeCode fCurGOPTimeCode, fPrevGOPTimeCode;
+ unsigned fPicturesAdjustment;
+ double fPictureTimeBase;
+ unsigned fTcSecsBase;
+ Boolean fHaveSeenFirstTimeCode;
+};
+
+#endif
diff --git a/liveMedia/include/MatroskaFile.hh b/liveMedia/include/MatroskaFile.hh
new file mode 100644
index 0000000..90ac205
--- /dev/null
+++ b/liveMedia/include/MatroskaFile.hh
@@ -0,0 +1,212 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that encapsulates a Matroska file.
+// C++ header
+
+#ifndef _MATROSKA_FILE_HH
+#define _MATROSKA_FILE_HH
+
+#ifndef _RTP_SINK_HH
+#include "RTPSink.hh"
+#endif
+#ifndef _FILE_SINK_HH
+#include "FileSink.hh"
+#endif
+#ifndef _HASH_TABLE_HH
+#include "HashTable.hh"
+#endif
+
+class MatroskaTrack; // forward
+class MatroskaDemux; // forward
+
+class MatroskaFile: public Medium {
+public:
+ typedef void (onCreationFunc)(MatroskaFile* newFile, void* clientData);
+ static void createNew(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData,
+ char const* preferredLanguage = "eng");
+ // Note: Unlike most "createNew()" functions, this one doesn't return a new object immediately. Instead, because this class
+ // requires file reading (to parse the Matroska 'Track' headers) before a new object can be initialized, the creation of a new
+ // object is signalled by calling - from the event loop - an 'onCreationFunc' that is passed as a parameter to "createNew()".
+
+ MatroskaTrack* lookup(unsigned trackNumber) const;
+
+ // Create a demultiplexor for extracting tracks from this file. (Separate clients will typically have separate demultiplexors.)
+ MatroskaDemux* newDemux();
+
+ // Parameters of the file ('Segment'); set when the file is parsed:
+ unsigned timecodeScale() { return fTimecodeScale; } // in nanoseconds
+ float segmentDuration() { return fSegmentDuration; } // in units of "timecodeScale()"
+ float fileDuration(); // in seconds
+
+ char const* fileName() const { return fFileName; }
+
+ unsigned chosenVideoTrackNumber() { return fChosenVideoTrackNumber; }
+ unsigned chosenAudioTrackNumber() { return fChosenAudioTrackNumber; }
+ unsigned chosenSubtitleTrackNumber() { return fChosenSubtitleTrackNumber; }
+
+ FramedSource*
+ createSourceForStreaming(FramedSource* baseSource, unsigned trackNumber,
+ unsigned& estBitrate, unsigned& numFiltersInFrontOfTrack);
+ // Takes a data source (which must be a demultiplexed track from this file) and returns
+ // a (possibly modified) data source that can be used for streaming.
+
+ char const* trackMIMEType(unsigned trackNumber) const;
+ // in the form "<medium-name>/<CODEC-NAME>", or NULL if no such track exists
+
+ RTPSink* createRTPSinkForTrackNumber(unsigned trackNumber, Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic);
+ // Creates a "RTPSink" object that would be appropriate for streaming the specified track,
+ // or NULL if no appropriate "RTPSink" exists
+
+ FileSink* createFileSinkForTrackNumber(unsigned trackNumber, char const* fileName);
+ // Creates a "FileSink" object that would be appropriate for recording the contents of
+ // the specified track, or NULL if no appropriate "FileSink" exists.
+
+private:
+ MatroskaFile(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData,
+ char const* preferredLanguage);
+ // called only by createNew()
+ virtual ~MatroskaFile();
+
+ static void handleEndOfTrackHeaderParsing(void* clientData);
+ void handleEndOfTrackHeaderParsing();
+
+ void addTrack(MatroskaTrack* newTrack, unsigned trackNumber);
+ void addCuePoint(double cueTime, u_int64_t clusterOffsetInFile, unsigned blockNumWithinCluster);
+ Boolean lookupCuePoint(double& cueTime, u_int64_t& resultClusterOffsetInFile, unsigned& resultBlockNumWithinCluster);
+ void printCuePoints(FILE* fid);
+
+ void removeDemux(MatroskaDemux* demux);
+
+ void getH264ConfigData(MatroskaTrack const* track,
+ u_int8_t*& sps, unsigned& spsSize,
+ u_int8_t*& pps, unsigned& ppsSize);
+ // "sps","pps" are dynamically allocated by this function, and must be delete[]d afterwards
+ void getH265ConfigData(MatroskaTrack const* track,
+ u_int8_t*& vps, unsigned& vpsSize,
+ u_int8_t*& sps, unsigned& spsSize,
+ u_int8_t*& pps, unsigned& ppsSize);
+ // "vps","sps","pps" are dynamically allocated by this function, and must be delete[]d afterwards
+
+ void getVorbisOrTheoraConfigData(MatroskaTrack const* track,
+ u_int8_t*& identificationHeader, unsigned& identificationHeaderSize,
+ u_int8_t*& commentHeader, unsigned& commentHeaderSize,
+ u_int8_t*& setupHeader, unsigned& setupHeaderSize);
+ // "identificationHeader", "commentHeader", "setupHeader" are dynamically allocated by this function, and must be delete[]d afterwards
+
+private:
+ friend class MatroskaFileParser;
+ friend class MatroskaDemux;
+ char const* fFileName;
+ onCreationFunc* fOnCreation;
+ void* fOnCreationClientData;
+ char const* fPreferredLanguage;
+
+ unsigned fTimecodeScale; // in nanoseconds
+ float fSegmentDuration; // in units of "fTimecodeScale"
+ u_int64_t fSegmentDataOffset, fClusterOffset, fCuesOffset;
+
+ class MatroskaTrackTable* fTrackTable;
+ HashTable* fDemuxesTable;
+ class CuePoint* fCuePoints;
+ unsigned fChosenVideoTrackNumber, fChosenAudioTrackNumber, fChosenSubtitleTrackNumber;
+ class MatroskaFileParser* fParserForInitialization;
+};
+
+// We define our own track type codes as bits (powers of 2), so we can use the set of track types as a bitmap, representing a set:
+// (Note that MATROSKA_TRACK_TYPE_OTHER must be last, and have the largest value.)
+#define MATROSKA_TRACK_TYPE_VIDEO 0x01
+#define MATROSKA_TRACK_TYPE_AUDIO 0x02
+#define MATROSKA_TRACK_TYPE_SUBTITLE 0x04
+#define MATROSKA_TRACK_TYPE_OTHER 0x08
+
+class MatroskaTrack {
+public:
+ MatroskaTrack();
+ virtual ~MatroskaTrack();
+
+ // track parameters
+ unsigned trackNumber;
+ u_int8_t trackType;
+ Boolean isEnabled, isDefault, isForced;
+ unsigned defaultDuration;
+ char* name;
+ char* language;
+ char* codecID;
+ unsigned samplingFrequency;
+ unsigned numChannels;
+ char const* mimeType;
+ unsigned codecPrivateSize;
+ u_int8_t* codecPrivate;
+ Boolean codecPrivateUsesH264FormatForH265; // a hack specifically for H.265 video tracks
+ Boolean codecIsOpus; // a hack for Opus audio
+ unsigned headerStrippedBytesSize;
+ u_int8_t* headerStrippedBytes;
+ char const* colorSampling;
+ char const* colorimetry;
+ unsigned pixelWidth;
+ unsigned pixelHeight;
+ unsigned bitDepth;
+ unsigned subframeSizeSize; // 0 means: frames do not have subframes (the default behavior)
+ Boolean haveSubframes() const { return subframeSizeSize > 0; }
+};
+
+class MatroskaDemux: public Medium {
+public:
+ FramedSource* newDemuxedTrack();
+ FramedSource* newDemuxedTrack(unsigned& resultTrackNumber);
+ // Returns a new stream ("FramedSource" subclass) that represents the next preferred media
+ // track (video, audio, subtitle - in that order) from the file. (Preferred media tracks
+ // are based on the file's language preference.)
+ // This function returns NULL when no more media tracks exist.
+
+ FramedSource* newDemuxedTrackByTrackNumber(unsigned trackNumber);
+ // As above, but creates a new stream for a specific track number within the Matroska file.
+ // (You should not call this function more than once with the same track number.)
+
+ // Note: We assume that:
+ // - Every track created by "newDemuxedTrack()" is later read
+ // - All calls to "newDemuxedTrack()" are made before any track is read
+
+protected:
+ friend class MatroskaFile;
+ friend class MatroskaFileParser;
+ class MatroskaDemuxedTrack* lookupDemuxedTrack(unsigned trackNumber);
+
+ MatroskaDemux(MatroskaFile& ourFile); // we're created only by a "MatroskaFile" (a friend)
+ virtual ~MatroskaDemux();
+
+private:
+ friend class MatroskaDemuxedTrack;
+ void removeTrack(unsigned trackNumber);
+ void continueReading(); // called by a demuxed track to tell us that it has a pending read ("doGetNextFrame()")
+ void seekToTime(double& seekNPT);
+
+ static void handleEndOfFile(void* clientData);
+ void handleEndOfFile();
+
+private:
+ MatroskaFile& fOurFile;
+ class MatroskaFileParser* fOurParser;
+ HashTable* fDemuxedTracksTable;
+
+ // Used to implement "newDemuxedTrack()":
+ u_int8_t fNextTrackTypeToCheck;
+};
+
+#endif
diff --git a/liveMedia/include/MatroskaFileServerDemux.hh b/liveMedia/include/MatroskaFileServerDemux.hh
new file mode 100644
index 0000000..d5572d6
--- /dev/null
+++ b/liveMedia/include/MatroskaFileServerDemux.hh
@@ -0,0 +1,84 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server demultiplexor for a Matroska file
+// C++ header
+
+#ifndef _MATROSKA_FILE_SERVER_DEMUX_HH
+#define _MATROSKA_FILE_SERVER_DEMUX_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+
+#ifndef _MATROSKA_FILE_HH
+#include "MatroskaFile.hh"
+#endif
+
+class MatroskaFileServerDemux: public Medium {
+public:
+ typedef void (onCreationFunc)(MatroskaFileServerDemux* newDemux, void* clientData);
+ static void createNew(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData,
+ char const* preferredLanguage = "eng");
+ // Note: Unlike most "createNew()" functions, this one doesn't return a new object immediately. Instead, because this class
+ // requires file reading (to parse the Matroska 'Track' headers) before a new object can be initialized, the creation of a new
+ // object is signalled by calling - from the event loop - an 'onCreationFunc' that is passed as a parameter to "createNew()".
+
+ ServerMediaSubsession* newServerMediaSubsession();
+ ServerMediaSubsession* newServerMediaSubsession(unsigned& resultTrackNumber);
+ // Returns a new "ServerMediaSubsession" object that represents the next preferred media track
+ // (video, audio, subtitle - in that order) from the file. (Preferred media tracks are based on the file's language preference.)
+ // This function returns NULL when no more media tracks exist.
+
+ ServerMediaSubsession* newServerMediaSubsessionByTrackNumber(unsigned trackNumber);
+ // As above, but creates a new "ServerMediaSubsession" object for a specific track number within the Matroska file.
+ // (You should not call this function more than once with the same track number.)
+
+ // The following public: member functions are called only by the "ServerMediaSubsession" objects:
+
+ MatroskaFile* ourMatroskaFile() { return fOurMatroskaFile; }
+ char const* fileName() const { return fFileName; }
+ float fileDuration() const { return fOurMatroskaFile->fileDuration(); }
+
+ FramedSource* newDemuxedTrack(unsigned clientSessionId, unsigned trackNumber);
+ // Used by the "ServerMediaSubsession" objects to implement their "createNewStreamSource()" virtual function.
+
+private:
+ MatroskaFileServerDemux(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData,
+ char const* preferredLanguage);
+ // called only by createNew()
+ virtual ~MatroskaFileServerDemux();
+
+ static void onMatroskaFileCreation(MatroskaFile* newFile, void* clientData);
+ void onMatroskaFileCreation(MatroskaFile* newFile);
+private:
+ char const* fFileName;
+ onCreationFunc* fOnCreation;
+ void* fOnCreationClientData;
+ MatroskaFile* fOurMatroskaFile;
+
+ // Used to implement "newServerMediaSubsession()":
+ u_int8_t fNextTrackTypeToCheck;
+
+ // Used to set up demuxing, to implement "newDemuxedTrack()":
+ unsigned fLastClientSessionId;
+ MatroskaDemux* fLastCreatedDemux;
+};
+
+#endif
diff --git a/liveMedia/include/Media.hh b/liveMedia/include/Media.hh
new file mode 100644
index 0000000..777a7d9
--- /dev/null
+++ b/liveMedia/include/Media.hh
@@ -0,0 +1,137 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Medium
+// C++ header
+
+#ifndef _MEDIA_HH
+#define _MEDIA_HH
+
+#ifndef _LIVEMEDIA_VERSION_HH
+#include "liveMedia_version.hh"
+#endif
+
+#ifndef _HASH_TABLE_HH
+#include "HashTable.hh"
+#endif
+
+#ifndef _USAGE_ENVIRONMENT_HH
+#include "UsageEnvironment.hh"
+#endif
+
+// Lots of files end up needing the following, so just #include them here:
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+#include <stdio.h>
+
+// The following makes the Borland compiler happy:
+#ifdef __BORLANDC__
+#define _strnicmp strnicmp
+#define fabsf(x) fabs(x)
+#endif
+
+#define mediumNameMaxLen 30
+
+class Medium {
+public:
+ static Boolean lookupByName(UsageEnvironment& env,
+ char const* mediumName,
+ Medium*& resultMedium);
+ static void close(UsageEnvironment& env, char const* mediumName);
+ static void close(Medium* medium); // alternative close() method using ptrs
+ // (has no effect if medium == NULL)
+
+ UsageEnvironment& envir() const {return fEnviron;}
+
+ char const* name() const {return fMediumName;}
+
+ // Test for specific types of media:
+ virtual Boolean isSource() const;
+ virtual Boolean isSink() const;
+ virtual Boolean isRTCPInstance() const;
+ virtual Boolean isRTSPClient() const;
+ virtual Boolean isRTSPServer() const;
+ virtual Boolean isMediaSession() const;
+ virtual Boolean isServerMediaSession() const;
+
+protected:
+ friend class MediaLookupTable;
+ Medium(UsageEnvironment& env); // abstract base class
+ virtual ~Medium(); // instances are deleted using close() only
+
+ TaskToken& nextTask() {
+ return fNextTask;
+ }
+
+private:
+ UsageEnvironment& fEnviron;
+ char fMediumName[mediumNameMaxLen];
+ TaskToken fNextTask;
+};
+
+
+// A data structure for looking up a Medium by its string name.
+// (It is used only to implement "Medium", but we make it visible here, in case developers want to use it to iterate over
+// the whole set of "Medium" objects that we've created.)
+class MediaLookupTable {
+public:
+ static MediaLookupTable* ourMedia(UsageEnvironment& env);
+ HashTable const& getTable() { return *fTable; }
+
+protected:
+ MediaLookupTable(UsageEnvironment& env);
+ virtual ~MediaLookupTable();
+
+private:
+ friend class Medium;
+
+ Medium* lookup(char const* name) const;
+ // Returns NULL if none already exists
+
+ void addNew(Medium* medium, char* mediumName);
+ void remove(char const* name);
+
+ void generateNewName(char* mediumName, unsigned maxLen);
+
+private:
+ UsageEnvironment& fEnv;
+ HashTable* fTable;
+ unsigned fNameGenerator;
+};
+
+
+// The structure pointed to by the "liveMediaPriv" UsageEnvironment field:
+class _Tables {
+public:
+ static _Tables* getOurTables(UsageEnvironment& env, Boolean createIfNotPresent = True);
+ // returns a pointer to a "_Tables" structure (creating it if necessary)
+ void reclaimIfPossible();
+ // used to delete ourselves when we're no longer used
+
+ MediaLookupTable* mediaTable;
+ void* socketTable;
+
+protected:
+ _Tables(UsageEnvironment& env);
+ virtual ~_Tables();
+
+private:
+ UsageEnvironment& fEnv;
+};
+
+#endif
diff --git a/liveMedia/include/MediaSession.hh b/liveMedia/include/MediaSession.hh
new file mode 100644
index 0000000..0ddd0be
--- /dev/null
+++ b/liveMedia/include/MediaSession.hh
@@ -0,0 +1,362 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A data structure that represents a session that consists of
+// potentially multiple (audio and/or video) sub-sessions
+// (This data structure is used for media *receivers* - i.e., clients.
+// For media streamers, use "ServerMediaSession" instead.)
+// C++ header
+
+/* NOTE: To support receiving your own custom RTP payload format, you must first define a new
+ subclass of "MultiFramedRTPSource" (or "BasicUDPSource") that implements it.
+ Then define your own subclass of "MediaSession" and "MediaSubsession", as follows:
+ - In your subclass of "MediaSession" (named, for example, "myMediaSession"):
+ - Define and implement your own static member function
+ static myMediaSession* createNew(UsageEnvironment& env, char const* sdpDescription);
+ and call this - instead of "MediaSession::createNew()" - in your application,
+ when you create a new "MediaSession" object.
+ - Reimplement the "createNewMediaSubsession()" virtual function, as follows:
+ MediaSubsession* myMediaSession::createNewMediaSubsession() { return new myMediaSubsession(*this); }
+ - In your subclass of "MediaSubsession" (named, for example, "myMediaSubsession"):
+ - Reimplement the "createSourceObjects()" virtual function, perhaps similar to this:
+ Boolean myMediaSubsession::createSourceObjects(int useSpecialRTPoffset) {
+ if (strcmp(fCodecName, "X-MY-RTP-PAYLOAD-FORMAT") == 0) {
+ // This subsession uses our custom RTP payload format:
+ fReadSource = fRTPSource = myRTPPayloadFormatRTPSource::createNew( <parameters> );
+ return True;
+ } else {
+ // This subsession uses some other RTP payload format - perhaps one that we already implement:
+ return ::createSourceObjects(useSpecialRTPoffset);
+ }
+ }
+*/
+
+#ifndef _MEDIA_SESSION_HH
+#define _MEDIA_SESSION_HH
+
+#ifndef _RTCP_HH
+#include "RTCP.hh"
+#endif
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+#ifndef _SRTP_CRYPTOGRAPHIC_CONTEXT_HH
+#include "SRTPCryptographicContext.hh"
+#endif
+
+class MediaSubsession; // forward
+
+class MediaSession: public Medium {
+public:
+ static MediaSession* createNew(UsageEnvironment& env,
+ char const* sdpDescription);
+
+ static Boolean lookupByName(UsageEnvironment& env, char const* sourceName,
+ MediaSession*& resultSession);
+
+ Boolean hasSubsessions() const { return fSubsessionsHead != NULL; }
+
+ char* connectionEndpointName() const { return fConnectionEndpointName; }
+ char const* CNAME() const { return fCNAME; }
+ struct in_addr const& sourceFilterAddr() const { return fSourceFilterAddr; }
+ float& scale() { return fScale; }
+ float& speed() { return fSpeed; }
+ char* mediaSessionType() const { return fMediaSessionType; }
+ char* sessionName() const { return fSessionName; }
+ char* sessionDescription() const { return fSessionDescription; }
+ char const* controlPath() const { return fControlPath; }
+
+ double& playStartTime() { return fMaxPlayStartTime; }
+ double& playEndTime() { return fMaxPlayEndTime; }
+ char* absStartTime() const;
+ char* absEndTime() const;
+ // Used only to set the local fields:
+ char*& _absStartTime() { return fAbsStartTime; }
+ char*& _absEndTime() { return fAbsEndTime; }
+
+ Boolean initiateByMediaType(char const* mimeType,
+ MediaSubsession*& resultSubsession,
+ int useSpecialRTPoffset = -1);
+ // Initiates the first subsession with the specified MIME type
+ // Returns the resulting subsession, or 'multi source' (not both)
+
+ MIKEYState* getMIKEYState() const { return fMIKEYState; }
+ SRTPCryptographicContext* getCrypto() const { return fCrypto; }
+
+protected: // redefined virtual functions
+ virtual Boolean isMediaSession() const;
+
+protected:
+ MediaSession(UsageEnvironment& env);
+ // called only by createNew();
+ virtual ~MediaSession();
+
+ virtual MediaSubsession* createNewMediaSubsession();
+
+ Boolean initializeWithSDP(char const* sdpDescription);
+ Boolean parseSDPLine(char const* input, char const*& nextLine);
+ Boolean parseSDPLine_s(char const* sdpLine);
+ Boolean parseSDPLine_i(char const* sdpLine);
+ Boolean parseSDPLine_c(char const* sdpLine);
+ Boolean parseSDPAttribute_type(char const* sdpLine);
+ Boolean parseSDPAttribute_control(char const* sdpLine);
+ Boolean parseSDPAttribute_range(char const* sdpLine);
+ Boolean parseSDPAttribute_source_filter(char const* sdpLine);
+ Boolean parseSDPAttribute_key_mgmt(char const* sdpLine);
+
+ static char* lookupPayloadFormat(unsigned char rtpPayloadType,
+ unsigned& rtpTimestampFrequency,
+ unsigned& numChannels);
+ static unsigned guessRTPTimestampFrequency(char const* mediumName,
+ char const* codecName);
+
+protected:
+ friend class MediaSubsessionIterator;
+ char* fCNAME; // used for RTCP
+
+ // Linkage fields:
+ MediaSubsession* fSubsessionsHead;
+ MediaSubsession* fSubsessionsTail;
+
+ // Fields set from a SDP description:
+ char* fConnectionEndpointName;
+ double fMaxPlayStartTime;
+ double fMaxPlayEndTime;
+ char* fAbsStartTime;
+ char* fAbsEndTime;
+ struct in_addr fSourceFilterAddr; // used for SSM
+ float fScale; // set from a RTSP "Scale:" header
+ float fSpeed;
+ char* fMediaSessionType; // holds a=type value
+ char* fSessionName; // holds s=<session name> value
+ char* fSessionDescription; // holds i=<session description> value
+ char* fControlPath; // holds optional a=control: string
+
+ // Optional key management and crypto state:
+ MIKEYState* fMIKEYState;
+ SRTPCryptographicContext* fCrypto;
+};
+
+
+class MediaSubsessionIterator {
+public:
+ MediaSubsessionIterator(MediaSession const& session);
+ virtual ~MediaSubsessionIterator();
+
+ MediaSubsession* next(); // NULL if none
+ void reset();
+
+private:
+ MediaSession const& fOurSession;
+ MediaSubsession* fNextPtr;
+};
+
+
+class MediaSubsession {
+public:
+ MediaSession& parentSession() { return fParent; }
+ MediaSession const& parentSession() const { return fParent; }
+
+ unsigned short clientPortNum() const { return fClientPortNum; }
+ unsigned char rtpPayloadFormat() const { return fRTPPayloadFormat; }
+ char const* savedSDPLines() const { return fSavedSDPLines; }
+ char const* mediumName() const { return fMediumName; }
+ char const* codecName() const { return fCodecName; }
+ char const* protocolName() const { return fProtocolName; }
+ char const* controlPath() const { return fControlPath; }
+
+ Boolean isSSM() const { return fSourceFilterAddr.s_addr != 0; }
+
+ unsigned short videoWidth() const { return fVideoWidth; }
+ unsigned short videoHeight() const { return fVideoHeight; }
+ unsigned videoFPS() const { return fVideoFPS; }
+ unsigned numChannels() const { return fNumChannels; }
+ float& scale() { return fScale; }
+ float& speed() { return fSpeed; }
+
+ RTPSource* rtpSource() { return fRTPSource; }
+ RTCPInstance* rtcpInstance() { return fRTCPInstance; }
+ unsigned rtpTimestampFrequency() const { return fRTPTimestampFrequency; }
+ Boolean rtcpIsMuxed() const { return fMultiplexRTCPWithRTP; }
+ FramedSource* readSource() { return fReadSource; }
+ // This is the source that client sinks read from. It is usually
+ // (but not necessarily) the same as "rtpSource()"
+ void addFilter(FramedFilter* filter);
+ // Changes "readSource()" to "filter" (which must have just been created with "readSource()" as its input)
+
+ double playStartTime() const;
+ double playEndTime() const;
+ char* absStartTime() const;
+ char* absEndTime() const;
+ // Used only to set the local fields:
+ double& _playStartTime() { return fPlayStartTime; }
+ double& _playEndTime() { return fPlayEndTime; }
+ char*& _absStartTime() { return fAbsStartTime; }
+ char*& _absEndTime() { return fAbsEndTime; }
+
+ Boolean initiate(int useSpecialRTPoffset = -1);
+ // Creates a "RTPSource" for this subsession. (Has no effect if it's
+ // already been created.) Returns True iff this succeeds.
+ void deInitiate(); // Destroys any previously created RTPSource
+ Boolean setClientPortNum(unsigned short portNum);
+ // Sets the preferred client port number that any "RTPSource" for
+ // this subsession would use. (By default, the client port number
+ // is gotten from the original SDP description, or - if the SDP
+ // description does not specify a client port number - an ephemeral
+ // (even) port number is chosen.) This routine must *not* be
+ // called after initiate().
+ void receiveRawMP3ADUs() { fReceiveRawMP3ADUs = True; } // optional hack for audio/MPA-ROBUST; must not be called after initiate()
+ void receiveRawJPEGFrames() { fReceiveRawJPEGFrames = True; } // optional hack for video/JPEG; must not be called after initiate()
+ char*& connectionEndpointName() { return fConnectionEndpointName; }
+ char const* connectionEndpointName() const {
+ return fConnectionEndpointName;
+ }
+
+ // 'Bandwidth' parameter, set in the "b=" SDP line:
+ unsigned bandwidth() const { return fBandwidth; }
+
+ // General SDP attribute accessor functions:
+ char const* attrVal_str(char const* attrName) const;
+ // returns "" if attribute doesn't exist (and has no default value), or is not a string
+ char const* attrVal_strToLower(char const* attrName) const;
+ // returns "" if attribute doesn't exist (and has no default value), or is not a string
+ unsigned attrVal_int(char const* attrName) const;
+ // also returns 0 if attribute doesn't exist (and has no default value)
+ unsigned attrVal_unsigned(char const* attrName) const { return (unsigned)attrVal_int(attrName); }
+ Boolean attrVal_bool(char const* attrName) const { return attrVal_int(attrName) != 0; }
+
+ // Old, now-deprecated SDP attribute accessor functions, kept here for backwards-compatibility:
+ char const* fmtp_config() const;
+ char const* fmtp_configuration() const { return fmtp_config(); }
+ char const* fmtp_spropparametersets() const { return attrVal_str("sprop-parameter-sets"); }
+ char const* fmtp_spropvps() const { return attrVal_str("sprop-vps"); }
+ char const* fmtp_spropsps() const { return attrVal_str("sprop-sps"); }
+ char const* fmtp_sproppps() const { return attrVal_str("sprop-pps"); }
+
+ netAddressBits connectionEndpointAddress() const;
+ // Converts "fConnectionEndpointName" to an address (or 0 if unknown)
+ void setDestinations(netAddressBits defaultDestAddress);
+ // Uses "fConnectionEndpointName" and "serverPortNum" to set
+ // the destination address and port of the RTP and RTCP objects.
+ // This is typically called by RTSP clients after doing "SETUP".
+
+ char const* sessionId() const { return fSessionId; }
+ void setSessionId(char const* sessionId);
+
+ // Public fields that external callers can use to keep state.
+ // (They are responsible for all storage management on these fields)
+ unsigned short serverPortNum; // in host byte order (used by RTSP)
+ unsigned char rtpChannelId, rtcpChannelId; // used by RTSP (for RTP/TCP)
+ MediaSink* sink; // callers can use this to keep track of who's playing us
+ void* miscPtr; // callers can use this for whatever they want
+
+ // Parameters set from a RTSP "RTP-Info:" header:
+ struct {
+ u_int16_t seqNum;
+ u_int32_t timestamp;
+ Boolean infoIsNew; // not part of the RTSP header; instead, set whenever this struct is filled in
+ } rtpInfo;
+
+ double getNormalPlayTime(struct timeval const& presentationTime);
+ // Computes the stream's "Normal Play Time" (NPT) from the given "presentationTime".
+ // (For the definition of "Normal Play Time", see RFC 2326, section 3.6.)
+ // This function is useful only if the "rtpInfo" structure was previously filled in
+ // (e.g., by a "RTP-Info:" header in a RTSP response).
+ // Also, for this function to work properly, the RTP stream's presentation times must (eventually) be
+ // synchronized via RTCP.
+ // (Note: If this function returns a negative number, then the result should be ignored by the caller.)
+
+ MIKEYState* getMIKEYState() const { return fMIKEYState != NULL ? fMIKEYState : fParent.getMIKEYState(); }
+ SRTPCryptographicContext* getCrypto() const { return fCrypto != NULL ? fCrypto : fParent.getCrypto(); }
+
+protected:
+ friend class MediaSession;
+ friend class MediaSubsessionIterator;
+ MediaSubsession(MediaSession& parent);
+ virtual ~MediaSubsession();
+
+ UsageEnvironment& env() { return fParent.envir(); }
+ void setNext(MediaSubsession* next) { fNext = next; }
+
+ void setAttribute(char const* name, char const* value = NULL, Boolean valueIsHexadecimal = False);
+
+ Boolean parseSDPLine_c(char const* sdpLine);
+ Boolean parseSDPLine_b(char const* sdpLine);
+ Boolean parseSDPAttribute_rtpmap(char const* sdpLine);
+ Boolean parseSDPAttribute_rtcpmux(char const* sdpLine);
+ Boolean parseSDPAttribute_control(char const* sdpLine);
+ Boolean parseSDPAttribute_range(char const* sdpLine);
+ Boolean parseSDPAttribute_fmtp(char const* sdpLine);
+ Boolean parseSDPAttribute_source_filter(char const* sdpLine);
+ Boolean parseSDPAttribute_x_dimensions(char const* sdpLine);
+ Boolean parseSDPAttribute_framerate(char const* sdpLine);
+ Boolean parseSDPAttribute_key_mgmt(char const* sdpLine);
+
+ virtual Boolean createSourceObjects(int useSpecialRTPoffset);
+ // create "fRTPSource" and "fReadSource" member objects, after we've been initialized via SDP
+
+protected:
+ // Linkage fields:
+ MediaSession& fParent;
+ MediaSubsession* fNext;
+
+ // Fields set from a SDP description:
+ char* fConnectionEndpointName; // may also be set by RTSP SETUP response
+ unsigned short fClientPortNum; // in host byte order
+ // This field is also set by initiate()
+ unsigned char fRTPPayloadFormat;
+ char* fSavedSDPLines;
+ char* fMediumName;
+ char* fCodecName;
+ char* fProtocolName;
+ unsigned fRTPTimestampFrequency;
+ Boolean fMultiplexRTCPWithRTP;
+ char* fControlPath; // holds optional a=control: string
+
+ // Optional key management and crypto state:
+ MIKEYState* fMIKEYState;
+ SRTPCryptographicContext* fCrypto;
+
+ struct in_addr fSourceFilterAddr; // used for SSM
+ unsigned fBandwidth; // in kilobits-per-second, from b= line
+
+ double fPlayStartTime;
+ double fPlayEndTime;
+ char* fAbsStartTime;
+ char* fAbsEndTime;
+ unsigned short fVideoWidth, fVideoHeight;
+ // screen dimensions (set by an optional a=x-dimensions: <w>,<h> line)
+ unsigned fVideoFPS;
+ // frame rate (set by an optional "a=framerate: <fps>" or "a=x-framerate: <fps>" line)
+ unsigned fNumChannels;
+ // optionally set by "a=rtpmap:" lines for audio sessions. Default: 1
+ float fScale; // set from a RTSP "Scale:" header
+ float fSpeed;
+ double fNPT_PTS_Offset; // set by "getNormalPlayTime()"; add this to a PTS to get NPT
+ HashTable* fAttributeTable; // for "a=fmtp:" attributes. (Later an array by payload type #####)
+
+ // Fields set or used by initiate():
+ Groupsock* fRTPSocket; Groupsock* fRTCPSocket; // works even for unicast
+ RTPSource* fRTPSource; RTCPInstance* fRTCPInstance;
+ FramedSource* fReadSource;
+ Boolean fReceiveRawMP3ADUs, fReceiveRawJPEGFrames;
+
+ // Other fields:
+ char* fSessionId; // used by RTSP
+};
+
+#endif
diff --git a/liveMedia/include/MediaSink.hh b/liveMedia/include/MediaSink.hh
new file mode 100644
index 0000000..60720fd
--- /dev/null
+++ b/liveMedia/include/MediaSink.hh
@@ -0,0 +1,135 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Media Sinks
+// C++ header
+
+#ifndef _MEDIA_SINK_HH
+#define _MEDIA_SINK_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class MediaSink: public Medium {
+public:
+ static Boolean lookupByName(UsageEnvironment& env, char const* sinkName,
+ MediaSink*& resultSink);
+
+ typedef void (afterPlayingFunc)(void* clientData);
+ Boolean startPlaying(MediaSource& source,
+ afterPlayingFunc* afterFunc,
+ void* afterClientData);
+ virtual void stopPlaying();
+
+ // Test for specific types of sink:
+ virtual Boolean isRTPSink() const;
+
+ FramedSource* source() const {return fSource;}
+
+protected:
+ MediaSink(UsageEnvironment& env); // abstract base class
+ virtual ~MediaSink();
+
+ virtual Boolean sourceIsCompatibleWithUs(MediaSource& source);
+ // called by startPlaying()
+ virtual Boolean continuePlaying() = 0;
+ // called by startPlaying()
+
+ static void onSourceClosure(void* clientData); // can be used in "getNextFrame()" calls
+ void onSourceClosure();
+ // should be called (on ourselves) by continuePlaying() when it
+ // discovers that the source we're playing from has closed.
+
+ FramedSource* fSource;
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isSink() const;
+
+private:
+ // The following fields are used when we're being played:
+ afterPlayingFunc* fAfterFunc;
+ void* fAfterClientData;
+};
+
+// A data structure that a sink may use for an output packet:
+class OutPacketBuffer {
+public:
+ OutPacketBuffer(unsigned preferredPacketSize, unsigned maxPacketSize,
+ unsigned maxBufferSize = 0);
+ // if "maxBufferSize" is >0, use it - instead of "maxSize" to compute the buffer size
+ ~OutPacketBuffer();
+
+ static unsigned maxSize;
+ static void increaseMaxSizeTo(unsigned newMaxSize) { if (newMaxSize > OutPacketBuffer::maxSize) OutPacketBuffer::maxSize = newMaxSize; }
+
+ unsigned char* curPtr() const {return &fBuf[fPacketStart + fCurOffset];}
+ unsigned totalBytesAvailable() const {
+ return fLimit - (fPacketStart + fCurOffset);
+ }
+ unsigned totalBufferSize() const { return fLimit; }
+ unsigned char* packet() const {return &fBuf[fPacketStart];}
+ unsigned curPacketSize() const {return fCurOffset;}
+
+ void increment(unsigned numBytes) {fCurOffset += numBytes;}
+
+ void enqueue(unsigned char const* from, unsigned numBytes);
+ void enqueueWord(u_int32_t word);
+ void insert(unsigned char const* from, unsigned numBytes, unsigned toPosition);
+ void insertWord(u_int32_t word, unsigned toPosition);
+ void extract(unsigned char* to, unsigned numBytes, unsigned fromPosition);
+ u_int32_t extractWord(unsigned fromPosition);
+
+ void skipBytes(unsigned numBytes);
+
+ Boolean isPreferredSize() const {return fCurOffset >= fPreferred;}
+ Boolean wouldOverflow(unsigned numBytes) const {
+ return (fCurOffset+numBytes) > fMax;
+ }
+ unsigned numOverflowBytes(unsigned numBytes) const {
+ return (fCurOffset+numBytes) - fMax;
+ }
+ Boolean isTooBigForAPacket(unsigned numBytes) const {
+ return numBytes > fMax;
+ }
+
+ void setOverflowData(unsigned overflowDataOffset,
+ unsigned overflowDataSize,
+ struct timeval const& presentationTime,
+ unsigned durationInMicroseconds);
+ unsigned overflowDataSize() const {return fOverflowDataSize;}
+ struct timeval overflowPresentationTime() const {return fOverflowPresentationTime;}
+ unsigned overflowDurationInMicroseconds() const {return fOverflowDurationInMicroseconds;}
+ Boolean haveOverflowData() const {return fOverflowDataSize > 0;}
+ void useOverflowData();
+
+ void adjustPacketStart(unsigned numBytes);
+ void resetPacketStart();
+ void resetOffset() { fCurOffset = 0; }
+ void resetOverflowData() { fOverflowDataOffset = fOverflowDataSize = 0; }
+
+private:
+ unsigned fPacketStart, fCurOffset, fPreferred, fMax, fLimit;
+ unsigned char* fBuf;
+
+ unsigned fOverflowDataOffset, fOverflowDataSize;
+ struct timeval fOverflowPresentationTime;
+ unsigned fOverflowDurationInMicroseconds;
+};
+
+#endif
diff --git a/liveMedia/include/MediaSource.hh b/liveMedia/include/MediaSource.hh
new file mode 100644
index 0000000..9bd4974
--- /dev/null
+++ b/liveMedia/include/MediaSource.hh
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Media Sources
+// C++ header
+
+#ifndef _MEDIA_SOURCE_HH
+#define _MEDIA_SOURCE_HH
+
+#ifndef _MEDIA_HH
+#include "Media.hh"
+#endif
+
+class MediaSource: public Medium {
+public:
+ static Boolean lookupByName(UsageEnvironment& env, char const* sourceName,
+ MediaSource*& resultSource);
+ virtual void getAttributes() const;
+ // attributes are returned in "env's" 'result message'
+
+ // The MIME type of this source:
+ virtual char const* MIMEtype() const;
+
+ // Test for specific types of source:
+ virtual Boolean isFramedSource() const;
+ virtual Boolean isRTPSource() const;
+ virtual Boolean isMPEG1or2VideoStreamFramer() const;
+ virtual Boolean isMPEG4VideoStreamFramer() const;
+ virtual Boolean isH264VideoStreamFramer() const;
+ virtual Boolean isH265VideoStreamFramer() const;
+ virtual Boolean isDVVideoStreamFramer() const;
+ virtual Boolean isJPEGVideoSource() const;
+ virtual Boolean isAMRAudioSource() const;
+ virtual Boolean isMPEG2TransportStreamMultiplexor() const;
+
+protected:
+ MediaSource(UsageEnvironment& env); // abstract base class
+ virtual ~MediaSource();
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isSource() const;
+};
+
+#endif
diff --git a/liveMedia/include/MediaTranscodingTable.hh b/liveMedia/include/MediaTranscodingTable.hh
new file mode 100644
index 0000000..cea3710
--- /dev/null
+++ b/liveMedia/include/MediaTranscodingTable.hh
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that implements a database that can be accessed to create
+// "FramedFilter" (subclass) objects that transcode one codec into another.
+// The implementation of this class just returns NULL for each codec lookup.
+// To actually implement transcoding, you would subclass it.
+// C++ header
+
+#ifndef _MEDIA_TRANSCODING_TABLE_HH
+#define _MEDIA_TRANSCODING_TABLE_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+#ifndef _MEDIA_SESSION_HH
+#include "MediaSession.hh"
+#endif
+
+class MediaTranscodingTable: public Medium {
+public:
+ virtual FramedFilter*
+ lookupTranscoder(MediaSubsession& /*inputCodecDescription*/, // in
+ char*& outputCodecName/* out; must be delete[]d later */) {
+ // Default implementation: Return NULL (indicating: no transcoding).
+ // You would reimplement this virtual function in a subclass to return a new 'transcoding'
+ // "FramedFilter" (subclass) object for each ("mediumName","codecName") that you wish to
+ // transcode (or return NULL for no transcoding).
+ // (Note that "inputCodecDescription" must have a non-NULL "readSource()"; this is used
+ // as the input to the new "FramedFilter" (subclass) object.)
+ outputCodecName = NULL;
+ return NULL;
+ }
+
+ virtual Boolean weWillTranscode(char const* /*mediumName*/, char const* /*codecName*/) {
+ // Default implementation: Return False.
+ // You would reimplement this in a subclass - returning True for each
+ // <mediumName>/<codecName> for which you'll do transcoding.
+ // Note: Unlike "lookupTranscoder()", this function does not actually create any 'transcoding'
+ // filter objects. (It may be called before "MediaSubsession::initiate()".)
+ return False;
+ }
+
+protected: // we are to be subclassed only
+ MediaTranscodingTable(UsageEnvironment& env)
+ : Medium(env) {
+ }
+ virtual ~MediaTranscodingTable() {
+ }
+};
+
+#endif
diff --git a/liveMedia/include/MultiFramedRTPSink.hh b/liveMedia/include/MultiFramedRTPSink.hh
new file mode 100644
index 0000000..5bfd5c8
--- /dev/null
+++ b/liveMedia/include/MultiFramedRTPSink.hh
@@ -0,0 +1,140 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for a common kind of payload format: Those which pack multiple,
+// complete codec frames (as many as possible) into each RTP packet.
+// C++ header
+
+#ifndef _MULTI_FRAMED_RTP_SINK_HH
+#define _MULTI_FRAMED_RTP_SINK_HH
+
+#ifndef _RTP_SINK_HH
+#include "RTPSink.hh"
+#endif
+
+class MultiFramedRTPSink: public RTPSink {
+public:
+ void setPacketSizes(unsigned preferredPacketSize, unsigned maxPacketSize);
+
+ typedef void (onSendErrorFunc)(void* clientData);
+ void setOnSendErrorFunc(onSendErrorFunc* onSendErrorFunc, void* onSendErrorFuncData) {
+ // Can be used to set a callback function to be called if there's an error sending RTP packets on our socket.
+ fOnSendErrorFunc = onSendErrorFunc;
+ fOnSendErrorData = onSendErrorFuncData;
+ }
+
+protected:
+ MultiFramedRTPSink(UsageEnvironment& env,
+ Groupsock* rtpgs, unsigned char rtpPayloadType,
+ unsigned rtpTimestampFrequency,
+ char const* rtpPayloadFormatName,
+ unsigned numChannels = 1);
+ // we're a virtual base class
+
+ virtual ~MultiFramedRTPSink();
+
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ // perform any processing specific to the particular payload format
+ virtual Boolean allowFragmentationAfterStart() const;
+ // whether a frame can be fragmented if other frame(s) appear earlier
+ // in the packet (by default: False)
+ virtual Boolean allowOtherFramesAfterLastFragment() const;
+ // whether other frames can be packed into a packet following the
+ // final fragment of a previous, fragmented frame (by default: False)
+ virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ // whether this frame can appear in position >1 in a pkt (default: True)
+ virtual unsigned specialHeaderSize() const;
+ // returns the size of any special header used (following the RTP header) (default: 0)
+ virtual unsigned frameSpecificHeaderSize() const;
+ // returns the size of any frame-specific header used (before each frame
+ // within the packet) (default: 0)
+ virtual unsigned computeOverflowForNewFrame(unsigned newFrameSize) const;
+ // returns the number of overflow bytes that would be produced by adding a new
+ // frame of size "newFrameSize" to the current RTP packet.
+ // (By default, this just calls "numOverflowBytes()", but subclasses can redefine
+ // this to (e.g.) impose a granularity upon RTP payload fragments.)
+
+ // Functions that might be called by doSpecialFrameHandling(), or other subclass virtual functions:
+ Boolean isFirstPacket() const { return fIsFirstPacket; }
+ Boolean isFirstFrameInPacket() const { return fNumFramesUsedSoFar == 0; }
+ unsigned curFragmentationOffset() const { return fCurFragmentationOffset; }
+ void setMarkerBit();
+ void setTimestamp(struct timeval framePresentationTime);
+ void setSpecialHeaderWord(unsigned word, /* 32 bits, in host order */
+ unsigned wordPosition = 0);
+ void setSpecialHeaderBytes(unsigned char const* bytes, unsigned numBytes,
+ unsigned bytePosition = 0);
+ void setFrameSpecificHeaderWord(unsigned word, /* 32 bits, in host order */
+ unsigned wordPosition = 0);
+ void setFrameSpecificHeaderBytes(unsigned char const* bytes, unsigned numBytes,
+ unsigned bytePosition = 0);
+ void setFramePadding(unsigned numPaddingBytes);
+ unsigned numFramesUsedSoFar() const { return fNumFramesUsedSoFar; }
+ unsigned ourMaxPacketSize() const { return fOurMaxPacketSize; }
+
+public: // redefined virtual functions:
+ virtual void stopPlaying();
+
+protected: // redefined virtual functions:
+ virtual Boolean continuePlaying();
+
+private:
+ void buildAndSendPacket(Boolean isFirstPacket);
+ void packFrame();
+ void sendPacketIfNecessary();
+ static void sendNext(void* firstArg);
+ friend void sendNext(void*);
+
+ static void afterGettingFrame(void* clientData,
+ unsigned numBytesRead, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned numBytesRead, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ Boolean isTooBigForAPacket(unsigned numBytes) const;
+
+ static void ourHandleClosure(void* clientData);
+
+private:
+ OutPacketBuffer* fOutBuf;
+
+ Boolean fNoFramesLeft;
+ unsigned fNumFramesUsedSoFar;
+ unsigned fCurFragmentationOffset;
+ Boolean fPreviousFrameEndedFragmentation;
+
+ Boolean fIsFirstPacket;
+ struct timeval fNextSendTime;
+ unsigned fTimestampPosition;
+ unsigned fSpecialHeaderPosition;
+ unsigned fSpecialHeaderSize; // size in bytes of any special header used
+ unsigned fCurFrameSpecificHeaderPosition;
+ unsigned fCurFrameSpecificHeaderSize; // size in bytes of cur frame-specific header
+ unsigned fTotalFrameSpecificHeaderSizes; // size of all frame-specific hdrs in pkt
+ unsigned fOurMaxPacketSize;
+
+ onSendErrorFunc* fOnSendErrorFunc;
+ void* fOnSendErrorData;
+};
+
+#endif
diff --git a/liveMedia/include/MultiFramedRTPSource.hh b/liveMedia/include/MultiFramedRTPSource.hh
new file mode 100644
index 0000000..b950d70
--- /dev/null
+++ b/liveMedia/include/MultiFramedRTPSource.hh
@@ -0,0 +1,159 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP source for a common kind of payload format: Those which pack multiple,
+// complete codec frames (as many as possible) into each RTP packet.
+// C++ header
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#define _MULTI_FRAMED_RTP_SOURCE_HH
+
+#ifndef _RTP_SOURCE_HH
+#include "RTPSource.hh"
+#endif
+
+class BufferedPacket; // forward
+class BufferedPacketFactory; // forward
+
+class MultiFramedRTPSource: public RTPSource {
+protected:
+ MultiFramedRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ BufferedPacketFactory* packetFactory = NULL);
+ // virtual base class
+ virtual ~MultiFramedRTPSource();
+
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ // Subclasses redefine this to handle any special, payload format
+ // specific header that follows the RTP header.
+
+ virtual Boolean packetIsUsableInJitterCalculation(unsigned char* packet,
+ unsigned packetSize);
+ // The default implementation returns True, but this can be redefined
+
+protected:
+ Boolean fCurrentPacketBeginsFrame;
+ Boolean fCurrentPacketCompletesFrame;
+
+protected:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ // redefined virtual functions:
+ virtual void setPacketReorderingThresholdTime(unsigned uSeconds);
+
+private:
+ void reset();
+ void doGetNextFrame1();
+
+ static void networkReadHandler(MultiFramedRTPSource* source, int /*mask*/);
+ void networkReadHandler1();
+
+ Boolean fAreDoingNetworkReads;
+ BufferedPacket* fPacketReadInProgress;
+ Boolean fNeedDelivery;
+ Boolean fPacketLossInFragmentedFrame;
+ unsigned char* fSavedTo;
+ unsigned fSavedMaxSize;
+
+  // A buffer to (optionally) hold incoming pkts that have been reordered
+ class ReorderingPacketBuffer* fReorderingBuffer;
+};
+
+
+// A 'packet data' class that's used to implement the above.
+// Note that this can be subclassed - if desired - to redefine
+// "nextEnclosedFrameParameters()".
+
+class BufferedPacket {
+public:
+ BufferedPacket();
+ virtual ~BufferedPacket();
+
+ Boolean hasUsableData() const { return fTail > fHead; }
+ unsigned useCount() const { return fUseCount; }
+
+ Boolean fillInData(RTPInterface& rtpInterface, struct sockaddr_in& fromAddress, Boolean& packetReadWasIncomplete);
+ void assignMiscParams(unsigned short rtpSeqNo, unsigned rtpTimestamp,
+ struct timeval presentationTime,
+ Boolean hasBeenSyncedUsingRTCP,
+ Boolean rtpMarkerBit, struct timeval timeReceived);
+ void skip(unsigned numBytes); // used to skip over an initial header
+ void removePadding(unsigned numBytes); // used to remove trailing bytes
+ void appendData(unsigned char* newData, unsigned numBytes);
+ void use(unsigned char* to, unsigned toSize,
+ unsigned& bytesUsed, unsigned& bytesTruncated,
+ unsigned short& rtpSeqNo, unsigned& rtpTimestamp,
+ struct timeval& presentationTime,
+ Boolean& hasBeenSyncedUsingRTCP, Boolean& rtpMarkerBit);
+
+ BufferedPacket*& nextPacket() { return fNextPacket; }
+
+ unsigned short rtpSeqNo() const { return fRTPSeqNo; }
+ struct timeval const& timeReceived() const { return fTimeReceived; }
+
+ unsigned char* data() const { return &fBuf[fHead]; }
+ unsigned dataSize() const { return fTail-fHead; }
+ Boolean rtpMarkerBit() const { return fRTPMarkerBit; }
+ Boolean& isFirstPacket() { return fIsFirstPacket; }
+ unsigned bytesAvailable() const { return fPacketSize - fTail; }
+
+protected:
+ virtual void reset();
+ virtual unsigned nextEnclosedFrameSize(unsigned char*& framePtr,
+ unsigned dataSize);
+ // The above function has been deprecated. Instead, new subclasses should use:
+ virtual void getNextEnclosedFrameParameters(unsigned char*& framePtr,
+ unsigned dataSize,
+ unsigned& frameSize,
+ unsigned& frameDurationInMicroseconds);
+
+ unsigned fPacketSize;
+ unsigned char* fBuf;
+ unsigned fHead;
+ unsigned fTail;
+
+private:
+ BufferedPacket* fNextPacket; // used to link together packets
+
+ unsigned fUseCount;
+ unsigned short fRTPSeqNo;
+ unsigned fRTPTimestamp;
+ struct timeval fPresentationTime; // corresponding to "fRTPTimestamp"
+ Boolean fHasBeenSyncedUsingRTCP;
+ Boolean fRTPMarkerBit;
+ Boolean fIsFirstPacket;
+ struct timeval fTimeReceived;
+};
+
+// A 'factory' class for creating "BufferedPacket" objects.
+// If you want to subclass "BufferedPacket", then you'll also
+// want to subclass this, to redefine createNewPacket()
+
+class BufferedPacketFactory {
+public:
+ BufferedPacketFactory();
+ virtual ~BufferedPacketFactory();
+
+ virtual BufferedPacket* createNewPacket(MultiFramedRTPSource* ourSource);
+};
+
+#endif
diff --git a/liveMedia/include/OggFile.hh b/liveMedia/include/OggFile.hh
new file mode 100644
index 0000000..dfb826e
--- /dev/null
+++ b/liveMedia/include/OggFile.hh
@@ -0,0 +1,177 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that encapsulates an Ogg file
+// C++ header
+
+#ifndef _OGG_FILE_HH
+#define _OGG_FILE_HH
+
+#ifndef _RTP_SINK_HH
+#include "RTPSink.hh"
+#endif
+#ifndef _HASH_TABLE_HH
+#include "HashTable.hh"
+#endif
+
+class OggTrack; // forward
+class OggDemux; // forward
+
+class OggFile: public Medium {
+public:
+ typedef void (onCreationFunc)(OggFile* newFile, void* clientData);
+ static void createNew(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData);
+ // Note: Unlike most "createNew()" functions, this one doesn't return a new object
+ // immediately. Instead, because this class requires file reading (to parse the
+ // Ogg track headers) before a new object can be initialized, the creation of a new object
+ // is signalled by calling - from the event loop - an 'onCreationFunc' that is passed as
+ // a parameter to "createNew()".
+
+ OggTrack* lookup(u_int32_t trackNumber);
+
+ OggDemux* newDemux();
+ // Creates a demultiplexor for extracting tracks from this file.
+ // (Separate clients will typically have separate demultiplexors.)
+
+ char const* fileName() const { return fFileName; }
+ unsigned numTracks() const;
+
+ FramedSource*
+ createSourceForStreaming(FramedSource* baseSource, u_int32_t trackNumber,
+ unsigned& estBitrate, unsigned& numFiltersInFrontOfTrack);
+ // Takes a data source (which must be a demultiplexed track from this file) and returns
+ // a (possibly modified) data source that can be used for streaming.
+
+ RTPSink* createRTPSinkForTrackNumber(u_int32_t trackNumber, Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic);
+ // Creates a "RTPSink" object that would be appropriate for streaming the specified track,
+ // or NULL if no appropriate "RTPSink" exists
+
+ class OggTrackTable& trackTable() { return *fTrackTable; }
+
+private:
+ OggFile(UsageEnvironment& env, char const* fileName, onCreationFunc* onCreation, void* onCreationClientData);
+ // called only by createNew()
+ virtual ~OggFile();
+
+ static void handleEndOfBosPageParsing(void* clientData);
+ void handleEndOfBosPageParsing();
+
+ void addTrack(OggTrack* newTrack);
+ void removeDemux(OggDemux* demux);
+
+private:
+ friend class OggFileParser;
+ friend class OggDemux;
+ char const* fFileName;
+ onCreationFunc* fOnCreation;
+ void* fOnCreationClientData;
+
+ class OggTrackTable* fTrackTable;
+ HashTable* fDemuxesTable;
+ class OggFileParser* fParserForInitialization;
+};
+
+class OggTrack {
+public:
+ OggTrack();
+ virtual ~OggTrack();
+
+ // track parameters
+ u_int32_t trackNumber; // bitstream serial number
+ char const* mimeType; // NULL if not known
+
+ unsigned samplingFrequency, numChannels; // for audio tracks
+ unsigned estBitrate; // estimate, in kbps (for RTCP)
+
+ // Special headers for Vorbis audio, Theora video, and Opus audio tracks:
+ struct _vtoHdrs {
+ u_int8_t* header[3]; // "identification", "comment", "setup"
+ unsigned headerSize[3];
+
+ // Fields specific to Vorbis audio:
+ unsigned blocksize[2]; // samples per frame (packet)
+ unsigned uSecsPerPacket[2]; // computed as (blocksize[i]*1000000)/samplingFrequency
+ unsigned vorbis_mode_count;
+ unsigned ilog_vorbis_mode_count_minus_1;
+ u_int8_t* vorbis_mode_blockflag;
+ // an array (of size "vorbis_mode_count") of indexes into the (2-entry) "blocksize" array
+
+ // Fields specific to Theora video:
+ u_int8_t KFGSHIFT;
+ unsigned uSecsPerFrame;
+
+ } vtoHdrs;
+
+ Boolean weNeedHeaders() const {
+ return
+ vtoHdrs.header[0] == NULL ||
+ vtoHdrs.header[1] == NULL ||
+ (vtoHdrs.header[2] == NULL && strcmp(mimeType, "audio/OPUS") != 0);
+ }
+};
+
+class OggTrackTableIterator {
+public:
+ OggTrackTableIterator(class OggTrackTable& ourTable);
+ virtual ~OggTrackTableIterator();
+
+ OggTrack* next();
+
+private:
+ HashTable::Iterator* fIter;
+};
+
+class OggDemux: public Medium {
+public:
+ FramedSource* newDemuxedTrack(u_int32_t& resultTrackNumber);
+ // Returns a new stream ("FramedSource" subclass) that represents the next media track
+ // from the file. This function returns NULL when no more media tracks exist.
+
+ FramedSource* newDemuxedTrackByTrackNumber(unsigned trackNumber);
+  // As above, but creates a new stream for a specific track number within the Ogg file.
+ // (You should not call this function more than once with the same track number.)
+
+ // Note: We assume that:
+ // - Every track created by "newDemuxedTrack()" is later read
+ // - All calls to "newDemuxedTrack()" are made before any track is read
+
+protected:
+ friend class OggFile;
+ friend class OggFileParser;
+ class OggDemuxedTrack* lookupDemuxedTrack(u_int32_t trackNumber);
+
+ OggDemux(OggFile& ourFile);
+ virtual ~OggDemux();
+
+private:
+ friend class OggDemuxedTrack;
+ void removeTrack(u_int32_t trackNumber);
+ void continueReading(); // called by a demuxed track to tell us that it has a pending read ("doGetNextFrame()")
+
+ static void handleEndOfFile(void* clientData);
+ void handleEndOfFile();
+
+private:
+ OggFile& fOurFile;
+ class OggFileParser* fOurParser;
+ HashTable* fDemuxedTracksTable;
+ OggTrackTableIterator* fIter;
+};
+
+#endif
diff --git a/liveMedia/include/OggFileServerDemux.hh b/liveMedia/include/OggFileServerDemux.hh
new file mode 100644
index 0000000..77b5dfc
--- /dev/null
+++ b/liveMedia/include/OggFileServerDemux.hh
@@ -0,0 +1,81 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server demultiplexor for an Ogg file
+// C++ header
+
+#ifndef _OGG_FILE_SERVER_DEMUX_HH
+#define _OGG_FILE_SERVER_DEMUX_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+
+#ifndef _OGG_FILE_HH
+#include "OggFile.hh"
+#endif
+
+class OggFileServerDemux: public Medium {
+public:
+ typedef void (onCreationFunc)(OggFileServerDemux* newDemux, void* clientData);
+ static void createNew(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData);
+ // Note: Unlike most "createNew()" functions, this one doesn't return a new object immediately. Instead, because this class
+ // requires file reading (to parse the Ogg 'Track' headers) before a new object can be initialized, the creation of a new
+ // object is signalled by calling - from the event loop - an 'onCreationFunc' that is passed as a parameter to "createNew()".
+
+ ServerMediaSubsession* newServerMediaSubsession();
+ ServerMediaSubsession* newServerMediaSubsession(u_int32_t& resultTrackNumber);
+ // Returns a new "ServerMediaSubsession" object that represents the next media track
+ // from the file. This function returns NULL when no more media tracks exist.
+
+ ServerMediaSubsession* newServerMediaSubsessionByTrackNumber(u_int32_t trackNumber);
+ // As above, but creates a new "ServerMediaSubsession" object for a specific track number
+ // within the Ogg file.
+ // (You should not call this function more than once with the same track number.)
+
+ // The following public: member functions are called only by the "ServerMediaSubsession" objects:
+
+ OggFile* ourOggFile() { return fOurOggFile; }
+ char const* fileName() const { return fFileName; }
+
+ FramedSource* newDemuxedTrack(unsigned clientSessionId, u_int32_t trackNumber);
+ // Used by the "ServerMediaSubsession" objects to implement their "createNewStreamSource()" virtual function.
+
+private:
+ OggFileServerDemux(UsageEnvironment& env, char const* fileName,
+ onCreationFunc* onCreation, void* onCreationClientData);
+ // called only by createNew()
+ virtual ~OggFileServerDemux();
+
+ static void onOggFileCreation(OggFile* newFile, void* clientData);
+ void onOggFileCreation(OggFile* newFile);
+private:
+ char const* fFileName;
+ onCreationFunc* fOnCreation;
+ void* fOnCreationClientData;
+ OggFile* fOurOggFile;
+
+ // Used to implement "newServerMediaSubsession()":
+ OggTrackTableIterator* fIter;
+
+ // Used to set up demuxing, to implement "newDemuxedTrack()":
+ unsigned fLastClientSessionId;
+ OggDemux* fLastCreatedDemux;
+};
+
+#endif
diff --git a/liveMedia/include/OggFileSink.hh b/liveMedia/include/OggFileSink.hh
new file mode 100644
index 0000000..f690fa3
--- /dev/null
+++ b/liveMedia/include/OggFileSink.hh
@@ -0,0 +1,79 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// 'Ogg' File Sink (recording a single media track only)
+// C++ header
+
+#ifndef _OGG_FILE_SINK_HH
+#define _OGG_FILE_SINK_HH
+
+#ifndef _FILE_SINK_HH
+#include "FileSink.hh"
+#endif
+
+class OggFileSink: public FileSink {
+public:
+ static OggFileSink* createNew(UsageEnvironment& env, char const* fileName,
+ unsigned samplingFrequency = 0, // used for granule_position
+ char const* configStr = NULL,
+ // "configStr" is an optional 'SDP format' string (Base64-encoded)
+ // representing 'packed configuration headers' ("identification", "comment", "setup")
+ // to prepend to the output. (For 'Vorbis" audio and 'Theora' video.)
+ unsigned bufferSize = 100000,
+ Boolean oneFilePerFrame = False);
+ // See "FileSink.hh" for a description of these parameters.
+
+protected:
+ OggFileSink(UsageEnvironment& env, FILE* fid, unsigned samplingFrequency, char const* configStr,
+ unsigned bufferSize, char const* perFrameFileNamePrefix);
+ // called only by createNew()
+ virtual ~OggFileSink();
+
+protected: // redefined virtual functions:
+ virtual Boolean continuePlaying();
+ virtual void addData(unsigned char const* data, unsigned dataSize,
+ struct timeval presentationTime);
+ virtual void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime);
+
+private:
+ static void ourOnSourceClosure(void* clientData);
+ void ourOnSourceClosure();
+
+private:
+ unsigned fSamplingFrequency;
+ char const* fConfigStr;
+ Boolean fHaveWrittenFirstFrame, fHaveSeenEOF;
+ struct timeval fFirstPresentationTime;
+ int64_t fGranulePosition;
+ int64_t fGranulePositionAdjustment; // used to ensure that "fGranulePosition" stays monotonic
+ u_int32_t fPageSequenceNumber;
+ u_int8_t fPageHeaderBytes[27];
+ // the header of each Ogg page, through the "number_page_segments" byte
+
+ // Special fields used for Theora video:
+ Boolean fIsTheora;
+ u_int64_t fGranuleIncrementPerFrame; // == 1 << KFGSHIFT
+
+ // Because the last Ogg page before EOF needs to have a special 'eos' bit set in the header,
+ // we need to defer the writing of each incoming frame. To do this, we maintain a 2nd buffer:
+ unsigned char* fAltBuffer;
+ unsigned fAltFrameSize, fAltNumTruncatedBytes;
+ struct timeval fAltPresentationTime;
+};
+
+#endif
diff --git a/liveMedia/include/OnDemandServerMediaSubsession.hh b/liveMedia/include/OnDemandServerMediaSubsession.hh
new file mode 100644
index 0000000..2d81d46
--- /dev/null
+++ b/liveMedia/include/OnDemandServerMediaSubsession.hh
@@ -0,0 +1,227 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand.
+// C++ header
+
+#ifndef _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
+#define _ON_DEMAND_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+#ifndef _RTP_SINK_HH
+#include "RTPSink.hh"
+#endif
+#ifndef _BASIC_UDP_SINK_HH
+#include "BasicUDPSink.hh"
+#endif
+#ifndef _RTCP_HH
+#include "RTCP.hh"
+#endif
+
+class OnDemandServerMediaSubsession: public ServerMediaSubsession {
+protected: // we're a virtual base class
+ OnDemandServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource,
+ portNumBits initialPortNum = 6970,
+ Boolean multiplexRTCPWithRTP = False);
+ virtual ~OnDemandServerMediaSubsession();
+
+protected: // redefined virtual functions
+ virtual char const* sdpLines();
+ virtual void getStreamParameters(unsigned clientSessionId,
+ netAddressBits clientAddress,
+ Port const& clientRTPPort,
+ Port const& clientRTCPPort,
+ int tcpSocketNum,
+ unsigned char rtpChannelId,
+ unsigned char rtcpChannelId,
+ netAddressBits& destinationAddress,
+ u_int8_t& destinationTTL,
+ Boolean& isMulticast,
+ Port& serverRTPPort,
+ Port& serverRTCPPort,
+ void*& streamToken);
+ virtual void startStream(unsigned clientSessionId, void* streamToken,
+ TaskFunc* rtcpRRHandler,
+ void* rtcpRRHandlerClientData,
+ unsigned short& rtpSeqNum,
+ unsigned& rtpTimestamp,
+ ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+ void* serverRequestAlternativeByteHandlerClientData);
+ virtual void pauseStream(unsigned clientSessionId, void* streamToken);
+ virtual void seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+ virtual void seekStream(unsigned clientSessionId, void* streamToken, char*& absStart, char*& absEnd);
+ virtual void nullSeekStream(unsigned clientSessionId, void* streamToken,
+ double streamEndTime, u_int64_t& numBytes);
+ virtual void setStreamScale(unsigned clientSessionId, void* streamToken, float scale);
+ virtual float getCurrentNPT(void* streamToken);
+ virtual FramedSource* getStreamSource(void* streamToken);
+ virtual void getRTPSinkandRTCP(void* streamToken,
+ RTPSink const*& rtpSink, RTCPInstance const*& rtcp);
+ virtual void deleteStream(unsigned clientSessionId, void*& streamToken);
+
+protected: // new virtual functions, possibly redefined by subclasses
+ virtual char const* getAuxSDPLine(RTPSink* rtpSink,
+ FramedSource* inputSource);
+ virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+ // This routine is used to seek by relative (i.e., NPT) time.
+ // "streamDuration", if >0.0, specifies how much data to stream, past "seekNPT". (If <=0.0, all remaining data is streamed.)
+ // "numBytes" returns the size (in bytes) of the data to be streamed, or 0 if unknown or unlimited.
+ virtual void seekStreamSource(FramedSource* inputSource, char*& absStart, char*& absEnd);
+ // This routine is used to seek by 'absolute' time.
+ // "absStart" should be a string of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z".
+ // "absEnd" should be either NULL (for no end time), or a string of the same form as "absStart".
+ // These strings may be modified in-place, or can be reassigned to a newly-allocated value (after delete[]ing the original).
+ virtual void setStreamSourceScale(FramedSource* inputSource, float scale);
+ virtual void setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes);
+ virtual void closeStreamSource(FramedSource* inputSource);
+
+protected: // new virtual functions, defined by all subclasses
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate) = 0;
+ // "estBitrate" is the stream's estimated bitrate, in kbps
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource) = 0;
+
+protected: // new virtual functions, may be redefined by a subclass:
+ virtual Groupsock* createGroupsock(struct in_addr const& addr, Port port);
+ virtual RTCPInstance* createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
+ unsigned char const* cname, RTPSink* sink);
+
+public:
+ void multiplexRTCPWithRTP() { fMultiplexRTCPWithRTP = True; }
+ // An alternative to passing the "multiplexRTCPWithRTP" parameter as True in the constructor
+
+ void setRTCPAppPacketHandler(RTCPAppHandlerFunc* handler, void* clientData);
+ // Sets a handler to be called if a RTCP "APP" packet arrives from any future client.
+ // (Any current clients are not affected; any "APP" packets from them will continue to be
+ // handled by whatever handler existed when the client sent its first RTSP "PLAY" command.)
+ // (Call with (NULL, NULL) to remove an existing handler - for future clients only)
+
+ void sendRTCPAppPacket(u_int8_t subtype, char const* name,
+ u_int8_t* appDependentData, unsigned appDependentDataSize);
+ // Sends a custom RTCP "APP" packet to the most recent client (if "reuseFirstSource" was False),
+ // or to all current clients (if "reuseFirstSource" was True).
+ // The parameters correspond to their
+ // respective fields as described in the RTP/RTCP definition (RFC 3550).
+ // Note that only the low-order 5 bits of "subtype" are used, and only the first 4 bytes
+ // of "name" are used. (If "name" has fewer than 4 bytes, or is NULL,
+ // then the remaining bytes are '\0'.)
+
+protected:
+ void setSDPLinesFromRTPSink(RTPSink* rtpSink, FramedSource* inputSource,
+ unsigned estBitrate);
+ // used to implement "sdpLines()"
+
+protected:
+ char* fSDPLines;
+ HashTable* fDestinationsHashTable; // indexed by client session id
+
+private:
+ Boolean fReuseFirstSource;
+ portNumBits fInitialPortNum;
+ Boolean fMultiplexRTCPWithRTP;
+ void* fLastStreamToken;
+ char fCNAME[100]; // for RTCP
+ RTCPAppHandlerFunc* fAppHandlerTask;
+ void* fAppHandlerClientData;
+ friend class StreamState;
+};
+
+
+// A class that represents the state of an ongoing stream. This is used only internally, in the implementation of
+// "OnDemandServerMediaSubsession", but we expose the definition here, in case subclasses of "OnDemandServerMediaSubsession"
+// want to access it.
+
+class Destinations {
+public:
+ Destinations(struct in_addr const& destAddr,
+ Port const& rtpDestPort,
+ Port const& rtcpDestPort)
+ : isTCP(False), addr(destAddr), rtpPort(rtpDestPort), rtcpPort(rtcpDestPort) {
+ }
+ Destinations(int tcpSockNum, unsigned char rtpChanId, unsigned char rtcpChanId)
+ : isTCP(True), rtpPort(0) /*dummy*/, rtcpPort(0) /*dummy*/,
+ tcpSocketNum(tcpSockNum), rtpChannelId(rtpChanId), rtcpChannelId(rtcpChanId) {
+ }
+
+public:
+ Boolean isTCP;
+ struct in_addr addr;
+ Port rtpPort;
+ Port rtcpPort;
+ int tcpSocketNum;
+ unsigned char rtpChannelId, rtcpChannelId;
+};
+
+class StreamState {
+public:
+ StreamState(OnDemandServerMediaSubsession& master,
+ Port const& serverRTPPort, Port const& serverRTCPPort,
+ RTPSink* rtpSink, BasicUDPSink* udpSink,
+ unsigned totalBW, FramedSource* mediaSource,
+ Groupsock* rtpGS, Groupsock* rtcpGS);
+ virtual ~StreamState();
+
+ void startPlaying(Destinations* destinations, unsigned clientSessionId,
+ TaskFunc* rtcpRRHandler, void* rtcpRRHandlerClientData,
+ ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+ void* serverRequestAlternativeByteHandlerClientData);
+ void pause();
+ void sendRTCPAppPacket(u_int8_t subtype, char const* name,
+ u_int8_t* appDependentData, unsigned appDependentDataSize);
+ void endPlaying(Destinations* destinations, unsigned clientSessionId);
+ void reclaim();
+
+ unsigned& referenceCount() { return fReferenceCount; }
+
+ Port const& serverRTPPort() const { return fServerRTPPort; }
+ Port const& serverRTCPPort() const { return fServerRTCPPort; }
+
+ RTPSink* rtpSink() const { return fRTPSink; }
+ RTCPInstance* rtcpInstance() const { return fRTCPInstance; }
+
+ float streamDuration() const { return fStreamDuration; }
+
+ FramedSource* mediaSource() const { return fMediaSource; }
+ float& startNPT() { return fStartNPT; }
+
+private:
+ OnDemandServerMediaSubsession& fMaster;
+ Boolean fAreCurrentlyPlaying;
+ unsigned fReferenceCount;
+
+ Port fServerRTPPort, fServerRTCPPort;
+
+ RTPSink* fRTPSink;
+ BasicUDPSink* fUDPSink;
+
+ float fStreamDuration;
+ unsigned fTotalBW;
+ RTCPInstance* fRTCPInstance;
+
+ FramedSource* fMediaSource;
+ float fStartNPT; // initial 'normal play time'; reset after each seek
+
+ Groupsock* fRTPgs;
+ Groupsock* fRTCPgs;
+};
+
+#endif
diff --git a/liveMedia/include/OutputFile.hh b/liveMedia/include/OutputFile.hh
new file mode 100644
index 0000000..b104204
--- /dev/null
+++ b/liveMedia/include/OutputFile.hh
@@ -0,0 +1,31 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Common routines for opening/closing named output files
+// C++ header
+
+#ifndef _OUTPUT_FILE_HH
+#define _OUTPUT_FILE_HH
+
+#include <UsageEnvironment.hh>
+#include <stdio.h>
+
+FILE* OpenOutputFile(UsageEnvironment& env, char const* fileName);
+
+void CloseOutputFile(FILE* fid);
+
+#endif
diff --git a/liveMedia/include/PassiveServerMediaSubsession.hh b/liveMedia/include/PassiveServerMediaSubsession.hh
new file mode 100644
index 0000000..1f56863
--- /dev/null
+++ b/liveMedia/include/PassiveServerMediaSubsession.hh
@@ -0,0 +1,82 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that represents an existing
+// 'RTPSink', rather than one that creates new 'RTPSink's on demand.
+// C++ header
+
+#ifndef _PASSIVE_SERVER_MEDIA_SUBSESSION_HH
+#define _PASSIVE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+
+#ifndef _RTP_SINK_HH
+#include "RTPSink.hh"
+#endif
+#ifndef _RTCP_HH
+#include "RTCP.hh"
+#endif
+
+class PassiveServerMediaSubsession: public ServerMediaSubsession {
+public:
+ static PassiveServerMediaSubsession* createNew(RTPSink& rtpSink,
+ RTCPInstance* rtcpInstance = NULL);
+
+protected:
+ PassiveServerMediaSubsession(RTPSink& rtpSink, RTCPInstance* rtcpInstance);
+ // called only by createNew();
+ virtual ~PassiveServerMediaSubsession();
+
+ virtual Boolean rtcpIsMuxed();
+
+protected: // redefined virtual functions
+ virtual char const* sdpLines();
+ virtual void getStreamParameters(unsigned clientSessionId,
+ netAddressBits clientAddress,
+ Port const& clientRTPPort,
+ Port const& clientRTCPPort,
+ int tcpSocketNum,
+ unsigned char rtpChannelId,
+ unsigned char rtcpChannelId,
+ netAddressBits& destinationAddress,
+ u_int8_t& destinationTTL,
+ Boolean& isMulticast,
+ Port& serverRTPPort,
+ Port& serverRTCPPort,
+ void*& streamToken);
+ virtual void startStream(unsigned clientSessionId, void* streamToken,
+ TaskFunc* rtcpRRHandler,
+ void* rtcpRRHandlerClientData,
+ unsigned short& rtpSeqNum,
+ unsigned& rtpTimestamp,
+ ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+ void* serverRequestAlternativeByteHandlerClientData);
+ virtual float getCurrentNPT(void* streamToken);
+ virtual void getRTPSinkandRTCP(void* streamToken,
+ RTPSink const*& rtpSink, RTCPInstance const*& rtcp);
+ virtual void deleteStream(unsigned clientSessionId, void*& streamToken);
+
+protected:
+ char* fSDPLines;
+ RTPSink& fRTPSink;
+ RTCPInstance* fRTCPInstance;
+ HashTable* fClientRTCPSourceRecords; // indexed by client session id; used to implement RTCP "RR" handling
+};
+
+#endif
diff --git a/liveMedia/include/ProxyServerMediaSession.hh b/liveMedia/include/ProxyServerMediaSession.hh
new file mode 100644
index 0000000..ea7b4f7
--- /dev/null
+++ b/liveMedia/include/ProxyServerMediaSession.hh
@@ -0,0 +1,238 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A subclass of "ServerMediaSession" that can be used to create a (unicast) RTSP server that acts as a 'proxy' for
+// another (unicast or multicast) RTSP/RTP stream.
+// C++ header
+
+#ifndef _PROXY_SERVER_MEDIA_SESSION_HH
+#define _PROXY_SERVER_MEDIA_SESSION_HH
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#include "ServerMediaSession.hh"
+#endif
+#ifndef _MEDIA_SESSION_HH
+#include "MediaSession.hh"
+#endif
+#ifndef _RTSP_CLIENT_HH
+#include "RTSPClient.hh"
+#endif
+#ifndef _MEDIA_TRANSCODING_TABLE_HH
+#include "MediaTranscodingTable.hh"
+#endif
+
+// A subclass of "RTSPClient", used to refer to the particular "ProxyServerMediaSession" object being used.
+// It is used only within the implementation of "ProxyServerMediaSession", but is defined here, in case developers wish to
+// subclass it.
+
+class ProxyRTSPClient: public RTSPClient {
+public:
+ ProxyRTSPClient(class ProxyServerMediaSession& ourServerMediaSession, char const* rtspURL,
+ char const* username, char const* password,
+ portNumBits tunnelOverHTTPPortNum, int verbosityLevel, int socketNumToServer);
+ virtual ~ProxyRTSPClient();
+
+ void continueAfterDESCRIBE(char const* sdpDescription);
+ void continueAfterLivenessCommand(int resultCode, Boolean serverSupportsGetParameter);
+ void continueAfterSETUP(int resultCode);
+ void continueAfterPLAY(int resultCode);
+ void scheduleReset();
+
+private:
+ void reset();
+ int connectToServer(int socketNum, portNumBits remotePortNum);
+
+ Authenticator* auth() { return fOurAuthenticator; }
+
+ void scheduleLivenessCommand();
+ static void sendLivenessCommand(void* clientData);
+ void doReset();
+ static void doReset(void* clientData);
+
+ void scheduleDESCRIBECommand();
+ static void sendDESCRIBE(void* clientData);
+ void sendDESCRIBE();
+
+ static void subsessionTimeout(void* clientData);
+ void handleSubsessionTimeout();
+
+private:
+ friend class ProxyServerMediaSession;
+ friend class ProxyServerMediaSubsession;
+ ProxyServerMediaSession& fOurServerMediaSession;
+ char* fOurURL;
+ Authenticator* fOurAuthenticator;
+ Boolean fStreamRTPOverTCP;
+ class ProxyServerMediaSubsession *fSetupQueueHead, *fSetupQueueTail;
+ unsigned fNumSetupsDone;
+ unsigned fNextDESCRIBEDelay; // in seconds
+ Boolean fServerSupportsGetParameter, fLastCommandWasPLAY, fDoneDESCRIBE;
+ TaskToken fLivenessCommandTask, fDESCRIBECommandTask, fSubsessionTimerTask, fResetTask;
+};
+
+
+typedef ProxyRTSPClient*
+createNewProxyRTSPClientFunc(ProxyServerMediaSession& ourServerMediaSession,
+ char const* rtspURL,
+ char const* username, char const* password,
+ portNumBits tunnelOverHTTPPortNum, int verbosityLevel,
+ int socketNumToServer);
+ProxyRTSPClient*
+defaultCreateNewProxyRTSPClientFunc(ProxyServerMediaSession& ourServerMediaSession,
+ char const* rtspURL,
+ char const* username, char const* password,
+ portNumBits tunnelOverHTTPPortNum, int verbosityLevel,
+ int socketNumToServer);
+
+class ProxyServerMediaSession: public ServerMediaSession {
+public:
+ static ProxyServerMediaSession* createNew(UsageEnvironment& env,
+ GenericMediaServer* ourMediaServer, // Note: We can be used by just one server
+ char const* inputStreamURL, // the "rtsp://" URL of the stream we'll be proxying
+ char const* streamName = NULL,
+ char const* username = NULL, char const* password = NULL,
+ portNumBits tunnelOverHTTPPortNum = 0,
+ // for streaming the *proxied* (i.e., back-end) stream
+ int verbosityLevel = 0,
+ int socketNumToServer = -1,
+ MediaTranscodingTable* transcodingTable = NULL);
+ // Hack: "tunnelOverHTTPPortNum" == 0xFFFF (i.e., all-ones) means: Stream RTP/RTCP-over-TCP, but *not* using HTTP
+ // "verbosityLevel" == 1 means display basic proxy setup info; "verbosityLevel" == 2 means display RTSP client protocol also.
+ // If "socketNumToServer" is >= 0, then it is the socket number of an already-existing TCP connection to the server.
+ // (In this case, "inputStreamURL" must point to the socket's endpoint, so that it can be accessed via the socket.)
+
+ virtual ~ProxyServerMediaSession();
+
+ char const* url() const;
+
+ char describeCompletedFlag;
+ // initialized to 0; set to 1 when the back-end "DESCRIBE" completes.
+ // (This can be used as a 'watch variable' in "doEventLoop()".)
+ Boolean describeCompletedSuccessfully() const { return fClientMediaSession != NULL; }
+ // This can be used - along with "describeCompletedFlag" - to check whether the back-end "DESCRIBE" completed *successfully*.
+
+protected:
+ ProxyServerMediaSession(UsageEnvironment& env, GenericMediaServer* ourMediaServer,
+ char const* inputStreamURL, char const* streamName,
+ char const* username, char const* password,
+ portNumBits tunnelOverHTTPPortNum, int verbosityLevel,
+ int socketNumToServer,
+ MediaTranscodingTable* transcodingTable,
+ createNewProxyRTSPClientFunc* ourCreateNewProxyRTSPClientFunc
+ = defaultCreateNewProxyRTSPClientFunc,
+ portNumBits initialPortNum = 6970,
+ Boolean multiplexRTCPWithRTP = False);
+
+ // If you subclass "ProxyRTSPClient", then you will also need to define your own function
+ // - with signature "createNewProxyRTSPClientFunc" (see above) - that creates a new object
+ // of this subclass. You should also subclass "ProxyServerMediaSession" and, in your
+ // subclass's constructor, initialize the parent class (i.e., "ProxyServerMediaSession")
+ // constructor by passing your new function as the "ourCreateNewProxyRTSPClientFunc"
+ // parameter.
+
+ // Subclasses may redefine the following functions, if they want "ProxyServerSubsession"s
+ // to create subclassed "Groupsock" and/or "RTCPInstance" objects:
+ virtual Groupsock* createGroupsock(struct in_addr const& addr, Port port);
+ virtual RTCPInstance* createRTCP(Groupsock* RTCPgs, unsigned totSessionBW, /* in kbps */
+ unsigned char const* cname, RTPSink* sink);
+
+ virtual Boolean allowProxyingForSubsession(MediaSubsession const& mss);
+ // By default, this function always returns True. However, a subclass may redefine this
+ // if it wishes to restrict which subsessions of a stream get proxied - e.g., if it wishes
+ // to proxy only video tracks, but not audio (or other) tracks.
+
+protected:
+ GenericMediaServer* fOurMediaServer;
+ ProxyRTSPClient* fProxyRTSPClient;
+ MediaSession* fClientMediaSession;
+
+private:
+ friend class ProxyRTSPClient;
+ friend class ProxyServerMediaSubsession;
+ void continueAfterDESCRIBE(char const* sdpDescription);
+ void resetDESCRIBEState(); // undoes what was done by "continueAfterDESCRIBE()"
+
+private:
+ int fVerbosityLevel;
+ class PresentationTimeSessionNormalizer* fPresentationTimeSessionNormalizer;
+ createNewProxyRTSPClientFunc* fCreateNewProxyRTSPClientFunc;
+ MediaTranscodingTable* fTranscodingTable;
+ portNumBits fInitialPortNum;
+ Boolean fMultiplexRTCPWithRTP;
+};
+
+
+////////// PresentationTimeSessionNormalizer and PresentationTimeSubsessionNormalizer definitions //////////
+
+// The following two classes are used by proxies to convert incoming streams' presentation times into wall-clock-aligned
+// presentation times that are suitable for our "RTPSink"s (for the corresponding outgoing streams).
+// (For multi-subsession (i.e., audio+video) sessions, the outgoing streams' presentation times retain the same relative
+// separation as those of the incoming streams.)
+
+class PresentationTimeSubsessionNormalizer: public FramedFilter {
+public:
+ void setRTPSink(RTPSink* rtpSink) { fRTPSink = rtpSink; }
+
+private:
+ friend class PresentationTimeSessionNormalizer;
+ PresentationTimeSubsessionNormalizer(PresentationTimeSessionNormalizer& parent, FramedSource* inputSource, RTPSource* rtpSource,
+ char const* codecName, PresentationTimeSubsessionNormalizer* next);
+ // called only from within "PresentationTimeSessionNormalizer"
+ virtual ~PresentationTimeSubsessionNormalizer();
+
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private: // redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ PresentationTimeSessionNormalizer& fParent;
+ RTPSource* fRTPSource;
+ RTPSink* fRTPSink;
+ char const* fCodecName;
+ PresentationTimeSubsessionNormalizer* fNext;
+};
+
+class PresentationTimeSessionNormalizer: public Medium {
+public:
+ PresentationTimeSessionNormalizer(UsageEnvironment& env);
+ virtual ~PresentationTimeSessionNormalizer();
+
+ PresentationTimeSubsessionNormalizer*
+ createNewPresentationTimeSubsessionNormalizer(FramedSource* inputSource, RTPSource* rtpSource, char const* codecName);
+
+private: // called only from within "~PresentationTimeSubsessionNormalizer":
+ friend class PresentationTimeSubsessionNormalizer;
+ void normalizePresentationTime(PresentationTimeSubsessionNormalizer* ssNormalizer,
+ struct timeval& toPT, struct timeval const& fromPT);
+ void removePresentationTimeSubsessionNormalizer(PresentationTimeSubsessionNormalizer* ssNormalizer);
+
+private:
+ PresentationTimeSubsessionNormalizer* fSubsessionNormalizers;
+ PresentationTimeSubsessionNormalizer* fMasterSSNormalizer; // used for subsessions that have been RTCP-synced
+
+ struct timeval fPTAdjustment; // Added to (RTCP-synced) subsession presentation times to 'normalize' them with wall-clock time.
+};
+
+#endif
diff --git a/liveMedia/include/QCELPAudioRTPSource.hh b/liveMedia/include/QCELPAudioRTPSource.hh
new file mode 100644
index 0000000..de9dc5e
--- /dev/null
+++ b/liveMedia/include/QCELPAudioRTPSource.hh
@@ -0,0 +1,39 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Qualcomm "PureVoice" (aka. "QCELP") Audio RTP Sources
+// C++ header
+
+#ifndef _QCELP_AUDIO_RTP_SOURCE_HH
+#define _QCELP_AUDIO_RTP_SOURCE_HH
+
+#ifndef _RTP_SOURCE_HH
+#include "RTPSource.hh"
+#endif
+
+class QCELPAudioRTPSource {
+public:
+ static FramedSource* createNew(UsageEnvironment& env,
+ Groupsock* RTPgs,
+ RTPSource*& resultRTPSource,
+ unsigned char rtpPayloadFormat = 12,
+ unsigned rtpTimestampFrequency = 8000);
+ // This returns a source to read from, but "resultRTPSource" will
+ // point to RTP-related state.
+};
+
+#endif
diff --git a/liveMedia/include/QuickTimeFileSink.hh b/liveMedia/include/QuickTimeFileSink.hh
new file mode 100644
index 0000000..b015e16
--- /dev/null
+++ b/liveMedia/include/QuickTimeFileSink.hh
@@ -0,0 +1,192 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A sink that generates a QuickTime file from a composite media session
+// C++ header
+
+#ifndef _QUICKTIME_FILE_SINK_HH
+#define _QUICKTIME_FILE_SINK_HH
+
+#ifndef _MEDIA_SESSION_HH
+#include "MediaSession.hh"
+#endif
+
+class QuickTimeFileSink: public Medium {
+public:
+ static QuickTimeFileSink* createNew(UsageEnvironment& env,
+ MediaSession& inputSession,
+ char const* outputFileName,
+ unsigned bufferSize = 20000,
+ unsigned short movieWidth = 240,
+ unsigned short movieHeight = 180,
+ unsigned movieFPS = 15,
+ Boolean packetLossCompensate = False,
+ Boolean syncStreams = False,
+ Boolean generateHintTracks = False,
+ Boolean generateMP4Format = False);
+
+ typedef void (afterPlayingFunc)(void* clientData);
+ Boolean startPlaying(afterPlayingFunc* afterFunc,
+ void* afterClientData);
+
+ unsigned numActiveSubsessions() const { return fNumSubsessions; }
+
+protected:
+ QuickTimeFileSink(UsageEnvironment& env, MediaSession& inputSession,
+ char const* outputFileName, unsigned bufferSize,
+ unsigned short movieWidth, unsigned short movieHeight,
+ unsigned movieFPS, Boolean packetLossCompensate,
+ Boolean syncStreams, Boolean generateHintTracks,
+ Boolean generateMP4Format);
+ // called only by createNew()
+ virtual ~QuickTimeFileSink();
+
+ virtual void noteRecordedFrame(MediaSubsession& inputSubsession,
+ unsigned packetDataSize, struct timeval const& presentationTime);
+
+private:
+ Boolean continuePlaying();
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ static void onSourceClosure(void* clientData);
+ void onSourceClosure1();
+ static void onRTCPBye(void* clientData);
+ void completeOutputFile();
+
+private:
+ friend class SubsessionIOState;
+ MediaSession& fInputSession;
+ FILE* fOutFid;
+ unsigned fBufferSize;
+ Boolean fPacketLossCompensate;
+ Boolean fSyncStreams, fGenerateMP4Format;
+ struct timeval fNewestSyncTime, fFirstDataTime;
+ Boolean fAreCurrentlyBeingPlayed;
+ afterPlayingFunc* fAfterFunc;
+ void* fAfterClientData;
+ unsigned fAppleCreationTime;
+ unsigned fLargestRTPtimestampFrequency;
+ unsigned fNumSubsessions, fNumSyncedSubsessions;
+ struct timeval fStartTime;
+ Boolean fHaveCompletedOutputFile;
+
+private:
+ ///// Definitions specific to the QuickTime file format:
+
+ unsigned addWord64(u_int64_t word);
+ unsigned addWord(unsigned word);
+ unsigned addHalfWord(unsigned short halfWord);
+ unsigned addByte(unsigned char byte) {
+ putc(byte, fOutFid);
+ return 1;
+ }
+ unsigned addZeroWords(unsigned numWords);
+ unsigned add4ByteString(char const* str);
+ unsigned addArbitraryString(char const* str,
+ Boolean oneByteLength = True);
+ unsigned addAtomHeader(char const* atomName);
+ unsigned addAtomHeader64(char const* atomName);
+ // strlen(atomName) must be 4
+ void setWord(int64_t filePosn, unsigned size);
+ void setWord64(int64_t filePosn, u_int64_t size);
+
+ unsigned movieTimeScale() const {return fLargestRTPtimestampFrequency;}
+
+ // Define member functions for outputting various types of atom:
+#define _atom(name) unsigned addAtom_##name()
+ _atom(ftyp); // for MP4 format files
+ _atom(moov);
+ _atom(mvhd);
+ _atom(iods); // for MP4 format files
+ _atom(trak);
+ _atom(tkhd);
+ _atom(edts);
+ _atom(elst);
+ _atom(tref);
+ _atom(hint);
+ _atom(mdia);
+ _atom(mdhd);
+ _atom(hdlr);
+ _atom(minf);
+ _atom(smhd);
+ _atom(vmhd);
+ _atom(gmhd);
+ _atom(gmin);
+ unsigned addAtom_hdlr2();
+ _atom(dinf);
+ _atom(dref);
+ _atom(alis);
+ _atom(stbl);
+ _atom(stsd);
+ unsigned addAtom_genericMedia();
+ unsigned addAtom_soundMediaGeneral();
+ _atom(ulaw);
+ _atom(alaw);
+ _atom(Qclp);
+ _atom(wave);
+ _atom(frma);
+ _atom(Fclp);
+ _atom(Hclp);
+ _atom(mp4a);
+// _atom(wave);
+// _atom(frma);
+ _atom(esds);
+ _atom(srcq);
+ _atom(h263);
+ _atom(avc1);
+ _atom(avcC);
+ _atom(mp4v);
+ _atom(rtp);
+ _atom(tims);
+ _atom(stts);
+ _atom(stss);
+ _atom(stsc);
+ _atom(stsz);
+ _atom(co64);
+ _atom(udta);
+ _atom(name);
+ _atom(hnti);
+ _atom(sdp);
+ _atom(hinf);
+ _atom(totl);
+ _atom(npck);
+ _atom(tpay);
+ _atom(trpy);
+ _atom(nump);
+ _atom(tpyl);
+ _atom(dmed);
+ _atom(dimm);
+ _atom(drep);
+ _atom(tmin);
+ _atom(tmax);
+ _atom(pmax);
+ _atom(dmax);
+ _atom(payt);
+ unsigned addAtom_dummy();
+
+private:
+ unsigned short fMovieWidth, fMovieHeight;
+ unsigned fMovieFPS;
+ int64_t fMDATposition;
+ int64_t fMVHD_durationPosn;
+ unsigned fMaxTrackDurationM; // in movie time units
+ class SubsessionIOState* fCurrentIOState;
+};
+
+#endif
diff --git a/liveMedia/include/QuickTimeGenericRTPSource.hh b/liveMedia/include/QuickTimeGenericRTPSource.hh
new file mode 100644
index 0000000..cdca3d3
--- /dev/null
+++ b/liveMedia/include/QuickTimeGenericRTPSource.hh
@@ -0,0 +1,68 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP Sources containing generic QuickTime stream data, as defined in
+// <http://developer.apple.com/quicktime/icefloe/dispatch026.html>
+// C++ header
+
+#ifndef _QUICKTIME_GENERIC_RTP_SOURCE_HH
+#define _QUICKTIME_GENERIC_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class QuickTimeGenericRTPSource: public MultiFramedRTPSource {
+public:
+ static QuickTimeGenericRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat, unsigned rtpTimestampFrequency,
+ char const* mimeTypeString);
+
+ // QuickTime-specific information, set from the QuickTime header
+ // in each packet. This, along with the data following the header,
+ // is used by receivers.
+ struct QTState {
+ char PCK;
+ unsigned timescale;
+ char* sdAtom;
+ unsigned sdAtomSize;
+ unsigned short width, height;
+ // later add other state as needed #####
+ } qtState;
+
+protected:
+ virtual ~QuickTimeGenericRTPSource();
+
+private:
+ QuickTimeGenericRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mimeTypeString);
+ // called only by createNew()
+
+private:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ char const* fMIMEtypeString;
+};
+
+#endif
diff --git a/liveMedia/include/RTCP.hh b/liveMedia/include/RTCP.hh
new file mode 100644
index 0000000..a0550a0
--- /dev/null
+++ b/liveMedia/include/RTCP.hh
@@ -0,0 +1,248 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTCP
+// C++ header
+
+#ifndef _RTCP_HH
+#define _RTCP_HH
+
+#ifndef _RTP_SINK_HH
+#include "RTPSink.hh"
+#endif
+#ifndef _RTP_SOURCE_HH
+#include "RTPSource.hh"
+#endif
+#ifndef _SRTP_CRYPTOGRAPHIC_CONTEXT_HH
+#include "SRTPCryptographicContext.hh"
+#endif
+
+class SDESItem {
+public:
+ SDESItem(unsigned char tag, unsigned char const* value);
+
+ unsigned char const* data() const {return fData;}
+ unsigned totalSize() const;
+
+private:
+ unsigned char fData[2 + 0xFF]; // first 2 bytes are tag and length
+};
+
+typedef void RTCPAppHandlerFunc(void* clientData,
+ u_int8_t subtype, u_int32_t nameBytes/*big-endian order*/,
+ u_int8_t* appDependentData, unsigned appDependentDataSize);
+
+class RTCPMemberDatabase; // forward
+
+typedef void ByeWithReasonHandlerFunc(void* clientData, char const* reason);
+
+class RTCPInstance: public Medium {
+public:
+ static RTCPInstance* createNew(UsageEnvironment& env, Groupsock* RTCPgs,
+ unsigned totSessionBW, /* in kbps */
+ unsigned char const* cname,
+ RTPSink* sink,
+ RTPSource* source,
+ Boolean isSSMTransmitter = False,
+ SRTPCryptographicContext* crypto = NULL);
+
+ static Boolean lookupByName(UsageEnvironment& env, char const* instanceName,
+ RTCPInstance*& resultInstance);
+
+ unsigned numMembers() const;
+ unsigned totSessionBW() const { return fTotSessionBW; }
+
+ void setByeHandler(TaskFunc* handlerTask, void* clientData,
+ Boolean handleActiveParticipantsOnly = True);
+ // Assigns a handler routine to be called if a "BYE" arrives.
+ // The handler is called once only; for subsequent "BYE"s,
+ // "setByeHandler()" would need to be called again.
+ // If "handleActiveParticipantsOnly" is True, then the handler is called
+ // only if the SSRC is for a known sender (if we have a "RTPSource"),
+ // or if the SSRC is for a known receiver (if we have a "RTPSink").
+ // This prevents (for example) the handler for a multicast receiver being
+ // called if some other multicast receiver happens to exit.
+ // If "handleActiveParticipantsOnly" is False, then the handler is called
+ // for any incoming RTCP "BYE".
+ // (To remove an existing "BYE" handler, call "setByeHandler()" again, with a "handlerTask" of NULL.)
+ void setByeWithReasonHandler(ByeWithReasonHandlerFunc* handlerTask, void* clientData,
+ Boolean handleActiveParticipantsOnly = True);
+ // Like "setByeHandler()", except that a string 'reason for the bye' (received as part of
+ // the RTCP "BYE" packet) is passed to the handler function (along with "clientData").
+ // (The 'reason' parameter to the handler function will be a dynamically-allocated string,
+ // or NULL, and should be delete[]d by the handler function.)
+ void setSRHandler(TaskFunc* handlerTask, void* clientData);
+ void setRRHandler(TaskFunc* handlerTask, void* clientData);
+ // Assigns a handler routine to be called if a "SR" or "RR" packet
+ // (respectively) arrives. Unlike "setByeHandler()", the handler will
+ // be called once for each incoming "SR" or "RR". (To turn off handling,
+ // call the function again with "handlerTask" (and "clientData") as NULL.)
+ void setSpecificRRHandler(netAddressBits fromAddress, Port fromPort,
+ TaskFunc* handlerTask, void* clientData);
+ // Like "setRRHandler()", but applies only to "RR" packets that come from
+ // a specific source address and port. (Note that if both a specific
+ // and a general "RR" handler function is set, then both will be called.)
+ void unsetSpecificRRHandler(netAddressBits fromAddress, Port fromPort); // equivalent to setSpecificRRHandler(..., NULL, NULL);
+ void setAppHandler(RTCPAppHandlerFunc* handlerTask, void* clientData);
+ // Assigns a handler routine to be called whenever an "APP" packet arrives. (To turn off
+ // handling, call the function again with "handlerTask" (and "clientData") as NULL.)
+ void sendAppPacket(u_int8_t subtype, char const* name,
+ u_int8_t* appDependentData, unsigned appDependentDataSize);
+ // Sends a custom RTCP "APP" packet to the peer(s). The parameters correspond to their
+ // respective fields as described in the RTP/RTCP definition (RFC 3550).
+ // Note that only the low-order 5 bits of "subtype" are used, and only the first 4 bytes
+ // of "name" are used. (If "name" has fewer than 4 bytes, or is NULL,
+ // then the remaining bytes are '\0'.)
+
+ Groupsock* RTCPgs() const { return fRTCPInterface.gs(); }
+
+ void setStreamSocket(int sockNum, unsigned char streamChannelId);
+ void addStreamSocket(int sockNum, unsigned char streamChannelId);
+ void removeStreamSocket(int sockNum, unsigned char streamChannelId) {
+ fRTCPInterface.removeStreamSocket(sockNum, streamChannelId);
+ }
+  // hacks to allow sending RTP over TCP (RFC 2326, section 10.12)
+
+ void setAuxilliaryReadHandler(AuxHandlerFunc* handlerFunc,
+ void* handlerClientData) {
+ fRTCPInterface.setAuxilliaryReadHandler(handlerFunc,
+ handlerClientData);
+ }
+
+ void injectReport(u_int8_t const* packet, unsigned packetSize, struct sockaddr_in const& fromAddress);
+ // Allows an outside party to inject an RTCP report (from other than the network interface)
+
+protected:
+ RTCPInstance(UsageEnvironment& env, Groupsock* RTPgs, unsigned totSessionBW,
+ unsigned char const* cname,
+ RTPSink* sink, RTPSource* source,
+ Boolean isSSMTransmitter,
+ SRTPCryptographicContext* crypto);
+ // called only by createNew()
+ virtual ~RTCPInstance();
+
+ virtual void noteArrivingRR(struct sockaddr_in const& fromAddressAndPort,
+ int tcpSocketNum, unsigned char tcpStreamChannelId);
+
+ void incomingReportHandler1();
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isRTCPInstance() const;
+
+private:
+ Boolean addReport(Boolean alwaysAdd = False);
+ void addSR();
+ void addRR();
+ void enqueueCommonReportPrefix(unsigned char packetType, u_int32_t SSRC,
+ unsigned numExtraWords = 0);
+ void enqueueCommonReportSuffix();
+ void enqueueReportBlock(RTPReceptionStats* receptionStats);
+ void addSDES();
+ void addBYE(char const* reason);
+
+ void sendBuiltPacket();
+
+ static void onExpire(RTCPInstance* instance);
+ void onExpire1();
+
+ static void incomingReportHandler(RTCPInstance* instance, int /*mask*/);
+ void processIncomingReport(unsigned packetSize, struct sockaddr_in const& fromAddressAndPort,
+ int tcpSocketNum, unsigned char tcpStreamChannelId);
+ void onReceive(int typeOfPacket, int totPacketSize, u_int32_t ssrc);
+
+private:
+ u_int8_t* fInBuf;
+ unsigned fNumBytesAlreadyRead;
+ OutPacketBuffer* fOutBuf;
+ RTPInterface fRTCPInterface;
+ unsigned fTotSessionBW;
+ RTPSink* fSink;
+ RTPSource* fSource;
+ Boolean fIsSSMTransmitter;
+ SRTPCryptographicContext* fCrypto;
+
+ SDESItem fCNAME;
+ RTCPMemberDatabase* fKnownMembers;
+ unsigned fOutgoingReportCount; // used for SSRC member aging
+
+ double fAveRTCPSize;
+ int fIsInitial;
+ double fPrevReportTime;
+ double fNextReportTime;
+ int fPrevNumMembers;
+
+ int fLastSentSize;
+ int fLastReceivedSize;
+ u_int32_t fLastReceivedSSRC;
+ int fTypeOfEvent;
+ int fTypeOfPacket;
+ Boolean fHaveJustSentPacket;
+ unsigned fLastPacketSentSize;
+
+ TaskFunc* fByeHandlerTask;
+ ByeWithReasonHandlerFunc* fByeWithReasonHandlerTask;
+ void* fByeHandlerClientData;
+ Boolean fByeHandleActiveParticipantsOnly;
+ TaskFunc* fSRHandlerTask;
+ void* fSRHandlerClientData;
+ TaskFunc* fRRHandlerTask;
+ void* fRRHandlerClientData;
+ AddressPortLookupTable* fSpecificRRHandlerTable;
+ RTCPAppHandlerFunc* fAppHandlerTask;
+ void* fAppHandlerClientData;
+
+public: // because this stuff is used by an external "C" function
+ void schedule(double nextTime);
+ void reschedule(double nextTime);
+ void sendReport();
+ void sendBYE(char const* reason = NULL);
+ int typeOfEvent() {return fTypeOfEvent;}
+ int sentPacketSize() {return fLastSentSize;}
+ int packetType() {return fTypeOfPacket;}
+ int receivedPacketSize() {return fLastReceivedSize;}
+ int checkNewSSRC();
+ void removeLastReceivedSSRC();
+ void removeSSRC(u_int32_t ssrc, Boolean alsoRemoveStats);
+};
+
+// RTCP packet types:
+const unsigned char RTCP_PT_SR = 200;
+const unsigned char RTCP_PT_RR = 201;
+const unsigned char RTCP_PT_SDES = 202;
+const unsigned char RTCP_PT_BYE = 203;
+const unsigned char RTCP_PT_APP = 204;
+const unsigned char RTCP_PT_RTPFB = 205; // Generic RTP Feedback [RFC4585]
+const unsigned char RTCP_PT_PSFB = 206; // Payload-specific [RFC4585]
+const unsigned char RTCP_PT_XR = 207; // extended report [RFC3611]
+const unsigned char RTCP_PT_AVB = 208; // AVB RTCP packet ["Standard for Layer 3 Transport Protocol for Time Sensitive Applications in Local Area Networks." Work in progress.]
+const unsigned char RTCP_PT_RSI = 209; // Receiver Summary Information [RFC5760]
+const unsigned char RTCP_PT_TOKEN = 210; // Port Mapping [RFC6284]
+const unsigned char RTCP_PT_IDMS = 211; // IDMS Settings [RFC7272]
+
+// SDES tags:
+const unsigned char RTCP_SDES_END = 0;
+const unsigned char RTCP_SDES_CNAME = 1;
+const unsigned char RTCP_SDES_NAME = 2;
+const unsigned char RTCP_SDES_EMAIL = 3;
+const unsigned char RTCP_SDES_PHONE = 4;
+const unsigned char RTCP_SDES_LOC = 5;
+const unsigned char RTCP_SDES_TOOL = 6;
+const unsigned char RTCP_SDES_NOTE = 7;
+const unsigned char RTCP_SDES_PRIV = 8;
+
+#endif
diff --git a/liveMedia/include/RTPInterface.hh b/liveMedia/include/RTPInterface.hh
new file mode 100644
index 0000000..1b70094
--- /dev/null
+++ b/liveMedia/include/RTPInterface.hh
@@ -0,0 +1,109 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An abstraction of a network interface used for RTP (or RTCP).
+// (This allows the RTP-over-TCP hack (RFC 2326, section 10.12) to
+// be implemented transparently.)
+// C++ header
+
+#ifndef _RTP_INTERFACE_HH
+#define _RTP_INTERFACE_HH
+
+#ifndef _MEDIA_HH
+#include <Media.hh>
+#endif
+#ifndef _GROUPSOCK_HH
+#include "Groupsock.hh"
+#endif
+
+// Typedef for an optional auxiliary handler function, to be called
+// when each new packet is read:
+typedef void AuxHandlerFunc(void* clientData, unsigned char* packet,
+ unsigned& packetSize);
+
+typedef void ServerRequestAlternativeByteHandler(void* instance, u_int8_t requestByte);
+// A hack that allows a handler for RTP/RTCP packets received over TCP to process RTSP commands that may also appear within
+// the same TCP connection. A RTSP server implementation would supply a function like this - as a parameter to
+// "ServerMediaSubsession::startStream()".
+
+class RTPInterface {
+public:
+ RTPInterface(Medium* owner, Groupsock* gs);
+ virtual ~RTPInterface();
+
+ Groupsock* gs() const { return fGS; }
+
+ void setStreamSocket(int sockNum, unsigned char streamChannelId);
+ void addStreamSocket(int sockNum, unsigned char streamChannelId);
+ void removeStreamSocket(int sockNum, unsigned char streamChannelId);
+ static void setServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum,
+ ServerRequestAlternativeByteHandler* handler, void* clientData);
+ static void clearServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum);
+
+ Boolean sendPacket(unsigned char* packet, unsigned packetSize);
+ void startNetworkReading(TaskScheduler::BackgroundHandlerProc*
+ handlerProc);
+ Boolean handleRead(unsigned char* buffer, unsigned bufferMaxSize,
+ // out parameters:
+ unsigned& bytesRead, struct sockaddr_in& fromAddress,
+ int& tcpSocketNum, unsigned char& tcpStreamChannelId,
+ Boolean& packetReadWasIncomplete);
+ // Note: If "tcpSocketNum" < 0, then the packet was received over UDP, and "tcpStreamChannelId"
+ // is undefined (and irrelevant).
+
+
+ // Otherwise (if "tcpSocketNum" >= 0), the packet was received (interleaved) over TCP, and
+ // "tcpStreamChannelId" will return the channel id.
+
+ void stopNetworkReading();
+
+ UsageEnvironment& envir() const { return fOwner->envir(); }
+
+ void setAuxilliaryReadHandler(AuxHandlerFunc* handlerFunc,
+ void* handlerClientData) {
+ fAuxReadHandlerFunc = handlerFunc;
+ fAuxReadHandlerClientData = handlerClientData;
+ }
+
+ void forgetOurGroupsock() { fGS = NULL; }
+ // This may be called - *only immediately prior* to deleting this - to prevent our destructor
+ // from turning off background reading on the 'groupsock'. (This is in case the 'groupsock'
+ // is also being read from elsewhere.)
+
+private:
+ // Helper functions for sending a RTP or RTCP packet over a TCP connection:
+ Boolean sendRTPorRTCPPacketOverTCP(unsigned char* packet, unsigned packetSize,
+ int socketNum, unsigned char streamChannelId);
+ Boolean sendDataOverTCP(int socketNum, u_int8_t const* data, unsigned dataSize, Boolean forceSendToSucceed);
+
+private:
+ friend class SocketDescriptor;
+ Medium* fOwner;
+ Groupsock* fGS;
+ class tcpStreamRecord* fTCPStreams; // optional, for RTP-over-TCP streaming/receiving
+
+ unsigned short fNextTCPReadSize;
+ // how much data (if any) is available to be read from the TCP stream
+ int fNextTCPReadStreamSocketNum;
+ unsigned char fNextTCPReadStreamChannelId;
+ TaskScheduler::BackgroundHandlerProc* fReadHandlerProc; // if any
+
+ AuxHandlerFunc* fAuxReadHandlerFunc;
+ void* fAuxReadHandlerClientData;
+};
+
+#endif
diff --git a/liveMedia/include/RTPSink.hh b/liveMedia/include/RTPSink.hh
new file mode 100644
index 0000000..6361c2e
--- /dev/null
+++ b/liveMedia/include/RTPSink.hh
@@ -0,0 +1,233 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP Sinks
+// C++ header
+
+#ifndef _RTP_SINK_HH
+#define _RTP_SINK_HH
+
+#ifndef _MEDIA_SINK_HH
+#include "MediaSink.hh"
+#endif
+#ifndef _RTP_INTERFACE_HH
+#include "RTPInterface.hh"
+#endif
+
+class RTPTransmissionStatsDB; // forward
+
+class RTPSink: public MediaSink {
+public:
+ static Boolean lookupByName(UsageEnvironment& env, char const* sinkName,
+ RTPSink*& resultSink);
+
+ // used by RTSP servers:
+ Groupsock const& groupsockBeingUsed() const { return *(fRTPInterface.gs()); }
+ Groupsock& groupsockBeingUsed() { return *(fRTPInterface.gs()); }
+
+ unsigned char rtpPayloadType() const { return fRTPPayloadType; }
+ unsigned rtpTimestampFrequency() const { return fTimestampFrequency; }
+ void setRTPTimestampFrequency(unsigned freq) {
+ fTimestampFrequency = freq;
+ }
+ char const* rtpPayloadFormatName() const {return fRTPPayloadFormatName;}
+
+ unsigned numChannels() const { return fNumChannels; }
+
+ virtual char const* sdpMediaType() const; // for use in SDP m= lines
+ virtual char* rtpmapLine() const; // returns a string to be delete[]d
+ virtual char const* auxSDPLine();
+ // optional SDP line (e.g. a=fmtp:...)
+
+ u_int16_t currentSeqNo() const { return fSeqNo; }
+ u_int32_t presetNextTimestamp();
+ // ensures that the next timestamp to be used will correspond to
+ // the current 'wall clock' time.
+
+ RTPTransmissionStatsDB& transmissionStatsDB() const {
+ return *fTransmissionStatsDB;
+ }
+
+ Boolean nextTimestampHasBeenPreset() const { return fNextTimestampHasBeenPreset; }
+ Boolean& enableRTCPReports() { return fEnableRTCPReports; }
+
+ void getTotalBitrate(unsigned& outNumBytes, double& outElapsedTime);
+ // returns the number of bytes sent since the last time that we
+ // were called, and resets the counter.
+
+ struct timeval const& creationTime() const { return fCreationTime; }
+ struct timeval const& initialPresentationTime() const { return fInitialPresentationTime; }
+ struct timeval const& mostRecentPresentationTime() const { return fMostRecentPresentationTime; }
+ void resetPresentationTimes();
+
+  // Hacks to allow sending RTP over TCP (RFC 2326, section 10.12):
+ void setStreamSocket(int sockNum, unsigned char streamChannelId) {
+ fRTPInterface.setStreamSocket(sockNum, streamChannelId);
+ }
+ void addStreamSocket(int sockNum, unsigned char streamChannelId) {
+ fRTPInterface.addStreamSocket(sockNum, streamChannelId);
+ }
+ void removeStreamSocket(int sockNum, unsigned char streamChannelId) {
+ fRTPInterface.removeStreamSocket(sockNum, streamChannelId);
+ }
+ unsigned& estimatedBitrate() { return fEstimatedBitrate; } // kbps; usually 0 (i.e., unset)
+
+ u_int32_t SSRC() const {return fSSRC;}
+ // later need a means of changing the SSRC if there's a collision #####
+
+protected:
+ RTPSink(UsageEnvironment& env,
+ Groupsock* rtpGS, unsigned char rtpPayloadType,
+ u_int32_t rtpTimestampFrequency,
+ char const* rtpPayloadFormatName,
+ unsigned numChannels);
+ // abstract base class
+
+ virtual ~RTPSink();
+
+ // used by RTCP:
+ friend class RTCPInstance;
+ friend class RTPTransmissionStats;
+ u_int32_t convertToRTPTimestamp(struct timeval tv);
+ unsigned packetCount() const {return fPacketCount;}
+ unsigned octetCount() const {return fOctetCount;}
+
+protected:
+ RTPInterface fRTPInterface;
+ unsigned char fRTPPayloadType;
+ unsigned fPacketCount, fOctetCount, fTotalOctetCount /*incl RTP hdr*/;
+ struct timeval fTotalOctetCountStartTime, fInitialPresentationTime, fMostRecentPresentationTime;
+ u_int32_t fCurrentTimestamp;
+ u_int16_t fSeqNo;
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isRTPSink() const;
+
+private:
+ u_int32_t fSSRC, fTimestampBase;
+ unsigned fTimestampFrequency;
+ Boolean fNextTimestampHasBeenPreset;
+ Boolean fEnableRTCPReports; // whether RTCP "SR" reports should be sent for this sink (default: True)
+ char const* fRTPPayloadFormatName;
+ unsigned fNumChannels;
+ struct timeval fCreationTime;
+ unsigned fEstimatedBitrate; // set on creation if known; otherwise 0
+
+ RTPTransmissionStatsDB* fTransmissionStatsDB;
+};
+
+
+class RTPTransmissionStats; // forward
+
+class RTPTransmissionStatsDB {
+public:
+ unsigned numReceivers() const { return fNumReceivers; }
+
+ class Iterator {
+ public:
+ Iterator(RTPTransmissionStatsDB& receptionStatsDB);
+ virtual ~Iterator();
+
+ RTPTransmissionStats* next();
+ // NULL if none
+
+ private:
+ HashTable::Iterator* fIter;
+ };
+
+ // The following is called whenever a RTCP RR packet is received:
+ void noteIncomingRR(u_int32_t SSRC, struct sockaddr_in const& lastFromAddress,
+ unsigned lossStats, unsigned lastPacketNumReceived,
+ unsigned jitter, unsigned lastSRTime, unsigned diffSR_RRTime);
+
+ // The following is called when a RTCP BYE packet is received:
+ void removeRecord(u_int32_t SSRC);
+
+ RTPTransmissionStats* lookup(u_int32_t SSRC) const;
+
+private: // constructor and destructor, called only by RTPSink:
+ friend class RTPSink;
+ RTPTransmissionStatsDB(RTPSink& rtpSink);
+ virtual ~RTPTransmissionStatsDB();
+
+private:
+ void add(u_int32_t SSRC, RTPTransmissionStats* stats);
+
+private:
+ friend class Iterator;
+ unsigned fNumReceivers;
+ RTPSink& fOurRTPSink;
+ HashTable* fTable;
+};
+
+class RTPTransmissionStats {
+public:
+ u_int32_t SSRC() const {return fSSRC;}
+ struct sockaddr_in const& lastFromAddress() const {return fLastFromAddress;}
+ unsigned lastPacketNumReceived() const {return fLastPacketNumReceived;}
+ unsigned firstPacketNumReported() const {return fFirstPacketNumReported;}
+ unsigned totNumPacketsLost() const {return fTotNumPacketsLost;}
+ unsigned jitter() const {return fJitter;}
+ unsigned lastSRTime() const { return fLastSRTime; }
+ unsigned diffSR_RRTime() const { return fDiffSR_RRTime; }
+ unsigned roundTripDelay() const;
+ // The round-trip delay (in units of 1/65536 seconds) computed from
+ // the most recently-received RTCP RR packet.
+ struct timeval const& timeCreated() const {return fTimeCreated;}
+ struct timeval const& lastTimeReceived() const {return fTimeReceived;}
+ void getTotalOctetCount(u_int32_t& hi, u_int32_t& lo);
+ void getTotalPacketCount(u_int32_t& hi, u_int32_t& lo);
+
+ // Information which requires at least two RRs to have been received:
+ unsigned packetsReceivedSinceLastRR() const;
+ u_int8_t packetLossRatio() const { return fPacketLossRatio; }
+ // as an 8-bit fixed-point number
+ int packetsLostBetweenRR() const;
+
+private:
+ // called only by RTPTransmissionStatsDB:
+ friend class RTPTransmissionStatsDB;
+ RTPTransmissionStats(RTPSink& rtpSink, u_int32_t SSRC);
+ virtual ~RTPTransmissionStats();
+
+ void noteIncomingRR(struct sockaddr_in const& lastFromAddress,
+ unsigned lossStats, unsigned lastPacketNumReceived,
+ unsigned jitter,
+ unsigned lastSRTime, unsigned diffSR_RRTime);
+
+private:
+ RTPSink& fOurRTPSink;
+ u_int32_t fSSRC;
+ struct sockaddr_in fLastFromAddress;
+ unsigned fLastPacketNumReceived;
+ u_int8_t fPacketLossRatio;
+ unsigned fTotNumPacketsLost;
+ unsigned fJitter;
+ unsigned fLastSRTime;
+ unsigned fDiffSR_RRTime;
+ struct timeval fTimeCreated, fTimeReceived;
+ Boolean fAtLeastTwoRRsHaveBeenReceived;
+ unsigned fOldLastPacketNumReceived;
+ unsigned fOldTotNumPacketsLost;
+ Boolean fFirstPacket;
+ unsigned fFirstPacketNumReported;
+ u_int32_t fLastOctetCount, fTotalOctetCount_hi, fTotalOctetCount_lo;
+ u_int32_t fLastPacketCount, fTotalPacketCount_hi, fTotalPacketCount_lo;
+};
+
+#endif
diff --git a/liveMedia/include/RTPSource.hh b/liveMedia/include/RTPSource.hh
new file mode 100644
index 0000000..82ba4c5
--- /dev/null
+++ b/liveMedia/include/RTPSource.hh
@@ -0,0 +1,271 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP Sources
+// C++ header
+
+#ifndef _RTP_SOURCE_HH
+#define _RTP_SOURCE_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+#ifndef _RTP_INTERFACE_HH
+#include "RTPInterface.hh"
+#endif
+#ifndef _SRTP_CRYPTOGRAPHIC_CONTEXT_HH
+#include "SRTPCryptographicContext.hh"
+#endif
+
+class RTPReceptionStatsDB; // forward
+
+class RTPSource: public FramedSource {
+public:
+ static Boolean lookupByName(UsageEnvironment& env, char const* sourceName,
+ RTPSource*& resultSource);
+
+ Boolean curPacketMarkerBit() const { return fCurPacketMarkerBit; }
+
+ unsigned char rtpPayloadFormat() const { return fRTPPayloadFormat; }
+
+ virtual Boolean hasBeenSynchronizedUsingRTCP();
+
+ Groupsock* RTPgs() const { return fRTPInterface.gs(); }
+
+ virtual void setPacketReorderingThresholdTime(unsigned uSeconds) = 0;
+
+ void setCrypto(SRTPCryptographicContext* crypto) { fCrypto = crypto; }
+
+ // used by RTCP:
+ u_int32_t SSRC() const { return fSSRC; }
+ // Note: This is *our* SSRC, not the SSRC in incoming RTP packets.
+ // later need a means of changing the SSRC if there's a collision #####
+ void registerForMultiplexedRTCPPackets(class RTCPInstance* rtcpInstance) {
+ fRTCPInstanceForMultiplexedRTCPPackets = rtcpInstance;
+ }
+ void deregisterForMultiplexedRTCPPackets() { registerForMultiplexedRTCPPackets(NULL); }
+
+ unsigned timestampFrequency() const {return fTimestampFrequency;}
+
+ RTPReceptionStatsDB& receptionStatsDB() const {
+ return *fReceptionStatsDB;
+ }
+
+ u_int32_t lastReceivedSSRC() const { return fLastReceivedSSRC; }
+ // Note: This is the SSRC in the most recently received RTP packet; not *our* SSRC
+
+ Boolean& enableRTCPReports() { return fEnableRTCPReports; }
+ Boolean const& enableRTCPReports() const { return fEnableRTCPReports; }
+
+ void setStreamSocket(int sockNum, unsigned char streamChannelId) {
+    // hack to allow sending RTP over TCP (RFC 2326, section 10.12)
+ fRTPInterface.setStreamSocket(sockNum, streamChannelId);
+ }
+
+ void setAuxilliaryReadHandler(AuxHandlerFunc* handlerFunc,
+ void* handlerClientData) {
+ fRTPInterface.setAuxilliaryReadHandler(handlerFunc,
+ handlerClientData);
+ }
+
+ // Note that RTP receivers will usually not need to call either of the following two functions, because
+ // RTP sequence numbers and timestamps are usually not useful to receivers.
+ // (Our implementation of RTP reception already does all needed handling of RTP sequence numbers and timestamps.)
+ u_int16_t curPacketRTPSeqNum() const { return fCurPacketRTPSeqNum; }
+ u_int32_t curPacketRTPTimestamp() const { return fCurPacketRTPTimestamp; }
+
+protected:
+ RTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat, u_int32_t rtpTimestampFrequency);
+ // abstract base class
+ virtual ~RTPSource();
+
+protected:
+ RTPInterface fRTPInterface;
+ u_int16_t fCurPacketRTPSeqNum;
+ u_int32_t fCurPacketRTPTimestamp;
+ Boolean fCurPacketMarkerBit;
+ Boolean fCurPacketHasBeenSynchronizedUsingRTCP;
+ u_int32_t fLastReceivedSSRC;
+ class RTCPInstance* fRTCPInstanceForMultiplexedRTCPPackets;
+ SRTPCryptographicContext* fCrypto;
+
+private:
+ // redefined virtual functions:
+ virtual Boolean isRTPSource() const;
+ virtual void getAttributes() const;
+
+private:
+ unsigned char fRTPPayloadFormat;
+ unsigned fTimestampFrequency;
+ u_int32_t fSSRC;
+ Boolean fEnableRTCPReports; // whether RTCP "RR" reports should be sent for this source (default: True)
+
+ RTPReceptionStatsDB* fReceptionStatsDB;
+};
+
+
+class RTPReceptionStats; // forward
+
+class RTPReceptionStatsDB {
+public:
+ unsigned totNumPacketsReceived() const { return fTotNumPacketsReceived; }
+ unsigned numActiveSourcesSinceLastReset() const {
+ return fNumActiveSourcesSinceLastReset;
+ }
+
+ void reset();
+ // resets periodic stats (called each time they're used to
+ // generate a reception report)
+
+ class Iterator {
+ public:
+ Iterator(RTPReceptionStatsDB& receptionStatsDB);
+ virtual ~Iterator();
+
+ RTPReceptionStats* next(Boolean includeInactiveSources = False);
+ // NULL if none
+
+ private:
+ HashTable::Iterator* fIter;
+ };
+
+ // The following is called whenever a RTP packet is received:
+ void noteIncomingPacket(u_int32_t SSRC, u_int16_t seqNum,
+ u_int32_t rtpTimestamp,
+ unsigned timestampFrequency,
+ Boolean useForJitterCalculation,
+ struct timeval& resultPresentationTime,
+ Boolean& resultHasBeenSyncedUsingRTCP,
+ unsigned packetSize /* payload only */);
+
+ // The following is called whenever a RTCP SR packet is received:
+ void noteIncomingSR(u_int32_t SSRC,
+ u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW,
+ u_int32_t rtpTimestamp);
+
+ // The following is called when a RTCP BYE packet is received:
+ void removeRecord(u_int32_t SSRC);
+
+ RTPReceptionStats* lookup(u_int32_t SSRC) const;
+
+protected: // constructor and destructor, called only by RTPSource:
+ friend class RTPSource;
+ RTPReceptionStatsDB();
+ virtual ~RTPReceptionStatsDB();
+
+protected:
+ void add(u_int32_t SSRC, RTPReceptionStats* stats);
+
+protected:
+ friend class Iterator;
+ unsigned fNumActiveSourcesSinceLastReset;
+
+private:
+ HashTable* fTable;
+ unsigned fTotNumPacketsReceived; // for all SSRCs
+};
+
+class RTPReceptionStats {
+public:
+ u_int32_t SSRC() const { return fSSRC; }
+ unsigned numPacketsReceivedSinceLastReset() const {
+ return fNumPacketsReceivedSinceLastReset;
+ }
+ unsigned totNumPacketsReceived() const { return fTotNumPacketsReceived; }
+ double totNumKBytesReceived() const;
+
+ unsigned totNumPacketsExpected() const {
+ return (fHighestExtSeqNumReceived - fBaseExtSeqNumReceived) + 1;
+ }
+
+ unsigned baseExtSeqNumReceived() const { return fBaseExtSeqNumReceived; }
+ unsigned lastResetExtSeqNumReceived() const {
+ return fLastResetExtSeqNumReceived;
+ }
+ unsigned highestExtSeqNumReceived() const {
+ return fHighestExtSeqNumReceived;
+ }
+
+ unsigned jitter() const;
+
+ unsigned lastReceivedSR_NTPmsw() const { return fLastReceivedSR_NTPmsw; }
+ unsigned lastReceivedSR_NTPlsw() const { return fLastReceivedSR_NTPlsw; }
+ struct timeval const& lastReceivedSR_time() const {
+ return fLastReceivedSR_time;
+ }
+
+ unsigned minInterPacketGapUS() const { return fMinInterPacketGapUS; }
+ unsigned maxInterPacketGapUS() const { return fMaxInterPacketGapUS; }
+ struct timeval const& totalInterPacketGaps() const {
+ return fTotalInterPacketGaps;
+ }
+
+protected:
+ // called only by RTPReceptionStatsDB:
+ friend class RTPReceptionStatsDB;
+ RTPReceptionStats(u_int32_t SSRC, u_int16_t initialSeqNum);
+ RTPReceptionStats(u_int32_t SSRC);
+ virtual ~RTPReceptionStats();
+
+private:
+ void noteIncomingPacket(u_int16_t seqNum, u_int32_t rtpTimestamp,
+ unsigned timestampFrequency,
+ Boolean useForJitterCalculation,
+ struct timeval& resultPresentationTime,
+ Boolean& resultHasBeenSyncedUsingRTCP,
+ unsigned packetSize /* payload only */);
+ void noteIncomingSR(u_int32_t ntpTimestampMSW, u_int32_t ntpTimestampLSW,
+ u_int32_t rtpTimestamp);
+ void init(u_int32_t SSRC);
+ void initSeqNum(u_int16_t initialSeqNum);
+ void reset();
+ // resets periodic stats (called each time they're used to
+ // generate a reception report)
+
+protected:
+ u_int32_t fSSRC;
+ unsigned fNumPacketsReceivedSinceLastReset;
+ unsigned fTotNumPacketsReceived;
+ u_int32_t fTotBytesReceived_hi, fTotBytesReceived_lo;
+ Boolean fHaveSeenInitialSequenceNumber;
+ unsigned fBaseExtSeqNumReceived;
+ unsigned fLastResetExtSeqNumReceived;
+ unsigned fHighestExtSeqNumReceived;
+ int fLastTransit; // used in the jitter calculation
+ u_int32_t fPreviousPacketRTPTimestamp;
+ double fJitter;
+ // The following are recorded whenever we receive a RTCP SR for this SSRC:
+ unsigned fLastReceivedSR_NTPmsw; // NTP timestamp (from SR), most-signif
+ unsigned fLastReceivedSR_NTPlsw; // NTP timestamp (from SR), least-signif
+ struct timeval fLastReceivedSR_time;
+ struct timeval fLastPacketReceptionTime;
+ unsigned fMinInterPacketGapUS, fMaxInterPacketGapUS;
+ struct timeval fTotalInterPacketGaps;
+
+private:
+ // Used to convert from RTP timestamp to 'wall clock' time:
+ Boolean fHasBeenSynchronized;
+ u_int32_t fSyncTimestamp;
+ struct timeval fSyncTime;
+};
+
+
+Boolean seqNumLT(u_int16_t s1, u_int16_t s2);
+ // a 'less-than' on 16-bit sequence numbers
+
+#endif
diff --git a/liveMedia/include/RTSPClient.hh b/liveMedia/include/RTSPClient.hh
new file mode 100644
index 0000000..e398a75
--- /dev/null
+++ b/liveMedia/include/RTSPClient.hh
@@ -0,0 +1,408 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTSP client - for a single "rtsp://" URL
+// C++ header
+
+#ifndef _RTSP_CLIENT_HH
+#define _RTSP_CLIENT_HH
+
+#ifndef _MEDIA_SESSION_HH
+#include "MediaSession.hh"
+#endif
+#ifndef _NET_ADDRESS_HH
+#include "NetAddress.hh"
+#endif
+#ifndef _DIGEST_AUTHENTICATION_HH
+#include "DigestAuthentication.hh"
+#endif
+#ifndef _TLS_STATE_HH
+#include "TLSState.hh"
+#endif
+#ifndef OMIT_REGISTER_HANDLING
+#ifndef _RTSP_SERVER_HH
+#include "RTSPServer.hh" // For the optional "HandlerForREGISTERCommand" mini-server
+#endif
+#endif
+
+class RTSPClient: public Medium {
+public:
+ static RTSPClient* createNew(UsageEnvironment& env, char const* rtspURL,
+ int verbosityLevel = 0,
+ char const* applicationName = NULL,
+ portNumBits tunnelOverHTTPPortNum = 0,
+ int socketNumToServer = -1);
+ // If "tunnelOverHTTPPortNum" is non-zero, we tunnel RTSP (and RTP)
+ // over a HTTP connection with the given port number, using the technique
+ // described in Apple's document <http://developer.apple.com/documentation/QuickTime/QTSS/Concepts/chapter_2_section_14.html>
+ // If "socketNumToServer" is >= 0, then it is the socket number of an already-existing TCP connection to the server.
+ // (In this case, "rtspURL" must point to the socket's endpoint, so that it can be accessed via the socket.)
+
+ typedef void (responseHandler)(RTSPClient* rtspClient,
+ int resultCode, char* resultString);
+ // A function that is called in response to a RTSP command. The parameters are as follows:
+ // "rtspClient": The "RTSPClient" object on which the original command was issued.
+ // "resultCode": If zero, then the command completed successfully. If non-zero, then the command did not complete
+ // successfully, and "resultCode" indicates the error, as follows:
+ // A positive "resultCode" is a RTSP error code (for example, 404 means "not found")
+ // A negative "resultCode" indicates a socket/network error; 0-"resultCode" is the standard "errno" code.
+ // "resultString": A ('\0'-terminated) string returned along with the response, or else NULL.
+ // In particular:
+ // "resultString" for a successful "DESCRIBE" command will be the media session's SDP description.
+ // "resultString" for a successful "OPTIONS" command will be a list of allowed commands.
+ // Note that this string can be present (i.e., not NULL) even if "resultCode" is non-zero - i.e., an error message.
+ // Also, "resultString" can be NULL, even if "resultCode" is zero (e.g., if the RTSP command succeeded, but without
+ // including an appropriate result header).
+ // Note also that this string is dynamically allocated, and must be freed by the handler (or the caller)
+ // - using "delete[]".
+
+ unsigned sendDescribeCommand(responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues a RTSP "DESCRIBE" command, then returns the "CSeq" sequence number that was used in the command.
+ // The (programmer-supplied) "responseHandler" function is called later to handle the response
+ // (or is called immediately - with an error code - if the command cannot be sent).
+ // "authenticator" (optional) is used for access control. If you have username and password strings, you can use this by
+ // passing an actual parameter that you created by creating an "Authenticator(username, password) object".
+ // (Note that if you supply a non-NULL "authenticator" parameter, you need do this only for the first command you send.)
+
+ unsigned sendOptionsCommand(responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues a RTSP "OPTIONS" command, then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendAnnounceCommand(char const* sdpDescription, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues a RTSP "ANNOUNCE" command (with "sdpDescription" as parameter),
+ // then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendSetupCommand(MediaSubsession& subsession, responseHandler* responseHandler,
+ Boolean streamOutgoing = False,
+ Boolean streamUsingTCP = False,
+ Boolean forceMulticastOnUnspecified = False,
+ Authenticator* authenticator = NULL);
+ // Issues a RTSP "SETUP" command, then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendPlayCommand(MediaSession& session, responseHandler* responseHandler,
+ double start = 0.0f, double end = -1.0f, float scale = 1.0f,
+ Authenticator* authenticator = NULL);
+ // Issues an aggregate RTSP "PLAY" command on "session", then returns the "CSeq" sequence number that was used in the command.
+ // (Note: start=-1 means 'resume'; end=-1 means 'play to end')
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+ unsigned sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler,
+ double start = 0.0f, double end = -1.0f, float scale = 1.0f,
+ Authenticator* authenticator = NULL);
+ // Issues a RTSP "PLAY" command on "subsession", then returns the "CSeq" sequence number that was used in the command.
+ // (Note: start=-1 means 'resume'; end=-1 means 'play to end')
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ // Alternative forms of "sendPlayCommand()", used to send "PLAY" commands that include an 'absolute' time range:
+ // (The "absStartTime" string (and "absEndTime" string, if present) *must* be of the form
+ // "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z")
+ unsigned sendPlayCommand(MediaSession& session, responseHandler* responseHandler,
+ char const* absStartTime, char const* absEndTime = NULL, float scale = 1.0f,
+ Authenticator* authenticator = NULL);
+ unsigned sendPlayCommand(MediaSubsession& subsession, responseHandler* responseHandler,
+ char const* absStartTime, char const* absEndTime = NULL, float scale = 1.0f,
+ Authenticator* authenticator = NULL);
+
+ unsigned sendPauseCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues an aggregate RTSP "PAUSE" command on "session", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+ unsigned sendPauseCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues a RTSP "PAUSE" command on "subsession", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendRecordCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues an aggregate RTSP "RECORD" command on "session", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+ unsigned sendRecordCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues a RTSP "RECORD" command on "subsession", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendTeardownCommand(MediaSession& session, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues an aggregate RTSP "TEARDOWN" command on "session", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+ unsigned sendTeardownCommand(MediaSubsession& subsession, responseHandler* responseHandler, Authenticator* authenticator = NULL);
+ // Issues a RTSP "TEARDOWN" command on "subsession", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendSetParameterCommand(MediaSession& session, responseHandler* responseHandler,
+ char const* parameterName, char const* parameterValue,
+ Authenticator* authenticator = NULL);
+ // Issues an aggregate RTSP "SET_PARAMETER" command on "session", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ unsigned sendGetParameterCommand(MediaSession& session, responseHandler* responseHandler, char const* parameterName,
+ Authenticator* authenticator = NULL);
+ // Issues an aggregate RTSP "GET_PARAMETER" command on "session", then returns the "CSeq" sequence number that was used in the command.
+ // (The "responseHandler" and "authenticator" parameters are as described for "sendDescribeCommand".)
+
+ void sendDummyUDPPackets(MediaSession& session, unsigned numDummyPackets = 2);
+ void sendDummyUDPPackets(MediaSubsession& subsession, unsigned numDummyPackets = 2);
+ // Sends short 'dummy' (i.e., non-RTP or RTCP) UDP packets towards the server, to increase
+ // the likelihood of RTP/RTCP packets from the server reaching us if we're behind a NAT.
+ // (If we requested RTP-over-TCP streaming, then these functions have no effect.)
+ // Our implementation automatically does this just prior to sending each "PLAY" command;
+ // You should not call these functions yourself unless you know what you're doing.
+
+ void setSpeed(MediaSession& session, float speed = 1.0f);
+ // Set (recorded) media download speed to given value to support faster download using 'Speed:'
+ // option on 'PLAY' command.
+
+ Boolean changeResponseHandler(unsigned cseq, responseHandler* newResponseHandler);
+ // Changes the response handler for the previously-performed command (whose operation returned "cseq").
+ // (To turn off any response handling for the command, use a "newResponseHandler" value of NULL. This might be done as part
+ // of an implementation of a 'timeout handler' on the command, for example.)
+ // This function returns True iff "cseq" was for a valid previously-performed command (whose response is still unhandled).
+
+ int socketNum() const { return fInputSocketNum; }
+
+ static Boolean lookupByName(UsageEnvironment& env,
+ char const* sourceName,
+ RTSPClient*& resultClient);
+
+ Boolean parseRTSPURL(char const* url,
+ char*& username, char*& password, NetAddress& address, portNumBits& portNum, char const** urlSuffix = NULL);
+ // Parses "url" as "rtsp://[<username>[:<password>]@]<server-address-or-name>[:<port>][/<stream-name>]"
+ // (Note that the returned "username" and "password" are either NULL, or heap-allocated strings that the caller must later delete[].)
+
+ void setUserAgentString(char const* userAgentName);
+ // sets an alternative string to be used in RTSP "User-Agent:" headers
+
+ void disallowBasicAuthentication() { fAllowBasicAuthentication = False; }
+ // call this if you don't want the server to request 'Basic' authentication
+ // (which would cause the client to send usernames and passwords over the net).
+
+ unsigned sessionTimeoutParameter() const { return fSessionTimeoutParameter; }
+
+ char const* url() const { return fBaseURL; }
+
+ void useTLS() { fTLS.isNeeded = True; }
+
+ static unsigned responseBufferSize;
+
+public: // Some compilers complain if this is "private:"
+ // The state of a request-in-progress:
+ class RequestRecord {
+ public:
+ RequestRecord(unsigned cseq, char const* commandName, responseHandler* handler,
+ MediaSession* session = NULL, MediaSubsession* subsession = NULL, u_int32_t booleanFlags = 0,
+ double start = 0.0f, double end = -1.0f, float scale = 1.0f, char const* contentStr = NULL);
+ RequestRecord(unsigned cseq, responseHandler* handler,
+ char const* absStartTime, char const* absEndTime = NULL, float scale = 1.0f,
+ MediaSession* session = NULL, MediaSubsession* subsession = NULL);
+ // alternative constructor for creating "PLAY" requests that include 'absolute' time values
+ virtual ~RequestRecord();
+
+ RequestRecord*& next() { return fNext; }
+ unsigned& cseq() { return fCSeq; }
+ char const* commandName() const { return fCommandName; }
+ MediaSession* session() const { return fSession; }
+ MediaSubsession* subsession() const { return fSubsession; }
+ u_int32_t booleanFlags() const { return fBooleanFlags; }
+ double start() const { return fStart; }
+ double end() const { return fEnd; }
+ char const* absStartTime() const { return fAbsStartTime; }
+ char const* absEndTime() const { return fAbsEndTime; }
+ float scale() const { return fScale; }
+ char* contentStr() const { return fContentStr; }
+ responseHandler*& handler() { return fHandler; }
+
+ private:
+ RequestRecord* fNext;
+ unsigned fCSeq;
+ char const* fCommandName;
+ MediaSession* fSession;
+ MediaSubsession* fSubsession;
+ u_int32_t fBooleanFlags;
+ double fStart, fEnd;
+ char *fAbsStartTime, *fAbsEndTime; // used for optional 'absolute' (i.e., "time=") range specifications
+ float fScale;
+ char* fContentStr;
+ responseHandler* fHandler;
+ };
+
+protected:
+ RTSPClient(UsageEnvironment& env, char const* rtspURL,
+ int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum, int socketNumToServer);
+ // called only by createNew();
+ virtual ~RTSPClient();
+
+ void reset();
+ void setBaseURL(char const* url);
+ int grabSocket(); // allows a subclass to reuse our input socket, so that it won't get closed when we're deleted
+ virtual unsigned sendRequest(RequestRecord* request);
+ virtual Boolean setRequestFields(RequestRecord* request,
+ char*& cmdURL, Boolean& cmdURLWasAllocated,
+ char const*& protocolStr,
+ char*& extraHeaders, Boolean& extraHeadersWereAllocated);
+ // used to implement "sendRequest()"; subclasses may reimplement this (e.g., when implementing a new command name)
+ virtual int connectToServer(int socketNum, portNumBits remotePortNum); // used to implement "openConnection()"; result values: -1: failure; 0: pending; 1: success
+
+private: // redefined virtual functions
+ virtual Boolean isRTSPClient() const;
+
+private:
+ class RequestQueue {
+ public:
+ RequestQueue();
+ RequestQueue(RequestQueue& origQueue); // moves the queue contents to the new queue
+ virtual ~RequestQueue();
+
+ void enqueue(RequestRecord* request); // "request" must not be NULL
+ RequestRecord* dequeue();
+ void putAtHead(RequestRecord* request); // "request" must not be NULL
+ RequestRecord* findByCSeq(unsigned cseq);
+ Boolean isEmpty() const { return fHead == NULL; }
+ void reset();
+
+ private:
+ RequestRecord* fHead;
+ RequestRecord* fTail;
+ };
+
+ void resetTCPSockets();
+ void resetResponseBuffer();
+ int openConnection(); // result values: -1: failure; 0: pending; 1: success
+ char* createAuthenticatorString(char const* cmd, char const* url);
+ char* createBlocksizeString(Boolean streamUsingTCP);
+ char* createKeyMgmtString(char const* url, MediaSubsession const& subsession);
+ void handleRequestError(RequestRecord* request);
+ Boolean parseResponseCode(char const* line, unsigned& responseCode, char const*& responseString);
+ void handleIncomingRequest();
+ static Boolean checkForHeader(char const* line, char const* headerName, unsigned headerNameLength, char const*& headerParams);
+ Boolean parseTransportParams(char const* paramsStr,
+ char*& serverAddressStr, portNumBits& serverPortNum,
+ unsigned char& rtpChannelId, unsigned char& rtcpChannelId);
+ Boolean parseScaleParam(char const* paramStr, float& scale);
+ Boolean parseSpeedParam(char const* paramStr, float& speed);
+ Boolean parseRTPInfoParams(char const*& paramStr, u_int16_t& seqNum, u_int32_t& timestamp);
+ Boolean handleSETUPResponse(MediaSubsession& subsession, char const* sessionParamsStr, char const* transportParamsStr,
+ Boolean streamUsingTCP);
+ Boolean handlePLAYResponse(MediaSession* session, MediaSubsession* subsession,
+ char const* scaleParamsStr, const char* speedParamsStr,
+ char const* rangeParamsStr, char const* rtpInfoParamsStr);
+ Boolean handleTEARDOWNResponse(MediaSession& session, MediaSubsession& subsession);
+ Boolean handleGET_PARAMETERResponse(char const* parameterName, char*& resultValueString, char* resultValueStringEnd);
+ Boolean handleAuthenticationFailure(char const* wwwAuthenticateParamsStr);
+ Boolean resendCommand(RequestRecord* request);
+ char const* sessionURL(MediaSession const& session) const;
+ static void handleAlternativeRequestByte(void*, u_int8_t requestByte);
+ void handleAlternativeRequestByte1(u_int8_t requestByte);
+ void constructSubsessionURL(MediaSubsession const& subsession,
+ char const*& prefix,
+ char const*& separator,
+ char const*& suffix);
+
+ // Support for tunneling RTSP-over-HTTP:
+ Boolean setupHTTPTunneling1(); // send the HTTP "GET"
+ static void responseHandlerForHTTP_GET(RTSPClient* rtspClient, int responseCode, char* responseString);
+ void responseHandlerForHTTP_GET1(int responseCode, char* responseString);
+ Boolean setupHTTPTunneling2(); // send the HTTP "POST"
+
+ // Support for asynchronous connections to the server:
+ static void connectionHandler(void*, int /*mask*/);
+ void connectionHandler1();
+
+ // Support for handling data sent back by a server:
+ static void incomingDataHandler(void*, int /*mask*/);
+ void incomingDataHandler1();
+ void handleResponseBytes(int newBytesRead);
+
+ // Writing/reading data over a (already set-up) connection:
+ int write(const char* data, unsigned count);
+ int read(u_int8_t* buffer, unsigned bufferSize);
+
+public:
+ u_int16_t desiredMaxIncomingPacketSize;
+ // If set to a value >0, then a "Blocksize:" header with this value (minus an allowance for
+ // IP, UDP, and RTP headers) will be sent with each "SETUP" request.
+
+protected:
+ int fVerbosityLevel;
+ unsigned fCSeq; // sequence number, used in consecutive requests
+ Authenticator fCurrentAuthenticator;
+ Boolean fAllowBasicAuthentication;
+ netAddressBits fServerAddress;
+
+private:
+ portNumBits fTunnelOverHTTPPortNum;
+ char* fUserAgentHeaderStr;
+ unsigned fUserAgentHeaderStrLen;
+ int fInputSocketNum, fOutputSocketNum;
+ char* fBaseURL;
+ unsigned char fTCPStreamIdCount; // used for (optional) RTP/TCP
+ char* fLastSessionId;
+ unsigned fSessionTimeoutParameter; // optionally set in response "Session:" headers
+ char* fResponseBuffer;
+ unsigned fResponseBytesAlreadySeen, fResponseBufferBytesLeft;
+ RequestQueue fRequestsAwaitingConnection, fRequestsAwaitingHTTPTunneling, fRequestsAwaitingResponse;
+
+ // Support for tunneling RTSP-over-HTTP:
+ char fSessionCookie[33];
+ unsigned fSessionCookieCounter;
+ Boolean fHTTPTunnelingConnectionIsPending;
+
+ // Optional support for TLS:
+ TLSState fTLS;
+ friend class TLSState;
+};
+
+
+#ifndef OMIT_REGISTER_HANDLING
+////////// HandlerServerForREGISTERCommand /////////
+
+// A simple server that creates a new "RTSPClient" object whenever a "REGISTER" request arrives (specifying the "rtsp://" URL
+// of a stream). The new "RTSPClient" object will be created with the specified URL, and passed to the provided handler function.
+
+typedef void onRTSPClientCreationFunc(RTSPClient* newRTSPClient, Boolean requestStreamingOverTCP);
+
+class HandlerServerForREGISTERCommand: public RTSPServer {
+public:
+ static HandlerServerForREGISTERCommand* createNew(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc,
+ Port ourPort = 0, UserAuthenticationDatabase* authDatabase = NULL,
+ int verbosityLevel = 0, char const* applicationName = NULL);
+ // If ourPort.num() == 0, we'll choose the port number ourself. (Use the following function to get it.)
+ portNumBits serverPortNum() const { return ntohs(fServerPort.num()); }
+
+protected:
+ HandlerServerForREGISTERCommand(UsageEnvironment& env, onRTSPClientCreationFunc* creationFunc, int ourSocket, Port ourPort,
+ UserAuthenticationDatabase* authDatabase, int verbosityLevel, char const* applicationName);
+ // called only by createNew();
+ virtual ~HandlerServerForREGISTERCommand();
+
+ virtual RTSPClient* createNewRTSPClient(char const* rtspURL, int verbosityLevel, char const* applicationName,
+ int socketNumToServer);
+ // This function - by default - creates a (base) "RTSPClient" object. If you want to create a subclass
+ // of "RTSPClient" instead, then subclass this class, and redefine this virtual function.
+
+protected: // redefined virtual functions
+ virtual char const* allowedCommandNames(); // "OPTIONS", "REGISTER", and (perhaps) "DEREGISTER" only
+ virtual Boolean weImplementREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+ char const* proxyURLSuffix, char*& responseStr);
+ // redefined to return True (for cmd=="REGISTER")
+ virtual void implementCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+ char const* url, char const* urlSuffix, int socketToRemoteServer,
+ Boolean deliverViaTCP, char const* proxyURLSuffix);
+
+private:
+ onRTSPClientCreationFunc* fCreationFunc;
+ int fVerbosityLevel;
+ char* fApplicationName;
+};
+#endif
+
+#endif
diff --git a/liveMedia/include/RTSPCommon.hh b/liveMedia/include/RTSPCommon.hh
new file mode 100644
index 0000000..3d38fa3
--- /dev/null
+++ b/liveMedia/include/RTSPCommon.hh
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Common routines used by both RTSP clients and servers
+// C++ header
+
+#ifndef _RTSP_COMMON_HH
+#define _RTSP_COMMON_HH
+
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+
+#ifndef _MEDIA_HH
+#include <Media.hh> // includes some definitions perhaps needed for Borland compilers?
+#endif
+
+#if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4)
+#define _strncasecmp _strnicmp
+#define snprintf _snprintf
+#else
+#define _strncasecmp strncasecmp
+#endif
+
+#define RTSP_PARAM_STRING_MAX 200
+
+Boolean parseRTSPRequestString(char const *reqStr, unsigned reqStrSize,
+ char *resultCmdName,
+ unsigned resultCmdNameMaxSize,
+ char* resultURLPreSuffix,
+ unsigned resultURLPreSuffixMaxSize,
+ char* resultURLSuffix,
+ unsigned resultURLSuffixMaxSize,
+ char* resultCSeq,
+ unsigned resultCSeqMaxSize,
+ char* resultSessionId,
+ unsigned resultSessionIdMaxSize,
+ unsigned& contentLength);
+
+Boolean parseRangeParam(char const* paramStr, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime, Boolean& startTimeIsNow);
+Boolean parseRangeHeader(char const* buf, double& rangeStart, double& rangeEnd, char*& absStartTime, char*& absEndTime, Boolean& startTimeIsNow);
+
+Boolean parseScaleHeader(char const* buf, float& scale);
+
+Boolean RTSPOptionIsSupported(char const* commandName, char const* optionsResponseString);
+ // Returns True iff the RTSP command "commandName" is mentioned as one of the commands supported in "optionsResponseString"
+ // (which should be the 'resultString' from a previous RTSP "OPTIONS" request).
+
+char const* dateHeader(); // A "Date:" header that can be used in a RTSP (or HTTP) response
+
+#endif
diff --git a/liveMedia/include/RTSPRegisterSender.hh b/liveMedia/include/RTSPRegisterSender.hh
new file mode 100644
index 0000000..6d68bd0
--- /dev/null
+++ b/liveMedia/include/RTSPRegisterSender.hh
@@ -0,0 +1,138 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Special objects which, when created, send a custom RTSP "REGISTER" (or "DEREGISTER") command
+// to a specified client.
+// C++ header
+
+#ifndef _RTSP_REGISTER_SENDER_HH
+#define _RTSP_REGISTER_SENDER_HH
+
+#ifndef _RTSP_CLIENT_HH
+#include "RTSPClient.hh"
+#endif
+
+class RTSPRegisterOrDeregisterSender: public RTSPClient {
+public:
+ virtual ~RTSPRegisterOrDeregisterSender();
+protected: // we're a virtual base class
+ RTSPRegisterOrDeregisterSender(UsageEnvironment& env,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum,
+ Authenticator* authenticator,
+ int verbosityLevel, char const* applicationName);
+
+public: // Some compilers complain if this is "protected:"
+ // A subclass of "RTSPClient::RequestRecord", specific to our "REGISTER" and "DEREGISTER" commands:
+ class RequestRecord_REGISTER_or_DEREGISTER: public RTSPClient::RequestRecord {
+ public:
+ RequestRecord_REGISTER_or_DEREGISTER(unsigned cseq, char const* cmdName, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToRegisterOrDeregister, char const* proxyURLSuffix);
+ virtual ~RequestRecord_REGISTER_or_DEREGISTER();
+
+ char const* proxyURLSuffix() const { return fProxyURLSuffix; }
+
+ protected:
+ char* fRTSPURLToRegisterOrDeregister;
+ char* fProxyURLSuffix;
+ };
+
+protected:
+ portNumBits fRemoteClientPortNum;
+};
+
+//////////
+
+class RTSPRegisterSender: public RTSPRegisterOrDeregisterSender {
+public:
+ static RTSPRegisterSender*
+ createNew(UsageEnvironment& env,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister,
+ RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator = NULL,
+ Boolean requestStreamingViaTCP = False, char const* proxyURLSuffix = NULL, Boolean reuseConnection = False,
+ int verbosityLevel = 0, char const* applicationName = NULL);
+
+ void grabConnection(int& sock, struct sockaddr_in& remoteAddress); // so that the socket doesn't get closed when we're deleted
+
+protected:
+ RTSPRegisterSender(UsageEnvironment& env,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToRegister,
+ RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator,
+ Boolean requestStreamingViaTCP, char const* proxyURLSuffix, Boolean reuseConnection,
+ int verbosityLevel, char const* applicationName);
+ // called only by "createNew()"
+ virtual ~RTSPRegisterSender();
+
+ // Redefined virtual functions:
+ virtual Boolean setRequestFields(RequestRecord* request,
+ char*& cmdURL, Boolean& cmdURLWasAllocated,
+ char const*& protocolStr,
+ char*& extraHeaders, Boolean& extraHeadersWereAllocated);
+
+public: // Some compilers complain if this is "protected:"
+ // A subclass of "RequestRecord_REGISTER_or_DEREGISTER", specific to our "REGISTER" command:
+ class RequestRecord_REGISTER: public RTSPRegisterOrDeregisterSender::RequestRecord_REGISTER_or_DEREGISTER {
+ public:
+ RequestRecord_REGISTER(unsigned cseq, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToRegister,
+ Boolean reuseConnection, Boolean requestStreamingViaTCP, char const* proxyURLSuffix);
+ virtual ~RequestRecord_REGISTER();
+
+ char const* rtspURLToRegister() const { return fRTSPURLToRegisterOrDeregister; }
+ Boolean reuseConnection() const { return fReuseConnection; }
+ Boolean requestStreamingViaTCP() const { return fRequestStreamingViaTCP; }
+
+ private:
+ Boolean fReuseConnection, fRequestStreamingViaTCP;
+ };
+};
+
+//////////
+
+class RTSPDeregisterSender: public RTSPRegisterOrDeregisterSender {
+public:
+ static RTSPDeregisterSender*
+ createNew(UsageEnvironment& env,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToDeregister,
+ RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator = NULL,
+ char const* proxyURLSuffix = NULL,
+ int verbosityLevel = 0, char const* applicationName = NULL);
+
+protected:
+ RTSPDeregisterSender(UsageEnvironment& env,
+ char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum, char const* rtspURLToDeregister,
+ RTSPClient::responseHandler* rtspResponseHandler, Authenticator* authenticator,
+ char const* proxyURLSuffix,
+ int verbosityLevel, char const* applicationName);
+ // called only by "createNew()"
+ virtual ~RTSPDeregisterSender();
+
+ // Redefined virtual functions:
+ virtual Boolean setRequestFields(RequestRecord* request,
+ char*& cmdURL, Boolean& cmdURLWasAllocated,
+ char const*& protocolStr,
+ char*& extraHeaders, Boolean& extraHeadersWereAllocated);
+
+public: // Some compilers complain if this is "protected:"
+ // A subclass of "RequestRecord_REGISTER_or_DEREGISTER", specific to our "DEREGISTER" command:
+ class RequestRecord_DEREGISTER: public RTSPRegisterOrDeregisterSender::RequestRecord_REGISTER_or_DEREGISTER {
+ public:
+ RequestRecord_DEREGISTER(unsigned cseq, RTSPClient::responseHandler* rtspResponseHandler, char const* rtspURLToDeregister, char const* proxyURLSuffix);
+ virtual ~RequestRecord_DEREGISTER();
+
+ char const* rtspURLToDeregister() const { return fRTSPURLToRegisterOrDeregister; }
+ };
+};
+
+#endif
diff --git a/liveMedia/include/RTSPServer.hh b/liveMedia/include/RTSPServer.hh
new file mode 100644
index 0000000..313aec0
--- /dev/null
+++ b/liveMedia/include/RTSPServer.hh
@@ -0,0 +1,353 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A RTSP server
+// C++ header
+
+#ifndef _RTSP_SERVER_HH
+#define _RTSP_SERVER_HH
+
+#ifndef _GENERIC_MEDIA_SERVER_HH
+#include "GenericMediaServer.hh"
+#endif
+#ifndef _DIGEST_AUTHENTICATION_HH
+#include "DigestAuthentication.hh"
+#endif
+
+// The core RTSP server class.  (Generic media-serving and client
+// connection/session bookkeeping comes from the "GenericMediaServer" base class.)
+class RTSPServer: public GenericMediaServer {
+public:
+  static RTSPServer* createNew(UsageEnvironment& env, Port ourPort = 554,
+                               UserAuthenticationDatabase* authDatabase = NULL,
+                               unsigned reclamationSeconds = 65);
+  // If ourPort.num() == 0, we'll choose the port number
+  // Note: The caller is responsible for reclaiming "authDatabase"
+  // If "reclamationSeconds" > 0, then the "RTSPClientSession" state for
+  // each client will get reclaimed (and the corresponding RTP stream(s)
+  // torn down) if no RTSP commands - or RTCP "RR" packets - from the
+  // client are received in at least "reclamationSeconds" seconds.
+
+  static Boolean lookupByName(UsageEnvironment& env, char const* name,
+                              RTSPServer*& resultServer);
+
+  // Handler type for the (asynchronous) result of a "registerStream()" call:
+  typedef void (responseHandlerForREGISTER)(RTSPServer* rtspServer, unsigned requestId, int resultCode, char* resultString);
+  unsigned registerStream(ServerMediaSession* serverMediaSession,
+                          char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum,
+                          responseHandlerForREGISTER* responseHandler,
+                          char const* username = NULL, char const* password = NULL,
+                          Boolean receiveOurStreamViaTCP = False,
+                          char const* proxyURLSuffix = NULL);
+  // 'Register' the stream represented by "serverMediaSession" with the given remote client (specified by name and port number).
+  // This is done using our custom "REGISTER" RTSP command.
+  // The function returns a unique number that can be used to identify the request; this number is also passed to "responseHandler".
+  // When a response is received from the remote client (or the "REGISTER" request fails), the specified response handler
+  // (if non-NULL) is called. (Note that the "resultString" passed to the handler was dynamically allocated,
+  // and should be delete[]d by the handler after use.)
+  // If "receiveOurStreamViaTCP" is True, then we're requesting that the remote client access our stream using RTP/RTCP-over-TCP.
+  // (Otherwise, the remote client may choose regular RTP/RTCP-over-UDP streaming.)
+  // "proxyURLSuffix" (optional) is used only when the remote client is also a proxy server.
+  // It tells the proxy server the suffix that it should use in its "rtsp://" URL (when front-end clients access the stream)
+
+  // Handler type for the (asynchronous) result of a "deregisterStream()" call:
+  typedef void (responseHandlerForDEREGISTER)(RTSPServer* rtspServer, unsigned requestId, int resultCode, char* resultString);
+  unsigned deregisterStream(ServerMediaSession* serverMediaSession,
+                            char const* remoteClientNameOrAddress, portNumBits remoteClientPortNum,
+                            responseHandlerForDEREGISTER* responseHandler,
+                            char const* username = NULL, char const* password = NULL,
+                            char const* proxyURLSuffix = NULL);
+  // Used to turn off a previous "registerStream()" - using our custom "DEREGISTER" RTSP command.
+
+  char* rtspURL(ServerMediaSession const* serverMediaSession, int clientSocket = -1) const;
+  // returns a "rtsp://" URL that could be used to access the
+  // specified session (which must already have been added to
+  // us using "addServerMediaSession()").
+  // This string is dynamically allocated; caller should delete[]
+  // (If "clientSocket" is non-negative, then it is used (by calling "getsockname()") to determine
+  // the IP address to be used in the URL.)
+  char* rtspURLPrefix(int clientSocket = -1) const;
+  // like "rtspURL()", except that it returns just the common prefix used by
+  // each session's "rtsp://" URL.
+  // This string is dynamically allocated; caller should delete[]
+
+  UserAuthenticationDatabase* setAuthenticationDatabase(UserAuthenticationDatabase* newDB);
+  // Changes the server's authentication database to "newDB", returning a pointer to the old database (if there was one).
+  // "newDB" may be NULL (you can use this to disable authentication at runtime, if desired).
+
+  void disableStreamingRTPOverTCP() {
+    // Disallow RTP/RTCP-over-TCP (interleaved) streaming to clients:
+    fAllowStreamingRTPOverTCP = False;
+  }
+
+  Boolean setUpTunnelingOverHTTP(Port httpPort);
+  // (Attempts to) enable RTSP-over-HTTP tunneling on the specified port.
+  // Returns True iff the specified port can be used in this way (i.e., it's not already being used for a separate HTTP server).
+  // Note: RTSP-over-HTTP tunneling is described in
+  //  http://mirror.informatimago.com/next/developer.apple.com/quicktime/icefloe/dispatch028.html
+  //  and http://images.apple.com/br/quicktime/pdf/QTSS_Modules.pdf
+  portNumBits httpServerPortNum() const; // in host byte order.  (Returns 0 if not present.)
+
+protected:
+  RTSPServer(UsageEnvironment& env,
+             int ourSocket, Port ourPort,
+             UserAuthenticationDatabase* authDatabase,
+             unsigned reclamationSeconds);
+  // called only by createNew();
+  virtual ~RTSPServer();
+
+  virtual char const* allowedCommandNames(); // used to implement "RTSPClientConnection::handleCmd_OPTIONS()"
+  virtual Boolean weImplementREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+                                      char const* proxyURLSuffix, char*& responseStr);
+  // used to implement "RTSPClientConnection::handleCmd_REGISTER()"
+  // Note: "responseStr" is dynamically allocated (or NULL), and should be delete[]d after the call
+  virtual void implementCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+                                     char const* url, char const* urlSuffix, int socketToRemoteServer,
+                                     Boolean deliverViaTCP, char const* proxyURLSuffix);
+  // used to implement "RTSPClientConnection::handleCmd_REGISTER()"
+
+  virtual UserAuthenticationDatabase* getAuthenticationDatabaseForCommand(char const* cmdName);
+  virtual Boolean specialClientAccessCheck(int clientSocket, struct sockaddr_in& clientAddr,
+                                           char const* urlSuffix);
+  // a hook that allows subclassed servers to do server-specific access checking
+  // on each client (e.g., based on client IP address), without using digest authentication.
+  virtual Boolean specialClientUserAccessCheck(int clientSocket, struct sockaddr_in& clientAddr,
+                                               char const* urlSuffix, char const *username);
+  // another hook that allows subclassed servers to do server-specific access checking
+  // - this time after normal digest authentication has already taken place (and would otherwise allow access).
+  // (This test can only be used to further restrict access, not to grant additional access.)
+
+private: // redefined virtual functions
+  virtual Boolean isRTSPServer() const;
+
+public: // should be protected, but some old compilers complain otherwise
+  // The state of a TCP connection used by a RTSP client:
+  class RTSPClientSession; // forward
+  class RTSPClientConnection: public GenericMediaServer::ClientConnection {
+  public:
+    // A data structure that's used to implement the "REGISTER" command:
+    class ParamsForREGISTER {
+    public:
+      ParamsForREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+                        RTSPClientConnection* ourConnection, char const* url, char const* urlSuffix,
+                        Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix);
+      virtual ~ParamsForREGISTER();
+    private:
+      friend class RTSPClientConnection;
+      char const* fCmd;
+      RTSPClientConnection* fOurConnection;
+      char* fURL;
+      char* fURLSuffix;
+      Boolean fReuseConnection, fDeliverViaTCP;
+      char* fProxyURLSuffix;
+    };
+  protected: // redefined virtual functions:
+    virtual void handleRequestBytes(int newBytesRead);
+
+  protected:
+    RTSPClientConnection(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr);
+    virtual ~RTSPClientConnection();
+
+    friend class RTSPServer;
+    friend class RTSPClientSession;
+
+    // Make the handler functions for each command virtual, to allow subclasses to reimplement them, if necessary:
+    virtual void handleCmd_OPTIONS();
+    // You probably won't need to subclass/reimplement this function; reimplement "RTSPServer::allowedCommandNames()" instead.
+    virtual void handleCmd_GET_PARAMETER(char const* fullRequestStr); // when operating on the entire server
+    virtual void handleCmd_SET_PARAMETER(char const* fullRequestStr); // when operating on the entire server
+    virtual void handleCmd_DESCRIBE(char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr);
+    virtual void handleCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+                                    char const* url, char const* urlSuffix, char const* fullRequestStr,
+                                    Boolean reuseConnection, Boolean deliverViaTCP, char const* proxyURLSuffix);
+    // You probably won't need to subclass/reimplement this function;
+    //     reimplement "RTSPServer::weImplementREGISTER()" and "RTSPServer::implementCmd_REGISTER()" instead.
+    // Handlers for the various RTSP error responses:
+    virtual void handleCmd_bad();
+    virtual void handleCmd_notSupported();
+    virtual void handleCmd_notFound();
+    virtual void handleCmd_sessionNotFound();
+    virtual void handleCmd_unsupportedTransport();
+    // Support for optional RTSP-over-HTTP tunneling:
+    virtual Boolean parseHTTPRequestString(char* resultCmdName, unsigned resultCmdNameMaxSize,
+                                           char* urlSuffix, unsigned urlSuffixMaxSize,
+                                           char* sessionCookie, unsigned sessionCookieMaxSize,
+                                           char* acceptStr, unsigned acceptStrMaxSize);
+    virtual void handleHTTPCmd_notSupported();
+    virtual void handleHTTPCmd_notFound();
+    virtual void handleHTTPCmd_OPTIONS();
+    virtual void handleHTTPCmd_TunnelingGET(char const* sessionCookie);
+    virtual Boolean handleHTTPCmd_TunnelingPOST(char const* sessionCookie, unsigned char const* extraData, unsigned extraDataSize);
+    virtual void handleHTTPCmd_StreamingGET(char const* urlSuffix, char const* fullRequestStr);
+  protected:
+    void resetRequestBuffer();
+    void closeSocketsRTSP();
+    static void handleAlternativeRequestByte(void*, u_int8_t requestByte);
+    void handleAlternativeRequestByte1(u_int8_t requestByte);
+    Boolean authenticationOK(char const* cmdName, char const* urlSuffix, char const* fullRequestStr);
+    void changeClientInputSocket(int newSocketNum, unsigned char const* extraData, unsigned extraDataSize);
+    // used to implement RTSP-over-HTTP tunneling
+    static void continueHandlingREGISTER(ParamsForREGISTER* params);
+    virtual void continueHandlingREGISTER1(ParamsForREGISTER* params);
+
+    // Shortcuts for setting up a RTSP response (prior to sending it):
+    void setRTSPResponse(char const* responseStr);
+    void setRTSPResponse(char const* responseStr, u_int32_t sessionId);
+    void setRTSPResponse(char const* responseStr, char const* contentStr);
+    void setRTSPResponse(char const* responseStr, u_int32_t sessionId, char const* contentStr);
+
+    RTSPServer& fOurRTSPServer; // same as ::fOurServer
+    int& fClientInputSocket; // aliased to ::fOurSocket
+    int fClientOutputSocket; // NOTE(review): presumably equals "fClientInputSocket" except when RTSP-over-HTTP tunneling is in use - confirm in the .cpp
+    Boolean fIsActive;
+    unsigned char* fLastCRLF; // NOTE(review): appears to track the most recently seen CRLF in the request buffer - confirm in the .cpp
+    unsigned fRecursionCount;
+    char const* fCurrentCSeq; // the "CSeq:" value of the request currently being handled
+    Authenticator fCurrentAuthenticator; // used if access control is needed
+    char* fOurSessionCookie; // used for optional RTSP-over-HTTP tunneling
+    unsigned fBase64RemainderCount; // used for optional RTSP-over-HTTP tunneling (possible values: 0,1,2,3)
+  };
+
+  // The state of an individual client session (using one or more sequential TCP connections) handled by a RTSP server:
+  class RTSPClientSession: public GenericMediaServer::ClientSession {
+  protected:
+    RTSPClientSession(RTSPServer& ourServer, u_int32_t sessionId);
+    virtual ~RTSPClientSession();
+
+    friend class RTSPServer;
+    friend class RTSPClientConnection;
+    // Make the handler functions for each command virtual, to allow subclasses to redefine them:
+    virtual void handleCmd_SETUP(RTSPClientConnection* ourClientConnection,
+                                 char const* urlPreSuffix, char const* urlSuffix, char const* fullRequestStr);
+    virtual void handleCmd_withinSession(RTSPClientConnection* ourClientConnection,
+                                         char const* cmdName,
+                                         char const* urlPreSuffix, char const* urlSuffix,
+                                         char const* fullRequestStr);
+    virtual void handleCmd_TEARDOWN(RTSPClientConnection* ourClientConnection,
+                                    ServerMediaSubsession* subsession);
+    virtual void handleCmd_PLAY(RTSPClientConnection* ourClientConnection,
+                                ServerMediaSubsession* subsession, char const* fullRequestStr);
+    virtual void handleCmd_PAUSE(RTSPClientConnection* ourClientConnection,
+                                 ServerMediaSubsession* subsession);
+    virtual void handleCmd_GET_PARAMETER(RTSPClientConnection* ourClientConnection,
+                                         ServerMediaSubsession* subsession, char const* fullRequestStr);
+    virtual void handleCmd_SET_PARAMETER(RTSPClientConnection* ourClientConnection,
+                                         ServerMediaSubsession* subsession, char const* fullRequestStr);
+  protected:
+    void deleteStreamByTrack(unsigned trackNum);
+    void reclaimStreamStates();
+    Boolean isMulticast() const { return fIsMulticast; }
+
+    // Shortcuts for setting up a RTSP response (prior to sending it);
+    // these simply delegate to the corresponding "RTSPClientConnection" member functions:
+    void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr) { ourClientConnection->setRTSPResponse(responseStr); }
+    void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr, u_int32_t sessionId) { ourClientConnection->setRTSPResponse(responseStr, sessionId); }
+    void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr, char const* contentStr) { ourClientConnection->setRTSPResponse(responseStr, contentStr); }
+    void setRTSPResponse(RTSPClientConnection* ourClientConnection, char const* responseStr, u_int32_t sessionId, char const* contentStr) { ourClientConnection->setRTSPResponse(responseStr, sessionId, contentStr); }
+
+  protected:
+    RTSPServer& fOurRTSPServer; // same as ::fOurServer
+    Boolean fIsMulticast, fStreamAfterSETUP;
+    unsigned char fTCPStreamIdCount; // used for (optional) RTP/TCP
+    Boolean usesTCPTransport() const { return fTCPStreamIdCount > 0; }
+    unsigned fNumStreamStates;
+    // Per-track streaming state; one entry per subsession that was "SETUP":
+    struct streamState {
+      ServerMediaSubsession* subsession;
+      int tcpSocketNum;
+      void* streamToken;
+    } * fStreamStates; // an array; NOTE(review): presumably of length "fNumStreamStates" - confirm in the .cpp
+  };
+
+protected: // redefined virtual functions
+  // If you subclass "RTSPClientConnection", then you must also redefine this virtual function in order
+  // to create new objects of your subclass:
+  virtual ClientConnection* createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr);
+
+protected:
+  // If you subclass "RTSPClientSession", then you must also redefine this virtual function in order
+  // to create new objects of your subclass:
+  virtual ClientSession* createNewClientSession(u_int32_t sessionId);
+
+private:
+  static void incomingConnectionHandlerHTTP(void*, int /*mask*/);
+  void incomingConnectionHandlerHTTP();
+
+  // Bookkeeping of which client sessions are streaming RTP/RTCP-over-TCP on which sockets:
+  void noteTCPStreamingOnSocket(int socketNum, RTSPClientSession* clientSession, unsigned trackNum);
+  void unnoteTCPStreamingOnSocket(int socketNum, RTSPClientSession* clientSession, unsigned trackNum);
+  void stopTCPStreamingOnSocket(int socketNum);
+
+private:
+  friend class RTSPClientConnection;
+  friend class RTSPClientSession;
+  friend class RegisterRequestRecord;
+  friend class DeregisterRequestRecord;
+  int fHTTPServerSocket; // for optional RTSP-over-HTTP tunneling
+  Port fHTTPServerPort; // ditto
+  HashTable* fClientConnectionsForHTTPTunneling; // maps client-supplied 'session cookie' strings to "RTSPClientConnection"s
+    // (used only for optional RTSP-over-HTTP tunneling)
+  HashTable* fTCPStreamingDatabase;
+    // maps TCP socket numbers to ids of sessions that are streaming over it (RTP/RTCP-over-TCP)
+  HashTable* fPendingRegisterOrDeregisterRequests;
+  unsigned fRegisterOrDeregisterRequestCounter; // used to generate the unique ids returned by "registerStream()"/"deregisterStream()"
+  UserAuthenticationDatabase* fAuthDB;
+  Boolean fAllowStreamingRTPOverTCP; // by default, True
+};
+
+
+////////// A subclass of "RTSPServer" that implements the "REGISTER" command to set up proxying on the specified URL //////////
+
+// A "RTSPServer" subclass that accepts incoming "REGISTER" (and "DEREGISTER") commands,
+// and uses them to set up proxying of the registered back-end stream.
+class RTSPServerWithREGISTERProxying: public RTSPServer {
+public:
+  static RTSPServerWithREGISTERProxying* createNew(UsageEnvironment& env, Port ourPort = 554,
+                                                   UserAuthenticationDatabase* authDatabase = NULL,
+                                                   UserAuthenticationDatabase* authDatabaseForREGISTER = NULL,
+                                                   unsigned reclamationSeconds = 65,
+                                                   Boolean streamRTPOverTCP = False,
+                                                   int verbosityLevelForProxying = 0,
+                                                   char const* backEndUsername = NULL,
+                                                   char const* backEndPassword = NULL);
+
+protected:
+  RTSPServerWithREGISTERProxying(UsageEnvironment& env, int ourSocket, Port ourPort,
+                                 UserAuthenticationDatabase* authDatabase, UserAuthenticationDatabase* authDatabaseForREGISTER,
+                                 unsigned reclamationSeconds,
+                                 Boolean streamRTPOverTCP, int verbosityLevelForProxying,
+                                 char const* backEndUsername, char const* backEndPassword);
+  // called only by createNew();
+  virtual ~RTSPServerWithREGISTERProxying();
+
+protected: // redefined virtual functions
+  virtual char const* allowedCommandNames();
+  virtual Boolean weImplementREGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+                                      char const* proxyURLSuffix, char*& responseStr);
+  virtual void implementCmd_REGISTER(char const* cmd/*"REGISTER" or "DEREGISTER"*/,
+                                     char const* url, char const* urlSuffix, int socketToRemoteServer,
+                                     Boolean deliverViaTCP, char const* proxyURLSuffix);
+  virtual UserAuthenticationDatabase* getAuthenticationDatabaseForCommand(char const* cmdName);
+
+private:
+  Boolean fStreamRTPOverTCP;
+  int fVerbosityLevelForProxying;
+  unsigned fRegisteredProxyCounter; // NOTE(review): presumably counts the proxy sessions created via "REGISTER" - confirm in the .cpp
+  char* fAllowedCommandNames; // dynamically-built result returned by our "allowedCommandNames()"
+  UserAuthenticationDatabase* fAuthDBForREGISTER; // separate (optional) auth database, used only for "REGISTER"/"DEREGISTER"
+  char* fBackEndUsername;
+  char* fBackEndPassword;
+};
+
+
+// A special version of "parseTransportHeader()", used just for parsing the "Transport:" header
+// in an incoming "REGISTER" command:
+// NOTE(review): "proxyURLSuffix" appears to be returned in dynamically-allocated form
+// (delete[] after use) - confirm in the implementation.
+void parseTransportHeaderForREGISTER(char const* buf, // in
+                                     Boolean &reuseConnection, // out
+                                     Boolean& deliverViaTCP, // out
+                                     char*& proxyURLSuffix); // out
+
+#endif
diff --git a/liveMedia/include/RTSPServerSupportingHTTPStreaming.hh b/liveMedia/include/RTSPServerSupportingHTTPStreaming.hh
new file mode 100644
index 0000000..0e08d2d
--- /dev/null
+++ b/liveMedia/include/RTSPServerSupportingHTTPStreaming.hh
@@ -0,0 +1,73 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A server that supports both RTSP, and HTTP streaming (using Apple's "HTTP Live Streaming" protocol)
+// C++ header
+
+#ifndef _RTSP_SERVER_SUPPORTING_HTTP_STREAMING_HH
+#define _RTSP_SERVER_SUPPORTING_HTTP_STREAMING_HH
+
+#ifndef _RTSP_SERVER_HH
+#include "RTSPServer.hh"
+#endif
+#ifndef _BYTE_STREAM_MEMORY_BUFFER_SOURCE_HH
+#include "ByteStreamMemoryBufferSource.hh"
+#endif
+#ifndef _TCP_STREAM_SINK_HH
+#include "TCPStreamSink.hh"
+#endif
+
+// A "RTSPServer" subclass that also serves media via HTTP
+// (using Apple's "HTTP Live Streaming" protocol) on the port set by "setHTTPPort()".
+class RTSPServerSupportingHTTPStreaming: public RTSPServer {
+public:
+  static RTSPServerSupportingHTTPStreaming* createNew(UsageEnvironment& env, Port rtspPort = 554,
+                                                      UserAuthenticationDatabase* authDatabase = NULL,
+                                                      unsigned reclamationTestSeconds = 65);
+
+  // Reuses the base class's RTSP-over-HTTP port for HTTP streaming as well:
+  Boolean setHTTPPort(Port httpPort) { return setUpTunnelingOverHTTP(httpPort); }
+
+protected:
+  RTSPServerSupportingHTTPStreaming(UsageEnvironment& env,
+                                    int ourSocket, Port ourPort,
+                                    UserAuthenticationDatabase* authDatabase,
+                                    unsigned reclamationTestSeconds);
+  // called only by createNew();
+  virtual ~RTSPServerSupportingHTTPStreaming();
+
+protected: // redefined virtual functions
+  virtual ClientConnection* createNewClientConnection(int clientSocket, struct sockaddr_in clientAddr);
+
+public: // should be protected, but some old compilers complain otherwise
+  // Our "RTSPClientConnection" subclass, which redefines the handling of HTTP "GET" requests
+  // so that they can deliver a media stream (rather than just a RTSP-over-HTTP tunnel):
+  class RTSPClientConnectionSupportingHTTPStreaming: public RTSPServer::RTSPClientConnection {
+  public:
+    RTSPClientConnectionSupportingHTTPStreaming(RTSPServer& ourServer, int clientSocket, struct sockaddr_in clientAddr);
+    virtual ~RTSPClientConnectionSupportingHTTPStreaming();
+
+  protected: // redefined virtual functions
+    virtual void handleHTTPCmd_StreamingGET(char const* urlSuffix, char const* fullRequestStr);
+
+  protected:
+    static void afterStreaming(void* clientData);
+
+  private:
+    u_int32_t fClientSessionId;
+    FramedSource* fStreamSource; // the media source being streamed over HTTP
+    ByteStreamMemoryBufferSource* fPlaylistSource; // used to serve an in-memory HLS playlist
+    TCPStreamSink* fTCPSink; // delivers the stream over our TCP connection
+  };
+};
+
+#endif
diff --git a/liveMedia/include/RawVideoRTPSink.hh b/liveMedia/include/RawVideoRTPSink.hh
new file mode 100644
index 0000000..455c6c7
--- /dev/null
+++ b/liveMedia/include/RawVideoRTPSink.hh
@@ -0,0 +1,85 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for Raw video
+// C++ header
+
+#ifndef _RAW_VIDEO_RTP_SINK_HH
+#define _RAW_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+////////// FrameParameters //////////
+
+// Precomputed packing parameters for one raw video frame.
+// (Terminology follows RFC 4175, which groups pixels into octet-aligned 'pgroups'.)
+struct FrameParameters {
+  u_int16_t pGroupSize; // size (in bytes) of one 'pgroup'
+  u_int16_t nbOfPixelInPGroup; // number of pixels in one 'pgroup'
+  u_int32_t scanLineSize ; // size (in bytes) of one scan line
+  u_int32_t frameSize; // total size (in bytes) of one frame
+  u_int16_t scanLineIterationStep; // NOTE(review): presumably >1 for interlaced/subsampled formats - confirm in "setFrameParameters()"
+};
+
+
+// A RTP sink for raw (uncompressed) video, packetized per RFC 4175.
+class RawVideoRTPSink: public VideoRTPSink {
+public:
+  static RawVideoRTPSink*
+  createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+            // The following headers provide the 'configuration' information, for the SDP description:
+            unsigned height, unsigned width, unsigned depth,
+            char const* sampling, char const* colorimetry = "BT709-2");
+
+protected:
+  RawVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+                  u_int8_t rtpPayloadFormat,
+                  unsigned height, unsigned width, unsigned depth,
+                  char const* sampling, char const* colorimetry = "BT709-2");
+  // called only by createNew()
+
+  virtual ~RawVideoRTPSink();
+
+private: // redefined virtual functions:
+  virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line
+
+  virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+                                      unsigned char* frameStart,
+                                      unsigned numBytesInFrame,
+                                      struct timeval framePresentationTime,
+                                      unsigned numRemainingBytes);
+  virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+                                                 unsigned numBytesInFrame) const;
+  virtual unsigned specialHeaderSize() const;
+  virtual unsigned computeOverflowForNewFrame(unsigned newFrameSize) const;
+
+private:
+  char* fFmtpSDPLine; // the "a=fmtp:" line (dynamically allocated), returned by "auxSDPLine()"
+  char* fSampling; // e.g., "YCbCr-4:2:2" (a RFC 4175 "sampling" string)
+  unsigned fWidth; // in pixels
+  unsigned fHeight; // in pixels
+  unsigned fDepth; // bits per sample component
+  char* fColorimetry;
+  unsigned fLineindex; // NOTE(review): presumably the current scan-line position while packing a frame - confirm in the .cpp
+  FrameParameters fFrameParameters; // precomputed by "setFrameParameters()"
+
+  unsigned getNbLineInPacket(unsigned fragOffset, unsigned*& lengths, unsigned*& offsets) const;
+  // return the number of lines, their lengths and offsets from the fragmentation offset of the whole frame.
+  // call delete[] on lengths and offsets after use of the function
+  void setFrameParameters();
+};
+
+#endif
diff --git a/liveMedia/include/RawVideoRTPSource.hh b/liveMedia/include/RawVideoRTPSource.hh
new file mode 100644
index 0000000..3ee5966
--- /dev/null
+++ b/liveMedia/include/RawVideoRTPSource.hh
@@ -0,0 +1,60 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Raw Video RTP Sources (RFC 4175)
+// C++ header
+
+#ifndef _RAW_VIDEO_RTP_SOURCE_HH
+#define _RAW_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+// A RTP source for raw (uncompressed) video, depacketized per RFC 4175.
+class RawVideoRTPSource: public MultiFramedRTPSource {
+public:
+  static RawVideoRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs,
+                                      unsigned char rtpPayloadFormat,
+                                      unsigned rtpTimestampFrequency);
+
+  u_int16_t currentLineNumber() const; // of the most recently-read/processed scan line
+  u_int8_t currentLineFieldId() const; // of the most recently-read/processed scan line (0 or 1)
+  u_int16_t currentOffsetWithinLine() const; // of the most recently-read/processed scan line
+
+protected:
+  RawVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+                    unsigned char rtpPayloadFormat,
+                    unsigned rtpTimestampFrequency = 90000);
+  // called only by createNew()
+
+  virtual ~RawVideoRTPSource();
+
+protected:
+  // redefined virtual functions:
+  virtual Boolean processSpecialHeader(BufferedPacket* packet,
+                                       unsigned& resultSpecialHeaderSize);
+  virtual char const* MIMEtype() const;
+
+private:
+  unsigned fNumLines; // in the most recently read packet
+  unsigned fNextLine; // index of the next AU Header to read
+  struct LineHeader* fLineHeaders; // per-line headers parsed from the packet's payload header (defined in the .cpp)
+
+  friend class RawVideoBufferedPacket;
+};
+
+#endif
diff --git a/liveMedia/include/SIPClient.hh b/liveMedia/include/SIPClient.hh
new file mode 100644
index 0000000..e220f2c
--- /dev/null
+++ b/liveMedia/include/SIPClient.hh
@@ -0,0 +1,149 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic SIP client
+// C++ header
+
+#ifndef _SIP_CLIENT_HH
+#define _SIP_CLIENT_HH
+
+#ifndef _MEDIA_SESSION_HH
+#include "MediaSession.hh"
+#endif
+#ifndef _NET_ADDRESS_HH
+#include "NetAddress.hh"
+#endif
+#ifndef _DIGEST_AUTHENTICATION_HH
+#include "DigestAuthentication.hh"
+#endif
+
+// Possible states in the "INVITE" transition diagram (RFC 3261, Figure 5);
+// used to track the progress of our client-side "INVITE" transaction:
+enum inviteClientState { Calling, Proceeding, Completed, Terminated };
+
+// A generic SIP (RFC 3261) client, supporting "INVITE", "ACK", and "BYE",
+// with optional digest authentication.
+class SIPClient: public Medium {
+public:
+  static SIPClient* createNew(UsageEnvironment& env,
+                              unsigned char desiredAudioRTPPayloadFormat,
+                              char const* mimeSubtype = NULL,
+                              int verbosityLevel = 0,
+                              char const* applicationName = NULL);
+
+  void setProxyServer(unsigned proxyServerAddress,
+                      portNumBits proxyServerPortNum);
+
+  void setClientStartPortNum(portNumBits clientStartPortNum) {
+    fClientStartPortNum = clientStartPortNum;
+  }
+
+  char* invite(char const* url, Authenticator* authenticator = NULL);
+  // Issues a SIP "INVITE" command
+  // Returns the session SDP description if this command succeeds
+  char* inviteWithPassword(char const* url,
+                           char const* username, char const* password);
+  // Uses "invite()" to do an "INVITE" - first
+  // without using "password", then (if we get an Unauthorized
+  // response) with an authentication response computed from "password"
+
+  Boolean sendACK(); // on current call
+  Boolean sendBYE(); // on current call
+
+  static Boolean parseSIPURL(UsageEnvironment& env, char const* url,
+                             NetAddress& address, portNumBits& portNum);
+  // (ignores any "<username>[:<password>]@" in "url")
+  static Boolean parseSIPURLUsernamePassword(char const* url,
+                                             char*& username,
+                                             char*& password);
+  char const* getInviteSdpReply() const { return fInviteSDPDescriptionReturned; }
+
+  void setUserAgentString(char const* userAgentName);
+  // sets an alternative string to be used in SIP "User-Agent:" headers
+
+protected:
+  virtual ~SIPClient();
+
+private:
+  SIPClient(UsageEnvironment& env,
+            unsigned char desiredAudioRTPPayloadFormat,
+            char const* mimeSubtype,
+            int verbosityLevel,
+            char const* applicationName);
+  // called only by createNew();
+
+  void reset();
+
+  // Routines used to implement invite*():
+  char* invite1(Authenticator* authenticator);
+  Boolean processURL(char const* url);
+  Boolean sendINVITE();
+  static void inviteResponseHandler(void* clientData, int mask);
+  void doInviteStateMachine(unsigned responseCode);
+  void doInviteStateTerminated(unsigned responseCode);
+  // "INVITE" transaction timers (cf. RFC 3261, section 17.1.1):
+  // A: retransmission; B: transaction timeout; D: wait time in the "Completed" state
+  TaskToken fTimerA, fTimerB, fTimerD;
+  static void timerAHandler(void* clientData);
+  static void timerBHandler(void* clientData);
+  static void timerDHandler(void* clientData);
+  unsigned const fT1; // in microseconds
+  unsigned fTimerALen; // in microseconds; initially fT1, then doubles
+  unsigned fTimerACount;
+
+  // Routines used to implement all commands:
+  char* createAuthenticatorString(Authenticator const* authenticator,
+                                  char const* cmd, char const* url);
+  Boolean sendRequest(char const* requestString, unsigned requestLength);
+  unsigned getResponseCode();
+  unsigned getResponse(char*& responseBuffer, unsigned responseBufferSize);
+  Boolean parseResponseCode(char const* line, unsigned& responseCode);
+
+private:
+  // Set for all calls:
+  unsigned char fDesiredAudioRTPPayloadFormat;
+  char* fMIMESubtype;
+  unsigned fMIMESubtypeSize;
+  int fVerbosityLevel;
+  unsigned fCSeq; // sequence number, used in consecutive requests
+  char const* fApplicationName;
+  unsigned fApplicationNameSize;
+  char const* fOurAddressStr;
+  unsigned fOurAddressStrSize;
+  portNumBits fOurPortNum;
+  Groupsock* fOurSocket; // the (UDP) socket over which we send/receive SIP messages
+  char* fUserAgentHeaderStr;
+  unsigned fUserAgentHeaderStrLen;
+
+  // Set for each call:
+  char const* fURL;
+  unsigned fURLSize;
+  struct in_addr fServerAddress;
+  portNumBits fServerPortNum; // in host order
+  portNumBits fClientStartPortNum; // in host order
+  unsigned fCallId, fFromTag; // set by us
+  char const* fToTagStr; // set by the responder
+  unsigned fToTagStrSize;
+  Authenticator fValidAuthenticator;
+  char const* fUserName; // 'user' name used in "From:" & "Contact:" lines
+  unsigned fUserNameSize;
+
+  char* fInviteSDPDescription;
+  char* fInviteSDPDescriptionReturned; // returned by "getInviteSdpReply()"
+  char* fInviteCmd;
+  unsigned fInviteCmdSize;
+  Authenticator* fWorkingAuthenticator;
+  inviteClientState fInviteClientState; // current state of our "INVITE" transaction
+  char fEventLoopStopFlag; // NOTE(review): presumably used as an event-loop 'watch variable' - confirm in the .cpp
+};
+
+#endif
diff --git a/liveMedia/include/SRTPCryptographicContext.hh b/liveMedia/include/SRTPCryptographicContext.hh
new file mode 100644
index 0000000..0236c8d
--- /dev/null
+++ b/liveMedia/include/SRTPCryptographicContext.hh
@@ -0,0 +1,142 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// The SRTP 'Cryptographic Context', used in all of our uses of SRTP.
+// Definition
+
+#ifndef _SRTP_CRYPTOGRAPHIC_CONTEXT_HH
+#define _SRTP_CRYPTOGRAPHIC_CONTEXT_HH
+
+#ifndef _MIKEY_HH
+#include "MIKEY.hh"
+#endif
+
+class SRTPCryptographicContext {
+public:
+ SRTPCryptographicContext(MIKEYState const& mikeyState);
+ virtual ~SRTPCryptographicContext();
+
+ // Authenticate (if necessary) and decrypt (if necessary) incoming SRTP and SRTCP packets.
+ // Returns True iff the packet is well-formed and authenticates OK.
+ // ("outPacketSize" will be <= "inPacketSize".)
+ Boolean processIncomingSRTPPacket(u_int8_t* buffer, unsigned inPacketSize,
+ unsigned& outPacketSize);
+ Boolean processIncomingSRTCPPacket(u_int8_t* buffer, unsigned inPacketSize,
+ unsigned& outPacketSize);
+
+ // Encrypt (if necessary) and add an authentication tag (if necessary) to an outgoing
+ // RTCP packet.
+ // Returns True iff the packet is well-formed.
+ // ("outPacketSize" will be >= "inPacketSize"; there must be enough space at the end of
+ // "buffer" for the extra SRTCP tags (4+4+10 bytes).)
+ Boolean processOutgoingSRTCPPacket(u_int8_t* buffer, unsigned inPacketSize,
+ unsigned& outPacketSize);
+
+#ifndef NO_OPENSSL
+private:
+ // Definitions specific to the "SRTP_AES128_CM_HMAC_SHA1_80" ciphersuite.
+ // Later generalize to support more SRTP ciphersuites #####
+#define SRTP_CIPHER_KEY_LENGTH (128/8) // in bytes
+#define SRTP_CIPHER_SALT_LENGTH (112/8) // in bytes
+#define SRTP_MKI_LENGTH 4 // in bytes
+#define SRTP_AUTH_KEY_LENGTH (160/8) // in bytes
+#define SRTP_AUTH_TAG_LENGTH (80/8) // in bytes
+
+ struct derivedKeys {
+ u_int8_t cipherKey[SRTP_CIPHER_KEY_LENGTH];
+ u_int8_t salt[SRTP_CIPHER_SALT_LENGTH];
+ u_int8_t authKey[SRTP_AUTH_KEY_LENGTH];
+ };
+
+ struct allDerivedKeys {
+ derivedKeys srtp;
+ derivedKeys srtcp;
+ };
+
+ typedef enum {
+ label_srtp_encryption = 0x00,
+ label_srtp_msg_auth = 0x01,
+ label_srtp_salt = 0x02,
+ label_srtcp_encryption = 0x03,
+ label_srtcp_msg_auth = 0x04,
+ label_srtcp_salt = 0x05
+ } SRTPKeyDerivationLabel;
+
+ unsigned generateSRTCPAuthenticationTag(u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+ u_int8_t* resultAuthenticationTag);
+ // returns the size of the resulting authentication tag
+
+ Boolean verifySRTPAuthenticationTag(u_int8_t* dataToAuthenticate, unsigned numBytesToAuthenticate,
+ u_int32_t roc, u_int8_t const* authenticationTag);
+ Boolean verifySRTCPAuthenticationTag(u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+ u_int8_t const* authenticationTag);
+
+ void decryptSRTPPacket(u_int64_t index, u_int32_t ssrc, u_int8_t* data, unsigned numDataBytes);
+ void decryptSRTCPPacket(u_int32_t index, u_int32_t ssrc, u_int8_t* data, unsigned numDataBytes);
+
+ void encryptSRTCPPacket(u_int32_t index, u_int32_t ssrc, u_int8_t* data, unsigned numDataBytes);
+
+ unsigned generateAuthenticationTag(derivedKeys& keysToUse,
+ u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+ u_int8_t* resultAuthenticationTag);
+ // returns the size of the resulting authentication tag
+ // "resultAuthenticationTag" must point to an array of at least SRTP_AUTH_TAG_LENGTH
+ Boolean verifyAuthenticationTag(derivedKeys& keysToUse,
+ u_int8_t const* dataToAuthenticate, unsigned numBytesToAuthenticate,
+ u_int8_t const* authenticationTag);
+
+ void cryptData(derivedKeys& keys, u_int64_t index, u_int32_t ssrc,
+ u_int8_t* data, unsigned numDataBytes);
+
+ void performKeyDerivation();
+
+ void deriveKeysFromMaster(u_int8_t const* masterKey, u_int8_t const* salt,
+ allDerivedKeys& allKeysResult);
+ // used to implement "performKeyDerivation()"
+ void deriveSingleKey(u_int8_t const* masterKey, u_int8_t const* salt,
+ SRTPKeyDerivationLabel label,
+ unsigned resultKeyLength, u_int8_t* resultKey);
+ // used to implement "deriveKeysFromMaster()".
+ // ("resultKey" must be an existing buffer, of size >= "resultKeyLength")
+
+private:
+ MIKEYState const& fMIKEYState;
+
+ // Master key + salt:
+ u_int8_t const* masterKeyPlusSalt() const { return fMIKEYState.keyData(); }
+
+ u_int8_t const* masterKey() const { return &masterKeyPlusSalt()[0]; }
+ u_int8_t const* masterSalt() const { return &masterKeyPlusSalt()[SRTP_CIPHER_KEY_LENGTH]; }
+
+ Boolean weEncryptSRTP() const { return fMIKEYState.encryptSRTP(); }
+ Boolean weEncryptSRTCP() const { return fMIKEYState.encryptSRTCP(); }
+ Boolean weAuthenticate() const { return fMIKEYState.useAuthentication(); }
+ u_int32_t MKI() const { return fMIKEYState.MKI(); }
+
+ // Derived (i.e., session) keys:
+ allDerivedKeys fDerivedKeys;
+
+ // State used for handling the reception of SRTP packets:
+ Boolean fHaveReceivedSRTPPackets;
+ u_int16_t fPreviousHighRTPSeqNum;
+ u_int32_t fROC; // rollover counter
+
+ // State used for handling the sending of SRTCP packets:
+ u_int32_t fSRTCPIndex;
+#endif
+};
+
+#endif
diff --git a/liveMedia/include/ServerMediaSession.hh b/liveMedia/include/ServerMediaSession.hh
new file mode 100644
index 0000000..51b91ca
--- /dev/null
+++ b/liveMedia/include/ServerMediaSession.hh
@@ -0,0 +1,203 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A data structure that represents a session that consists of
+// potentially multiple (audio and/or video) sub-sessions
+// (This data structure is used for media *streamers* - i.e., servers.
+// For media receivers, use "MediaSession" instead.)
+// C++ header
+
+#ifndef _SERVER_MEDIA_SESSION_HH
+#define _SERVER_MEDIA_SESSION_HH
+
+#ifndef _RTCP_HH
+#include "RTCP.hh"
+#endif
+
+class ServerMediaSubsession; // forward
+
+class ServerMediaSession: public Medium {
+public:
+ static ServerMediaSession* createNew(UsageEnvironment& env,
+ char const* streamName = NULL,
+ char const* info = NULL,
+ char const* description = NULL,
+ Boolean isSSM = False,
+ char const* miscSDPLines = NULL);
+
+ static Boolean lookupByName(UsageEnvironment& env,
+ char const* mediumName,
+ ServerMediaSession*& resultSession);
+
+ char* generateSDPDescription(); // based on the entire session
+ // Note: The caller is responsible for freeing the returned string
+
+ char const* streamName() const { return fStreamName; }
+
+ Boolean addSubsession(ServerMediaSubsession* subsession);
+ unsigned numSubsessions() const { return fSubsessionCounter; }
+
+ void testScaleFactor(float& scale); // sets "scale" to the actual supported scale
+ float duration() const;
+ // a result == 0 means an unbounded session (the default)
+ // a result < 0 means: subsession durations differ; the result is -(the largest).
+ // a result > 0 means: this is the duration of a bounded session
+
+ virtual void noteLiveness();
+ // called whenever a client - accessing this media - notes liveness.
+ // The default implementation does nothing, but subclasses can redefine this - e.g., if you
+ // want to remove long-unused "ServerMediaSession"s from the server.
+
+ unsigned referenceCount() const { return fReferenceCount; }
+ void incrementReferenceCount() { ++fReferenceCount; }
+ void decrementReferenceCount() { if (fReferenceCount > 0) --fReferenceCount; }
+ Boolean& deleteWhenUnreferenced() { return fDeleteWhenUnreferenced; }
+
+ void deleteAllSubsessions();
+ // Removes and deletes all subsessions added by "addSubsession()", returning us to an 'empty' state
+ // Note: If you have already added this "ServerMediaSession" to a "RTSPServer" then, before calling this function,
+ // you must first close any client connections that use it,
+ // by calling "RTSPServer::closeAllClientSessionsForServerMediaSession()".
+
+protected:
+ ServerMediaSession(UsageEnvironment& env, char const* streamName,
+ char const* info, char const* description,
+ Boolean isSSM, char const* miscSDPLines);
+ // called only by "createNew()"
+
+ virtual ~ServerMediaSession();
+
+private: // redefined virtual functions
+ virtual Boolean isServerMediaSession() const;
+
+private:
+ Boolean fIsSSM;
+
+ // Linkage fields:
+ friend class ServerMediaSubsessionIterator;
+ ServerMediaSubsession* fSubsessionsHead;
+ ServerMediaSubsession* fSubsessionsTail;
+ unsigned fSubsessionCounter;
+
+ char* fStreamName;
+ char* fInfoSDPString;
+ char* fDescriptionSDPString;
+ char* fMiscSDPLines;
+ struct timeval fCreationTime;
+ unsigned fReferenceCount;
+ Boolean fDeleteWhenUnreferenced;
+};
+
+
+class ServerMediaSubsessionIterator {
+public:
+ ServerMediaSubsessionIterator(ServerMediaSession& session);
+ virtual ~ServerMediaSubsessionIterator();
+
+ ServerMediaSubsession* next(); // NULL if none
+ void reset();
+
+private:
+ ServerMediaSession& fOurSession;
+ ServerMediaSubsession* fNextPtr;
+};
+
+
+class ServerMediaSubsession: public Medium {
+public:
+ unsigned trackNumber() const { return fTrackNumber; }
+ char const* trackId();
+ virtual char const* sdpLines() = 0;
+ virtual void getStreamParameters(unsigned clientSessionId, // in
+ netAddressBits clientAddress, // in
+ Port const& clientRTPPort, // in
+ Port const& clientRTCPPort, // in
+ int tcpSocketNum, // in (-1 means use UDP, not TCP)
+ unsigned char rtpChannelId, // in (used if TCP)
+ unsigned char rtcpChannelId, // in (used if TCP)
+ netAddressBits& destinationAddress, // in out
+ u_int8_t& destinationTTL, // in out
+ Boolean& isMulticast, // out
+ Port& serverRTPPort, // out
+ Port& serverRTCPPort, // out
+ void*& streamToken // out
+ ) = 0;
+ virtual void startStream(unsigned clientSessionId, void* streamToken,
+ TaskFunc* rtcpRRHandler,
+ void* rtcpRRHandlerClientData,
+ unsigned short& rtpSeqNum,
+ unsigned& rtpTimestamp,
+ ServerRequestAlternativeByteHandler* serverRequestAlternativeByteHandler,
+ void* serverRequestAlternativeByteHandlerClientData) = 0;
+ virtual void pauseStream(unsigned clientSessionId, void* streamToken);
+ virtual void seekStream(unsigned clientSessionId, void* streamToken, double& seekNPT,
+ double streamDuration, u_int64_t& numBytes);
+ // This routine is used to seek by relative (i.e., NPT) time.
+ // "streamDuration", if >0.0, specifies how much data to stream, past "seekNPT". (If <=0.0, all remaining data is streamed.)
+ // "numBytes" returns the size (in bytes) of the data to be streamed, or 0 if unknown or unlimited.
+ virtual void seekStream(unsigned clientSessionId, void* streamToken, char*& absStart, char*& absEnd);
+ // This routine is used to seek by 'absolute' time.
+ // "absStart" should be a string of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z".
+ // "absEnd" should be either NULL (for no end time), or a string of the same form as "absStart".
+ // These strings may be modified in-place, or can be reassigned to a newly-allocated value (after delete[]ing the original).
+ virtual void nullSeekStream(unsigned clientSessionId, void* streamToken,
+ double streamEndTime, u_int64_t& numBytes);
+ // Called whenever we're handling a "PLAY" command without a specified start time.
+ virtual void setStreamScale(unsigned clientSessionId, void* streamToken, float scale);
+ virtual float getCurrentNPT(void* streamToken);
+ virtual FramedSource* getStreamSource(void* streamToken);
+ virtual void getRTPSinkandRTCP(void* streamToken,
+ RTPSink const*& rtpSink, RTCPInstance const*& rtcp) = 0;
+ // Returns pointers to the "RTPSink" and "RTCPInstance" objects for "streamToken".
+ // (This can be useful if you want to get the associated 'Groupsock' objects, for example.)
+ // You must not delete these objects, or start/stop playing them; instead, that is done
+ // using the "startStream()" and "deleteStream()" functions.
+ virtual void deleteStream(unsigned clientSessionId, void*& streamToken);
+
+ virtual void testScaleFactor(float& scale); // sets "scale" to the actual supported scale
+ virtual float duration() const;
+ // returns 0 for an unbounded session (the default)
+ // returns > 0 for a bounded session
+ virtual void getAbsoluteTimeRange(char*& absStartTime, char*& absEndTime) const;
+ // Subclasses can reimplement this iff they support seeking by 'absolute' time.
+
+ // The following may be called by (e.g.) SIP servers, for which the
+ // address and port number fields in SDP descriptions need to be non-zero:
+ void setServerAddressAndPortForSDP(netAddressBits addressBits,
+ portNumBits portBits);
+
+protected: // we're a virtual base class
+ ServerMediaSubsession(UsageEnvironment& env);
+ virtual ~ServerMediaSubsession();
+
+ char const* rangeSDPLine() const;
+ // returns a string to be delete[]d
+
+ ServerMediaSession* fParentSession;
+ netAddressBits fServerAddressForSDP;
+ portNumBits fPortNumForSDP;
+
+private:
+ friend class ServerMediaSession;
+ friend class ServerMediaSubsessionIterator;
+ ServerMediaSubsession* fNext;
+
+ unsigned fTrackNumber; // within an enclosing ServerMediaSession
+ char const* fTrackId;
+};
+
+#endif
diff --git a/liveMedia/include/SimpleRTPSink.hh b/liveMedia/include/SimpleRTPSink.hh
new file mode 100644
index 0000000..14dfaef
--- /dev/null
+++ b/liveMedia/include/SimpleRTPSink.hh
@@ -0,0 +1,76 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A simple RTP sink that packs frames into each outgoing
+// packet, without any fragmentation or special headers.
+// C++ header
+
+#ifndef _SIMPLE_RTP_SINK_HH
+#define _SIMPLE_RTP_SINK_HH
+
+#ifndef _MULTI_FRAMED_RTP_SINK_HH
+#include "MultiFramedRTPSink.hh"
+#endif
+
+class SimpleRTPSink: public MultiFramedRTPSink {
+public:
+ static SimpleRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* sdpMediaTypeString,
+ char const* rtpPayloadFormatName,
+ unsigned numChannels = 1,
+ Boolean allowMultipleFramesPerPacket = True,
+ Boolean doNormalMBitRule = True);
+ // "doNormalMBitRule" means: If the medium (i.e., "sdpMediaTypeString") is other than "audio", set the RTP "M" bit
+ // on each outgoing packet iff it contains the last (or only) fragment of a frame.
+ // Otherwise (i.e., if "doNormalMBitRule" is False, or the medium is "audio"), leave the "M" bit unset.
+
+ void setMBitOnNextPacket() { fSetMBitOnNextPacket = True; } // hack for optionally setting the RTP 'M' bit from outside the class
+
+protected:
+ SimpleRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* sdpMediaTypeString,
+ char const* rtpPayloadFormatName,
+ unsigned numChannels,
+ Boolean allowMultipleFramesPerPacket,
+ Boolean doNormalMBitRule);
+ // called only by createNew()
+
+ virtual ~SimpleRTPSink();
+
+protected: // redefined virtual functions
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual char const* sdpMediaType() const;
+
+private:
+ char const* fSDPMediaTypeString;
+ Boolean fAllowMultipleFramesPerPacket;
+ Boolean fSetMBitOnLastFrames, fSetMBitOnNextPacket;
+};
+
+#endif
diff --git a/liveMedia/include/SimpleRTPSource.hh b/liveMedia/include/SimpleRTPSource.hh
new file mode 100644
index 0000000..693c4b9
--- /dev/null
+++ b/liveMedia/include/SimpleRTPSource.hh
@@ -0,0 +1,65 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// An RTP source for a simple RTP payload format that
+// - doesn't have any special headers following the RTP header
+// (if necessary, the "offset" parameter can be used to specify a
+// special header that we just skip over)
+// - doesn't have any special framing apart from the packet data itself
+// C++ header
+
+#ifndef _SIMPLE_RTP_SOURCE_HH
+#define _SIMPLE_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class SimpleRTPSource: public MultiFramedRTPSource {
+public:
+ static SimpleRTPSource* createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mimeTypeString,
+ unsigned offset = 0,
+ Boolean doNormalMBitRule = True);
+ // "doNormalMBitRule" means: If the medium is not audio, use the RTP "M"
+ // bit on each incoming packet to indicate the last (or only) fragment
+ // of a frame. Otherwise (i.e., if "doNormalMBitRule" is False, or the medium is "audio"), the "M" bit is ignored.
+
+protected:
+ SimpleRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency,
+ char const* mimeTypeString, unsigned offset,
+ Boolean doNormalMBitRule);
+ // called only by createNew(), or by subclass constructors
+ virtual ~SimpleRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ char const* fMIMEtypeString;
+ unsigned fOffset;
+ Boolean fUseMBitForFrameEnd;
+};
+
+#endif
diff --git a/liveMedia/include/StreamReplicator.hh b/liveMedia/include/StreamReplicator.hh
new file mode 100644
index 0000000..3427789
--- /dev/null
+++ b/liveMedia/include/StreamReplicator.hh
@@ -0,0 +1,84 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A class that can be used to create (possibly multiple) 'replicas' of an incoming stream.
+// C++ header
+
+#ifndef _STREAM_REPLICATOR_HH
+#define _STREAM_REPLICATOR_HH
+
+#ifndef _FRAMED_SOURCE_HH
+#include "FramedSource.hh"
+#endif
+
+class StreamReplica; // forward
+
+class StreamReplicator: public Medium {
+public:
+ static StreamReplicator* createNew(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies = True);
+ // If "deleteWhenLastReplicaDies" is True (the default), then the "StreamReplicator" object is deleted when (and only when)
+ // all replicas have been deleted. (In this case, you must *not* call "Medium::close()" on the "StreamReplicator" object,
+ // unless you never created any replicas from it to begin with.)
+ // If "deleteWhenLastReplicaDies" is False, then the "StreamReplicator" object remains in existence, even when all replicas
+ // have been deleted. (This allows you to create new replicas later, if you wish.) In this case, you delete the
+ // "StreamReplicator" object by calling "Medium::close()" on it - but you must do so only when "numReplicas()" returns 0.
+
+ FramedSource* createStreamReplica();
+
+ unsigned numReplicas() const { return fNumReplicas; }
+
+ FramedSource* inputSource() const { return fInputSource; }
+
+ // Call before destruction if you want to prevent the destructor from closing the input source
+ void detachInputSource() { fInputSource = NULL; }
+
+protected:
+ StreamReplicator(UsageEnvironment& env, FramedSource* inputSource, Boolean deleteWhenLastReplicaDies);
+ // called only by "createNew()"
+ virtual ~StreamReplicator();
+
+private:
+ // Routines called by replicas to implement frame delivery, and the stopping/restarting/deletion of replicas:
+ friend class StreamReplica;
+ void getNextFrame(StreamReplica* replica);
+ void deactivateStreamReplica(StreamReplica* replica);
+ void removeStreamReplica(StreamReplica* replica);
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime, unsigned durationInMicroseconds);
+
+ static void onSourceClosure(void* clientData);
+ void onSourceClosure();
+
+ void deliverReceivedFrame();
+
+private:
+ FramedSource* fInputSource;
+ Boolean fDeleteWhenLastReplicaDies, fInputSourceHasClosed;
+ unsigned fNumReplicas, fNumActiveReplicas, fNumDeliveriesMadeSoFar;
+ int fFrameIndex; // 0 or 1; used to figure out if a replica is requesting the current frame, or the next frame
+
+ StreamReplica* fMasterReplica; // the first replica that requests each frame. We use its buffer when copying to the others.
+ StreamReplica* fReplicasAwaitingCurrentFrame; // other than the 'master' replica
+ StreamReplica* fReplicasAwaitingNextFrame; // replicas that have already received the current frame, and have asked for the next
+};
+#endif
diff --git a/liveMedia/include/T140TextRTPSink.hh b/liveMedia/include/T140TextRTPSink.hh
new file mode 100644
index 0000000..abe3d28
--- /dev/null
+++ b/liveMedia/include/T140TextRTPSink.hh
@@ -0,0 +1,103 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for T.140 text (RFC 2793)
+// C++ header
+
+#ifndef _T140_TEXT_RTP_SINK_HH
+#define _T140_TEXT_RTP_SINK_HH
+
+#ifndef _TEXT_RTP_SINK_HH
+#include "TextRTPSink.hh"
+#endif
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+class T140IdleFilter;
+
+class T140TextRTPSink: public TextRTPSink {
+public:
+ static T140TextRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+
+protected:
+ T140TextRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ // called only by createNew()
+
+ virtual ~T140TextRTPSink();
+
+protected: // redefined virtual functions:
+ virtual Boolean continuePlaying();
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+
+protected:
+ T140IdleFilter* fOurIdleFilter;
+ Boolean fAreInIdlePeriod;
+};
+
+
+////////// T140IdleFilter definition //////////
+
+// Because the T.140 text RTP payload format specification recommends that (empty) RTP packets be sent during 'idle periods'
+// when no new text is available, we implement "T140TextRTPSink" using a separate "T140IdleFilter" class - sitting in front
+// - that delivers, to the "T140TextRTPSink", a continuous sequence of (possibly) empty frames.
+// (Note: This class should be used only by "T140TextRTPSink", or a subclass.)
+
+class T140IdleFilter: public FramedFilter {
+public:
+ T140IdleFilter(UsageEnvironment& env, FramedSource* inputSource);
+ virtual ~T140IdleFilter();
+
+private: // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+ static void handleIdleTimeout(void* clientData);
+ void handleIdleTimeout();
+
+ void deliverFromBuffer();
+ void deliverEmptyFrame();
+
+ static void onSourceClosure(void* clientData);
+ void onSourceClosure();
+
+private:
+ TaskToken fIdleTimerTask;
+ unsigned fBufferSize, fNumBufferedBytes;
+ char* fBuffer;
+ unsigned fBufferedNumTruncatedBytes; // a count of truncated bytes from the upstream
+ struct timeval fBufferedDataPresentationTime;
+ unsigned fBufferedDataDurationInMicroseconds;
+};
+
+#endif
diff --git a/liveMedia/include/TCPStreamSink.hh b/liveMedia/include/TCPStreamSink.hh
new file mode 100644
index 0000000..eb1fdf9
--- /dev/null
+++ b/liveMedia/include/TCPStreamSink.hh
@@ -0,0 +1,67 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A sink representing a TCP output stream
+// C++ header
+
+#ifndef _TCP_STREAM_SINK_HH
+#define _TCP_STREAM_SINK_HH
+
+#ifndef _MEDIA_SINK_HH
+#include "MediaSink.hh"
+#endif
+
+#define TCP_STREAM_SINK_BUFFER_SIZE 10000
+
+class TCPStreamSink: public MediaSink {
+public:
+ static TCPStreamSink* createNew(UsageEnvironment& env, int socketNum);
+ // "socketNum" is the socket number of an existing, writable TCP socket (which should be non-blocking).
+ // The caller is responsible for closing this socket later (when this object no longer exists).
+
+protected:
+ TCPStreamSink(UsageEnvironment& env, int socketNum); // called only by "createNew()"
+ virtual ~TCPStreamSink();
+
+protected:
+ // Redefined virtual functions:
+ virtual Boolean continuePlaying();
+
+private:
+ void processBuffer(); // common routine, called from both the 'socket writable' and 'incoming data' handlers below
+
+ static void socketWritableHandler(void* clientData, int mask);
+ void socketWritableHandler1();
+
+ static void afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval /*presentationTime*/, unsigned /*durationInMicroseconds*/);
+ void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes);
+
+ static void ourOnSourceClosure(void* clientData);
+ void ourOnSourceClosure1();
+
+ unsigned numUnwrittenBytes() const { return fUnwrittenBytesEnd - fUnwrittenBytesStart; }
+ unsigned freeBufferSpace() const { return TCP_STREAM_SINK_BUFFER_SIZE - fUnwrittenBytesEnd; }
+
+private:
+ unsigned char fBuffer[TCP_STREAM_SINK_BUFFER_SIZE];
+ unsigned fUnwrittenBytesStart, fUnwrittenBytesEnd;
+ Boolean fInputSourceIsOpen, fOutputSocketIsWritable;
+ int fOutputSocketNum;
+};
+
+#endif
diff --git a/liveMedia/include/TLSState.hh b/liveMedia/include/TLSState.hh
new file mode 100644
index 0000000..e0ac619
--- /dev/null
+++ b/liveMedia/include/TLSState.hh
@@ -0,0 +1,59 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// State encapsulating a TLS connection
+// C++ header
+
+#ifndef _TLS_STATE_HH
+#define _TLS_STATE_HH
+
+#ifndef _NET_COMMON_H
+#include "NetCommon.h"
+#endif
+#ifndef _BOOLEAN_HH
+#include "Boolean.hh"
+#endif
+#ifndef NO_OPENSSL
+#include <openssl/ssl.h>
+#endif
+
+class TLSState {
+public:
+ TLSState(class RTSPClient& client);
+ virtual ~TLSState();
+
+public:
+ Boolean isNeeded;
+
+ int connect(int socketNum); // returns: -1 (unrecoverable error), 0 (pending), 1 (done)
+ int write(const char* data, unsigned count);
+ int read(u_int8_t* buffer, unsigned bufferSize);
+
+private:
+ void reset();
+ Boolean setup(int socketNum);
+
+#ifndef NO_OPENSSL
+private:
+ class RTSPClient& fClient;
+ Boolean fHasBeenSetup;
+ SSL_CTX* fCtx;
+ SSL* fCon;
+#endif
+};
+
+#endif
diff --git a/liveMedia/include/TextRTPSink.hh b/liveMedia/include/TextRTPSink.hh
new file mode 100644
index 0000000..0ecdc04
--- /dev/null
+++ b/liveMedia/include/TextRTPSink.hh
@@ -0,0 +1,41 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTP sink for text codecs (abstract base class)
+// C++ header
+
+#ifndef _TEXT_RTP_SINK_HH
+#define _TEXT_RTP_SINK_HH
+
+#ifndef _MULTI_FRAMED_RTP_SINK_HH
+#include "MultiFramedRTPSink.hh"
+#endif
+
+class TextRTPSink: public MultiFramedRTPSink {
+protected:
+ TextRTPSink(UsageEnvironment& env,
+ Groupsock* rtpgs, unsigned char rtpPayloadType,
+ unsigned rtpTimestampFrequency,
+ char const* rtpPayloadFormatName);
+ // (we're an abstract base class)
+ virtual ~TextRTPSink();
+
+private: // redefined virtual functions:
+ virtual char const* sdpMediaType() const;
+};
+
+#endif
diff --git a/liveMedia/include/TheoraVideoRTPSink.hh b/liveMedia/include/TheoraVideoRTPSink.hh
new file mode 100644
index 0000000..8e72a5b
--- /dev/null
+++ b/liveMedia/include/TheoraVideoRTPSink.hh
@@ -0,0 +1,72 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for Theora video
+// C++ header
+
+#ifndef _THEORA_VIDEO_RTP_SINK_HH
+#define _THEORA_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class TheoraVideoRTPSink: public VideoRTPSink {
+public:
+ static TheoraVideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ // The following headers provide the 'configuration' information, for the SDP description:
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField = 0xFACADE);
+
+ static TheoraVideoRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ char const* configStr);
+ // an optional variant of "createNew()" that takes a Base-64-encoded 'configuration' string,
+  // rather than the raw configuration headers as parameters.
+
+protected:
+ TheoraVideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat,
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField);
+ // called only by createNew()
+
+ virtual ~TheoraVideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line
+
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual unsigned specialHeaderSize() const;
+
+private:
+ u_int32_t fIdent; // "Ident" field used by this stream. (Only the low 24 bits of this are used.)
+ char* fFmtpSDPLine;
+};
+
+#endif
diff --git a/liveMedia/include/TheoraVideoRTPSource.hh b/liveMedia/include/TheoraVideoRTPSource.hh
new file mode 100644
index 0000000..4dbd20a
--- /dev/null
+++ b/liveMedia/include/TheoraVideoRTPSource.hh
@@ -0,0 +1,53 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Theora Video RTP Sources
+// C++ header
+
+#ifndef _THEORA_VIDEO_RTP_SOURCE_HH
+#define _THEORA_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class TheoraVideoRTPSource: public MultiFramedRTPSource {
+public:
+ static TheoraVideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat);
+
+ u_int32_t curPacketIdent() const { return fCurPacketIdent; } // The current "Ident" field; only the low-order 24 bits are used
+
+protected:
+ TheoraVideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat);
+ // called only by createNew()
+
+ virtual ~TheoraVideoRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ u_int32_t fCurPacketIdent; // only the low-order 24 bits are used
+};
+
+#endif
diff --git a/liveMedia/include/VP8VideoRTPSink.hh b/liveMedia/include/VP8VideoRTPSink.hh
new file mode 100644
index 0000000..9f3012e
--- /dev/null
+++ b/liveMedia/include/VP8VideoRTPSink.hh
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for VP8 video
+// C++ header
+
+#ifndef _VP8_VIDEO_RTP_SINK_HH
+#define _VP8_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class VP8VideoRTPSink: public VideoRTPSink {
+public:
+ static VP8VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+
+protected:
+ VP8VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ // called only by createNew()
+
+ virtual ~VP8VideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual unsigned specialHeaderSize() const;
+};
+
+#endif
diff --git a/liveMedia/include/VP8VideoRTPSource.hh b/liveMedia/include/VP8VideoRTPSource.hh
new file mode 100644
index 0000000..cb1d3a5
--- /dev/null
+++ b/liveMedia/include/VP8VideoRTPSource.hh
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// VP8 Video RTP Sources
+// C++ header
+
+#ifndef _VP8_VIDEO_RTP_SOURCE_HH
+#define _VP8_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class VP8VideoRTPSource: public MultiFramedRTPSource {
+public:
+ static VP8VideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency = 90000);
+
+protected:
+ VP8VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+ virtual ~VP8VideoRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/VP9VideoRTPSink.hh b/liveMedia/include/VP9VideoRTPSink.hh
new file mode 100644
index 0000000..ced515a
--- /dev/null
+++ b/liveMedia/include/VP9VideoRTPSink.hh
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for VP9 video
+// C++ header
+
+#ifndef _VP9_VIDEO_RTP_SINK_HH
+#define _VP9_VIDEO_RTP_SINK_HH
+
+#ifndef _VIDEO_RTP_SINK_HH
+#include "VideoRTPSink.hh"
+#endif
+
+class VP9VideoRTPSink: public VideoRTPSink {
+public:
+ static VP9VideoRTPSink* createNew(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+
+protected:
+ VP9VideoRTPSink(UsageEnvironment& env, Groupsock* RTPgs, unsigned char rtpPayloadFormat);
+ // called only by createNew()
+
+ virtual ~VP9VideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual
+ Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual unsigned specialHeaderSize() const;
+};
+
+#endif
diff --git a/liveMedia/include/VP9VideoRTPSource.hh b/liveMedia/include/VP9VideoRTPSource.hh
new file mode 100644
index 0000000..a812a28
--- /dev/null
+++ b/liveMedia/include/VP9VideoRTPSource.hh
@@ -0,0 +1,50 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// VP9 Video RTP Sources
+// C++ header
+
+#ifndef _VP9_VIDEO_RTP_SOURCE_HH
+#define _VP9_VIDEO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class VP9VideoRTPSource: public MultiFramedRTPSource {
+public:
+ static VP9VideoRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency = 90000);
+
+protected:
+ VP9VideoRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+ virtual ~VP9VideoRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+};
+
+#endif
diff --git a/liveMedia/include/VideoRTPSink.hh b/liveMedia/include/VideoRTPSink.hh
new file mode 100644
index 0000000..6511e73
--- /dev/null
+++ b/liveMedia/include/VideoRTPSink.hh
@@ -0,0 +1,41 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A generic RTP sink for video codecs (abstract base class)
+// C++ header
+
+#ifndef _VIDEO_RTP_SINK_HH
+#define _VIDEO_RTP_SINK_HH
+
+#ifndef _MULTI_FRAMED_RTP_SINK_HH
+#include "MultiFramedRTPSink.hh"
+#endif
+
+class VideoRTPSink: public MultiFramedRTPSink {
+protected:
+ VideoRTPSink(UsageEnvironment& env,
+ Groupsock* rtpgs, unsigned char rtpPayloadType,
+ unsigned rtpTimestampFrequency,
+ char const* rtpPayloadFormatName);
+ // (we're an abstract base class)
+ virtual ~VideoRTPSink();
+
+private: // redefined virtual functions:
+ virtual char const* sdpMediaType() const;
+};
+
+#endif
diff --git a/liveMedia/include/VorbisAudioRTPSink.hh b/liveMedia/include/VorbisAudioRTPSink.hh
new file mode 100644
index 0000000..7d3facf
--- /dev/null
+++ b/liveMedia/include/VorbisAudioRTPSink.hh
@@ -0,0 +1,85 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// RTP sink for Vorbis audio
+// C++ header
+
+#ifndef _VORBIS_AUDIO_RTP_SINK_HH
+#define _VORBIS_AUDIO_RTP_SINK_HH
+
+#ifndef _AUDIO_RTP_SINK_HH
+#include "AudioRTPSink.hh"
+#endif
+
+class VorbisAudioRTPSink: public AudioRTPSink {
+public:
+ static VorbisAudioRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency, unsigned numChannels,
+ // The following headers provide the 'configuration' information, for the SDP description:
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField = 0xFACADE);
+
+ static VorbisAudioRTPSink*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs, u_int8_t rtpPayloadFormat,
+ u_int32_t rtpTimestampFrequency, unsigned numChannels,
+ char const* configStr);
+ // an optional variant of "createNew()" that takes a Base-64-encoded 'configuration' string,
+  // rather than the raw configuration headers as parameters.
+
+protected:
+ VorbisAudioRTPSink(UsageEnvironment& env, Groupsock* RTPgs,
+ u_int8_t rtpPayloadFormat, u_int32_t rtpTimestampFrequency, unsigned numChannels,
+ u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField);
+ // called only by createNew()
+
+ virtual ~VorbisAudioRTPSink();
+
+private: // redefined virtual functions:
+ virtual char const* auxSDPLine(); // for the "a=fmtp:" SDP line
+
+ virtual void doSpecialFrameHandling(unsigned fragmentationOffset,
+ unsigned char* frameStart,
+ unsigned numBytesInFrame,
+ struct timeval framePresentationTime,
+ unsigned numRemainingBytes);
+ virtual Boolean frameCanAppearAfterPacketStart(unsigned char const* frameStart,
+ unsigned numBytesInFrame) const;
+ virtual unsigned specialHeaderSize() const;
+ virtual unsigned frameSpecificHeaderSize() const;
+
+private:
+ u_int32_t fIdent; // "Ident" field used by this stream. (Only the low 24 bits of this are used.)
+ char* fFmtpSDPLine;
+};
+
+
+// A general function used by both "VorbisAudioRTPSink" and "TheoraVideoRTPSink" to construct
+// a Base64-encoded 'config' string (for SDP) from "identification", "comment", "setup" headers.
+// (Note: The result string was heap-allocated, and the caller should delete[] it afterwards.)
+
+char* generateVorbisOrTheoraConfigStr(u_int8_t* identificationHeader, unsigned identificationHeaderSize,
+ u_int8_t* commentHeader, unsigned commentHeaderSize,
+ u_int8_t* setupHeader, unsigned setupHeaderSize,
+ u_int32_t identField);
+
+#endif
diff --git a/liveMedia/include/VorbisAudioRTPSource.hh b/liveMedia/include/VorbisAudioRTPSource.hh
new file mode 100644
index 0000000..8250da8
--- /dev/null
+++ b/liveMedia/include/VorbisAudioRTPSource.hh
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Vorbis Audio RTP Sources
+// C++ header
+
+#ifndef _VORBIS_AUDIO_RTP_SOURCE_HH
+#define _VORBIS_AUDIO_RTP_SOURCE_HH
+
+#ifndef _MULTI_FRAMED_RTP_SOURCE_HH
+#include "MultiFramedRTPSource.hh"
+#endif
+
+class VorbisAudioRTPSource: public MultiFramedRTPSource {
+public:
+ static VorbisAudioRTPSource*
+ createNew(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+
+ u_int32_t curPacketIdent() const { return fCurPacketIdent; } // The current "Ident" field; only the low-order 24 bits are used
+
+protected:
+ VorbisAudioRTPSource(UsageEnvironment& env, Groupsock* RTPgs,
+ unsigned char rtpPayloadFormat,
+ unsigned rtpTimestampFrequency);
+ // called only by createNew()
+
+ virtual ~VorbisAudioRTPSource();
+
+protected:
+ // redefined virtual functions:
+ virtual Boolean processSpecialHeader(BufferedPacket* packet,
+ unsigned& resultSpecialHeaderSize);
+ virtual char const* MIMEtype() const;
+
+private:
+ u_int32_t fCurPacketIdent; // only the low-order 24 bits are used
+};
+
+void parseVorbisOrTheoraConfigStr(char const* configStr,
+ u_int8_t*& identificationHdr, unsigned& identificationHdrSize,
+ u_int8_t*& commentHdr, unsigned& commentHdrSize,
+ u_int8_t*& setupHdr, unsigned& setupHdrSize,
+ u_int32_t& identField);
+ // Returns (in each of the result parameters) unpacked Vorbis or Theora
+ // "identification", "comment", and "setup" headers that were specified in a
+ // "config" string (in the SDP description for a Vorbis/RTP or Theora/RTP stream).
+ // Each of the "*Hdr" result arrays are dynamically allocated by this routine,
+ // and must be delete[]d by the caller.
+
+#endif
diff --git a/liveMedia/include/WAVAudioFileServerMediaSubsession.hh b/liveMedia/include/WAVAudioFileServerMediaSubsession.hh
new file mode 100644
index 0000000..3ffd881
--- /dev/null
+++ b/liveMedia/include/WAVAudioFileServerMediaSubsession.hh
@@ -0,0 +1,68 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A 'ServerMediaSubsession' object that creates new, unicast, "RTPSink"s
+// on demand, from a WAV audio file.
+// C++ header
+
+#ifndef _WAV_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+#define _WAV_AUDIO_FILE_SERVER_MEDIA_SUBSESSION_HH
+
+#ifndef _FILE_SERVER_MEDIA_SUBSESSION_HH
+#include "FileServerMediaSubsession.hh"
+#endif
+
+class WAVAudioFileServerMediaSubsession: public FileServerMediaSubsession{
+public:
+ static WAVAudioFileServerMediaSubsession*
+ createNew(UsageEnvironment& env, char const* fileName, Boolean reuseFirstSource,
+ Boolean convertToULaw = False);
+ // If "convertToULaw" is True, 16-bit audio streams are converted to
+ // 8-bit u-law audio prior to streaming.
+
+protected:
+ WAVAudioFileServerMediaSubsession(UsageEnvironment& env, char const* fileName,
+ Boolean reuseFirstSource, Boolean convertToULaw);
+ // called only by createNew()
+ virtual ~WAVAudioFileServerMediaSubsession();
+
+protected: // redefined virtual functions
+ virtual void seekStreamSource(FramedSource* inputSource, double& seekNPT, double streamDuration, u_int64_t& numBytes);
+ virtual void setStreamSourceScale(FramedSource* inputSource, float scale);
+ virtual void setStreamSourceDuration(FramedSource* inputSource, double streamDuration, u_int64_t& numBytes);
+
+ virtual FramedSource* createNewStreamSource(unsigned clientSessionId,
+ unsigned& estBitrate);
+ virtual RTPSink* createNewRTPSink(Groupsock* rtpGroupsock,
+ unsigned char rtpPayloadTypeIfDynamic,
+ FramedSource* inputSource);
+ virtual void testScaleFactor(float& scale);
+ virtual float duration() const;
+
+protected:
+ Boolean fConvertToULaw;
+
+ // The following parameters of the input stream are set after
+ // "createNewStreamSource" is called:
+ unsigned char fAudioFormat;
+ unsigned char fBitsPerSample;
+ unsigned fSamplingFrequency;
+ unsigned fNumChannels;
+ float fFileDuration;
+};
+
+#endif
diff --git a/liveMedia/include/WAVAudioFileSource.hh b/liveMedia/include/WAVAudioFileSource.hh
new file mode 100644
index 0000000..bffe67b
--- /dev/null
+++ b/liveMedia/include/WAVAudioFileSource.hh
@@ -0,0 +1,86 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// A WAV audio file source
+// NOTE: Samples are returned in little-endian order (the same order in which
+// they were stored in the file).
+// C++ header
+
+#ifndef _WAV_AUDIO_FILE_SOURCE_HH
+#define _WAV_AUDIO_FILE_SOURCE_HH
+
+#ifndef _AUDIO_INPUT_DEVICE_HH
+#include "AudioInputDevice.hh"
+#endif
+
+typedef enum {
+ WA_PCM = 0x01,
+ WA_PCMA = 0x06,
+ WA_PCMU = 0x07,
+ WA_IMA_ADPCM = 0x11,
+ WA_UNKNOWN
+} WAV_AUDIO_FORMAT;
+
+
+class WAVAudioFileSource: public AudioInputDevice {
+public:
+
+ static WAVAudioFileSource* createNew(UsageEnvironment& env,
+ char const* fileName);
+
+ unsigned numPCMBytes() const;
+ void setScaleFactor(int scale);
+ void seekToPCMByte(unsigned byteNumber);
+ void limitNumBytesToStream(unsigned numBytesToStream);
+ // if "numBytesToStream" is >0, then we limit the stream to that number of bytes, before treating it as EOF
+
+ unsigned char getAudioFormat();
+
+protected:
+ WAVAudioFileSource(UsageEnvironment& env, FILE* fid);
+ // called only by createNew()
+
+ virtual ~WAVAudioFileSource();
+
+ static void fileReadableHandler(WAVAudioFileSource* source, int mask);
+ void doReadFromFile();
+
+private:
+ // redefined virtual functions:
+ virtual void doGetNextFrame();
+ virtual void doStopGettingFrames();
+ virtual Boolean setInputPort(int portIndex);
+ virtual double getAverageLevel() const;
+
+protected:
+ unsigned fPreferredFrameSize;
+
+private:
+ FILE* fFid;
+ double fPlayTimePerSample; // useconds
+ Boolean fFidIsSeekable;
+ unsigned fLastPlayTime; // useconds
+ Boolean fHaveStartedReading;
+ unsigned fWAVHeaderSize;
+ unsigned fFileSize;
+ int fScaleFactor;
+ Boolean fLimitNumBytesToStream;
+ unsigned fNumBytesToStream; // used iff "fLimitNumBytesToStream" is True
+ unsigned char fAudioFormat;
+};
+
+#endif
diff --git a/liveMedia/include/liveMedia.hh b/liveMedia/include/liveMedia.hh
new file mode 100644
index 0000000..cb4774d
--- /dev/null
+++ b/liveMedia/include/liveMedia.hh
@@ -0,0 +1,137 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Inclusion of header files representing the interface
+// for the entire library
+//
+// Programs that use the library can include this header file,
+// instead of each of the individual media header files
+
+#ifndef _LIVEMEDIA_HH
+#define _LIVEMEDIA_HH
+#include "JPEG2000VideoRTPSource.hh"
+#include "JPEG2000VideoRTPSink.hh"
+//#include "JPEG2000VideoStreamFramer.hh"
+//#include "JPEG2000VideoFileServerMediaSubsession.hh"
+#include "MPEG1or2AudioRTPSink.hh"
+#include "MP3ADURTPSink.hh"
+#include "MPEG1or2VideoRTPSink.hh"
+#include "MPEG4ESVideoRTPSink.hh"
+#include "AMRAudioFileSink.hh"
+#include "H264VideoFileSink.hh"
+#include "H265VideoFileSink.hh"
+#include "OggFileSink.hh"
+#include "BasicUDPSink.hh"
+#include "GSMAudioRTPSink.hh"
+#include "H263plusVideoRTPSink.hh"
+#include "H264VideoRTPSink.hh"
+#include "H265VideoRTPSink.hh"
+#include "DVVideoRTPSource.hh"
+#include "DVVideoRTPSink.hh"
+#include "DVVideoStreamFramer.hh"
+#include "H264VideoStreamFramer.hh"
+#include "H265VideoStreamFramer.hh"
+#include "H264VideoStreamDiscreteFramer.hh"
+#include "H265VideoStreamDiscreteFramer.hh"
+#include "JPEGVideoRTPSink.hh"
+#include "SimpleRTPSink.hh"
+#include "uLawAudioFilter.hh"
+#include "MPEG2IndexFromTransportStream.hh"
+#include "MPEG2TransportStreamTrickModeFilter.hh"
+#include "ByteStreamMultiFileSource.hh"
+#include "ByteStreamMemoryBufferSource.hh"
+#include "BasicUDPSource.hh"
+#include "SimpleRTPSource.hh"
+#include "MPEG1or2AudioRTPSource.hh"
+#include "MPEG4LATMAudioRTPSource.hh"
+#include "MPEG4LATMAudioRTPSink.hh"
+#include "MPEG4ESVideoRTPSource.hh"
+#include "MPEG4GenericRTPSource.hh"
+#include "MP3ADURTPSource.hh"
+#include "QCELPAudioRTPSource.hh"
+#include "AMRAudioRTPSource.hh"
+#include "JPEGVideoRTPSource.hh"
+#include "JPEGVideoSource.hh"
+#include "MPEG1or2VideoRTPSource.hh"
+#include "VorbisAudioRTPSource.hh"
+#include "TheoraVideoRTPSource.hh"
+#include "VP8VideoRTPSource.hh"
+#include "VP9VideoRTPSource.hh"
+#include "RawVideoRTPSource.hh"
+#include "MPEG2TransportStreamFromPESSource.hh"
+#include "MPEG2TransportStreamFromESSource.hh"
+#include "MPEG2TransportStreamFramer.hh"
+#include "ADTSAudioFileSource.hh"
+#include "H261VideoRTPSource.hh"
+#include "H263plusVideoRTPSource.hh"
+#include "H264VideoRTPSource.hh"
+#include "H265VideoRTPSource.hh"
+#include "MP3FileSource.hh"
+#include "MP3ADU.hh"
+#include "MP3ADUinterleaving.hh"
+#include "MP3Transcoder.hh"
+#include "MPEG1or2DemuxedElementaryStream.hh"
+#include "MPEG1or2AudioStreamFramer.hh"
+#include "H263plusVideoStreamFramer.hh"
+#include "AC3AudioStreamFramer.hh"
+#include "AC3AudioRTPSource.hh"
+#include "AC3AudioRTPSink.hh"
+#include "VorbisAudioRTPSink.hh"
+#include "TheoraVideoRTPSink.hh"
+#include "VP8VideoRTPSink.hh"
+#include "VP9VideoRTPSink.hh"
+#include "MPEG4GenericRTPSink.hh"
+#include "RawVideoRTPSink.hh"
+#include "MPEG1or2VideoStreamDiscreteFramer.hh"
+#include "MPEG4VideoStreamDiscreteFramer.hh"
+#include "DeviceSource.hh"
+#include "AudioInputDevice.hh"
+#include "WAVAudioFileSource.hh"
+#include "StreamReplicator.hh"
+#include "RTSPRegisterSender.hh"
+#include "RTSPServerSupportingHTTPStreaming.hh"
+#include "RTSPClient.hh"
+#include "SIPClient.hh"
+#include "QuickTimeFileSink.hh"
+#include "QuickTimeGenericRTPSource.hh"
+#include "AVIFileSink.hh"
+#include "PassiveServerMediaSubsession.hh"
+#include "MPEG4VideoFileServerMediaSubsession.hh"
+#include "H264VideoFileServerMediaSubsession.hh"
+#include "H265VideoFileServerMediaSubsession.hh"
+#include "WAVAudioFileServerMediaSubsession.hh"
+#include "AMRAudioFileServerMediaSubsession.hh"
+#include "AMRAudioFileSource.hh"
+#include "AMRAudioRTPSink.hh"
+#include "T140TextRTPSink.hh"
+#include "TCPStreamSink.hh"
+#include "MP3AudioFileServerMediaSubsession.hh"
+#include "MPEG1or2VideoFileServerMediaSubsession.hh"
+#include "MPEG1or2FileServerDemux.hh"
+#include "MPEG2TransportFileServerMediaSubsession.hh"
+#include "H263plusVideoFileServerMediaSubsession.hh"
+#include "ADTSAudioFileServerMediaSubsession.hh"
+#include "DVVideoFileServerMediaSubsession.hh"
+#include "AC3AudioFileServerMediaSubsession.hh"
+#include "MPEG2TransportUDPServerMediaSubsession.hh"
+#include "MatroskaFileServerDemux.hh"
+#include "OggFileServerDemux.hh"
+#include "MPEG2TransportStreamDemux.hh"
+#include "ProxyServerMediaSession.hh"
+#include "HLSSegmenter.hh"
+
+#endif
diff --git a/liveMedia/include/liveMedia_version.hh b/liveMedia/include/liveMedia_version.hh
new file mode 100644
index 0000000..6363939
--- /dev/null
+++ b/liveMedia/include/liveMedia_version.hh
@@ -0,0 +1,10 @@
+// Version information for the "liveMedia" library
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+
+#ifndef _LIVEMEDIA_VERSION_HH
+#define _LIVEMEDIA_VERSION_HH
+
+#define LIVEMEDIA_LIBRARY_VERSION_STRING "2020.03.06"
+#define LIVEMEDIA_LIBRARY_VERSION_INT 1583452800
+
+#endif
diff --git a/liveMedia/include/ourMD5.hh b/liveMedia/include/ourMD5.hh
new file mode 100644
index 0000000..722dcd5
--- /dev/null
+++ b/liveMedia/include/ourMD5.hh
@@ -0,0 +1,38 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Because MD5 may not be implemented (at least, with the same interface) on all systems,
+// we have our own implementation.
+// C++ header
+
+#ifndef _OUR_MD5_HH
+#define _OUR_MD5_HH
+
+extern char* our_MD5Data(unsigned char const* data, unsigned dataSize, char* outputDigest);
+ // "outputDigest" must be either NULL (in which case this function returns a heap-allocated
+ // buffer, which should be later delete[]d by the caller), or else it must point to
+ // a (>=)33-byte buffer (which this function will also return).
+
+extern unsigned char* our_MD5DataRaw(unsigned char const* data, unsigned dataSize,
+ unsigned char* outputDigest);
+ // Like "our_MD5Data()", except that it returns the digest in 'raw' binary form, rather than
+ // as an ASCII hex string.
+ // "outputDigest" must be either NULL (in which case this function returns a heap-allocated
+ // buffer, which should be later delete[]d by the caller), or else it must point to
+ // a (>=)16-byte buffer (which this function will also return).
+
+#endif
diff --git a/liveMedia/include/uLawAudioFilter.hh b/liveMedia/include/uLawAudioFilter.hh
new file mode 100644
index 0000000..fd3f5b0
--- /dev/null
+++ b/liveMedia/include/uLawAudioFilter.hh
@@ -0,0 +1,208 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Filters for converting between raw PCM audio and uLaw
+// C++ header
+
+#ifndef _ULAW_AUDIO_FILTER_HH
+#define _ULAW_AUDIO_FILTER_HH
+
+#ifndef _FRAMED_FILTER_HH
+#include "FramedFilter.hh"
+#endif
+
+////////// 16-bit PCM (in various byte orderings) -> 8-bit u-Law //////////
+
+// Filter that converts 16-bit PCM input frames to 8-bit u-Law output frames.
+class uLawFromPCMAudioSource: public FramedFilter {
+public:
+ static uLawFromPCMAudioSource*
+ createNew(UsageEnvironment& env, FramedSource* inputSource,
+ int byteOrdering = 0);
+ // "byteOrdering" == 0 => host order (the default)
+ // "byteOrdering" == 1 => little-endian order
+ // "byteOrdering" == 2 => network (i.e., big-endian) order
+
+protected:
+ uLawFromPCMAudioSource(UsageEnvironment& env, FramedSource* inputSource,
+ int byteOrdering);
+ // called only by createNew()
+ virtual ~uLawFromPCMAudioSource();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ int fByteOrdering; // 0, 1, or 2 (see "createNew()" above)
+ unsigned char* fInputBuffer; // holds raw PCM input prior to conversion
+ unsigned fInputBufferSize;
+};
+
+
+////////// u-Law -> 16-bit PCM (in host order) //////////
+
+// Filter that converts 8-bit u-Law input frames to 16-bit PCM (host order) output frames.
+class PCMFromuLawAudioSource: public FramedFilter {
+public:
+ static PCMFromuLawAudioSource*
+ createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+protected:
+ PCMFromuLawAudioSource(UsageEnvironment& env,
+ FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~PCMFromuLawAudioSource();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+
+private:
+ unsigned char* fInputBuffer; // holds u-Law input prior to conversion
+ unsigned fInputBufferSize;
+};
+
+
+////////// 16-bit values (in host order) -> 16-bit network order //////////
+
+// Filter that converts a stream of 16-bit values from host order to network (big-endian) order.
+class NetworkFromHostOrder16: public FramedFilter {
+public:
+ static NetworkFromHostOrder16*
+ createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+protected:
+ NetworkFromHostOrder16(UsageEnvironment& env, FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~NetworkFromHostOrder16();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+};
+
+
+////////// 16-bit values (in network order) -> 16-bit host order //////////
+
+// Filter that converts a stream of 16-bit values from network (big-endian) order to host order.
+class HostFromNetworkOrder16: public FramedFilter {
+public:
+ static HostFromNetworkOrder16*
+ createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+protected:
+ HostFromNetworkOrder16(UsageEnvironment& env, FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~HostFromNetworkOrder16();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+};
+
+
+////////// 16-bit values: little-endian <-> big-endian //////////
+
+// Filter that byte-swaps each 16-bit value in the stream (little-endian <-> big-endian).
+class EndianSwap16: public FramedFilter {
+public:
+ static EndianSwap16* createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+protected:
+ EndianSwap16(UsageEnvironment& env, FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~EndianSwap16();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+};
+
+
+////////// 24-bit values: little-endian <-> big-endian //////////
+
+// Filter that reverses the byte order of each 24-bit value in the stream (little-endian <-> big-endian).
+class EndianSwap24: public FramedFilter {
+public:
+ static EndianSwap24* createNew(UsageEnvironment& env, FramedSource* inputSource);
+
+protected:
+ EndianSwap24(UsageEnvironment& env, FramedSource* inputSource);
+ // called only by createNew()
+ virtual ~EndianSwap24();
+
+private:
+ // Redefined virtual functions:
+ virtual void doGetNextFrame();
+
+private:
+ static void afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+ void afterGettingFrame1(unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds);
+};
+
+#endif
diff --git a/liveMedia/ourMD5.cpp b/liveMedia/ourMD5.cpp
new file mode 100644
index 0000000..a9a67c3
--- /dev/null
+++ b/liveMedia/ourMD5.cpp
@@ -0,0 +1,325 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Because MD5 may not be implemented (at least, with the same interface) on all systems,
+// we have our own implementation.
+// Implementation
+
+#include "ourMD5.hh"
+#include <NetCommon.h> // for u_int32_t, u_int64_t
+#include <string.h>
+
+#define DIGEST_SIZE_IN_BYTES 16
+#define DIGEST_SIZE_IN_HEX_DIGITS (2*DIGEST_SIZE_IN_BYTES)
+#define DIGEST_SIZE_AS_STRING (DIGEST_SIZE_IN_HEX_DIGITS+1)
+
+// The state of a MD5 computation in progress:
+
+class MD5Context {
+public:
+ MD5Context();
+ ~MD5Context();
+
+ void addData(unsigned char const* inputData, unsigned inputDataSize);
+ // Absorbs "inputDataSize" more input bytes into the computation; may be called repeatedly.
+ void end(char* outputDigest /*must point to an array of size DIGEST_SIZE_AS_STRING*/);
+ // Completes the computation, writing the digest as a NUL-terminated hex string.
+ void finalize(unsigned char* outputDigestInBytes);
+ // Like "end()", except that the argument is a byte array, of size DIGEST_SIZE_IN_BYTES.
+ // This function is used to implement "end()".
+
+private:
+ void zeroize(); // to remove potentially sensitive information
+ void transform64Bytes(unsigned char const block[64]); // does the actual MD5 transform
+
+private:
+ u_int32_t fState[4]; // ABCD
+ u_int64_t fBitCount; // number of bits, modulo 2^64
+ unsigned char fWorkingBuffer[64]; // input bytes not yet transformed (a partial 64-byte block)
+};
+
+// Computes the MD5 digest of "data", returned as a NUL-terminated hex string
+// (DIGEST_SIZE_AS_STRING == 33 bytes, including the NUL).
+// If "outputDigest" is NULL, a buffer is new[]-allocated; the caller must later delete[] it.
+char* our_MD5Data(unsigned char const* data, unsigned dataSize, char* outputDigest) {
+ MD5Context ctx;
+
+ ctx.addData(data, dataSize);
+
+ if (outputDigest == NULL) outputDigest = new char[DIGEST_SIZE_AS_STRING];
+ ctx.end(outputDigest);
+
+ return outputDigest;
+}
+
+// Computes the MD5 digest of "data", returned as DIGEST_SIZE_IN_BYTES (16) raw binary bytes.
+// If "outputDigest" is NULL, a buffer is new[]-allocated; the caller must later delete[] it.
+unsigned char* our_MD5DataRaw(unsigned char const* data, unsigned dataSize,
+ unsigned char* outputDigest) {
+ MD5Context ctx;
+
+ ctx.addData(data, dataSize);
+
+ if (outputDigest == NULL) outputDigest = new unsigned char[DIGEST_SIZE_IN_BYTES];
+ ctx.finalize(outputDigest);
+
+ return outputDigest;
+}
+
+
+////////// MD5Context implementation //////////
+
+MD5Context::MD5Context()
+ : fBitCount(0) {
+ // Initialize with magic constants (the standard MD5 initial state A, B, C, D):
+ fState[0] = 0x67452301;
+ fState[1] = 0xefcdab89;
+ fState[2] = 0x98badcfe;
+ fState[3] = 0x10325476;
+}
+
+MD5Context::~MD5Context() {
+ zeroize(); // clear our state, so potentially sensitive data doesn't linger in memory
+}
+
+// Absorbs "inputDataSize" more input bytes into the ongoing MD5 computation.
+// Whole 64-byte blocks are transformed immediately; any leftover bytes are
+// buffered in "fWorkingBuffer" until a later call (or "finalize()") completes a block.
+void MD5Context::addData(unsigned char const* inputData, unsigned inputDataSize) {
+ // Begin by noting how much of our 64-byte working buffer remains unfilled:
+ u_int64_t const byteCount = fBitCount>>3;
+ unsigned bufferBytesInUse = (unsigned)(byteCount&0x3F);
+ unsigned bufferBytesRemaining = 64 - bufferBytesInUse;
+
+ // Then update our bit count.  (Note: widen "inputDataSize" to 64 bits *before*
+ // shifting; otherwise inputs of 512MB or more would overflow the 32-bit
+ // intermediate, corrupting the length padding and thus the digest.)
+ fBitCount += ((u_int64_t)inputDataSize)<<3;
+
+ unsigned i = 0;
+ if (inputDataSize >= bufferBytesRemaining) {
+ // We have enough input data to do (64-byte) MD5 transforms.
+ // Do this now, starting with a transform on our working buffer, then with
+ // (as many as possible) transforms on rest of the input data.
+
+ memcpy((unsigned char*)&fWorkingBuffer[bufferBytesInUse], (unsigned char*)inputData, bufferBytesRemaining);
+ transform64Bytes(fWorkingBuffer);
+ bufferBytesInUse = 0;
+
+ for (i = bufferBytesRemaining; i + 63 < inputDataSize; i += 64) {
+ transform64Bytes(&inputData[i]);
+ }
+ }
+
+ // Copy any remaining (and currently un-transformed) input data into our working buffer:
+ if (i < inputDataSize) {
+ memcpy((unsigned char*)&fWorkingBuffer[bufferBytesInUse], (unsigned char*)&inputData[i], inputDataSize - i);
+ }
+}
+
+// Completes the computation, writing the digest into "outputDigest" as a
+// NUL-terminated, lowercase hex string of DIGEST_SIZE_IN_HEX_DIGITS characters.
+void MD5Context::end(char* outputDigest) {
+ unsigned char digestInBytes[DIGEST_SIZE_IN_BYTES];
+ finalize(digestInBytes);
+
+ // Convert the digest from bytes (binary) to hex digits:
+ static char const hex[]="0123456789abcdef";
+ unsigned i;
+ for (i = 0; i < DIGEST_SIZE_IN_BYTES; ++i) {
+ outputDigest[2*i] = hex[digestInBytes[i] >> 4];
+ outputDigest[2*i+1] = hex[digestInBytes[i] & 0x0F];
+ }
+ outputDigest[2*i] = '\0'; // NUL-terminate (i == DIGEST_SIZE_IN_BYTES here)
+}
+
+// Routines that unpack 32 and 64-bit values into arrays of bytes (in little-endian order).
+// (These are used to implement "finalize()".)
+
+static void unpack32(unsigned char out[4], u_int32_t in) {
+ // least-significant byte first:
+ for (unsigned i = 0; i < 4; ++i) {
+ out[i] = (unsigned char)((in>>(8*i))&0xFF);
+ }
+}
+
+static void unpack64(unsigned char out[8], u_int64_t in) {
+ // least-significant byte first:
+ for (unsigned i = 0; i < 8; ++i) {
+ out[i] = (unsigned char)((in>>(8*i))&0xFF);
+ }
+}
+
+// MD5 message padding: a single 0x80 byte, followed by as many zero bytes as needed
+// (at most 63 more are ever used; see the "numPaddingBytes" computation in "finalize()"):
+static unsigned char const PADDING[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+// Pads the message, appends the length, and writes the 16-byte binary digest.
+// Afterwards, the context is zeroized and cannot be reused without re-construction.
+void MD5Context::finalize(unsigned char* outputDigestInBytes) {
+ // Unpack our bit count:
+ // (Note: this must be captured *before* the "addData()" calls below, which update "fBitCount".)
+ unsigned char bitCountInBytes[8];
+ unpack64(bitCountInBytes, fBitCount);
+
+ // Before 'finalizing', make sure that we transform any remaining bytes in our working buffer:
+ u_int64_t const byteCount = fBitCount>>3;
+ unsigned bufferBytesInUse = (unsigned)(byteCount&0x3F);
+ unsigned numPaddingBytes
+ = (bufferBytesInUse < 56) ? (56 - bufferBytesInUse) : (64 + 56 - bufferBytesInUse);
+ addData(PADDING, numPaddingBytes);
+
+ addData(bitCountInBytes, 8); // with the length appended, the data is now a multiple of 64 bytes
+
+ // Unpack our 'state' into the output digest:
+ unpack32(&outputDigestInBytes[0], fState[0]);
+ unpack32(&outputDigestInBytes[4], fState[1]);
+ unpack32(&outputDigestInBytes[8], fState[2]);
+ unpack32(&outputDigestInBytes[12], fState[3]);
+
+ zeroize();
+}
+
+// Clears all state, so that potentially sensitive information doesn't linger in memory:
+void MD5Context::zeroize() {
+ fState[0] = fState[1] = fState[2] = fState[3] = 0;
+ fBitCount = 0;
+ for (unsigned i = 0; i < 64; ++i) fWorkingBuffer[i] = 0;
+}
+
+
+////////// Implementation of the MD5 transform ("MD5Context::transform64Bytes()") //////////
+
+// Constants for the transform (per-round left-rotation amounts):
+#define S11 7
+#define S12 12
+#define S13 17
+#define S14 22
+#define S21 5
+#define S22 9
+#define S23 14
+#define S24 20
+#define S31 4
+#define S32 11
+#define S33 16
+#define S34 23
+#define S41 6
+#define S42 10
+#define S43 15
+#define S44 21
+
+// Basic MD5 functions (the four auxiliary bitwise functions from the MD5 specification):
+#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
+#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | (~z)))
+
+// Rotate "x" left "n" bits:
+#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
+
+// Other transforms: one step each of rounds 1-4, respectively.
+// Each updates "a" in place from "b","c","d", the message word "x",
+// the rotation amount "s", and the additive constant "ac".
+#define FF(a, b, c, d, x, s, ac) { \
+ (a) += F((b), (c), (d)) + (x) + (u_int32_t)(ac); \
+ (a) = ROTATE_LEFT((a), (s)); \
+ (a) += (b); \
+}
+#define GG(a, b, c, d, x, s, ac) { \
+ (a) += G((b), (c), (d)) + (x) + (u_int32_t)(ac); \
+ (a) = ROTATE_LEFT((a), (s)); \
+ (a) += (b); \
+}
+#define HH(a, b, c, d, x, s, ac) { \
+ (a) += H((b), (c), (d)) + (x) + (u_int32_t)(ac); \
+ (a) = ROTATE_LEFT((a), (s)); \
+ (a) += (b); \
+}
+#define II(a, b, c, d, x, s, ac) { \
+ (a) += I((b), (c), (d)) + (x) + (u_int32_t)(ac); \
+ (a) = ROTATE_LEFT((a), (s)); \
+ (a) += (b); \
+}
+
+// Runs the core MD5 compression function on one 64-byte input block, updating "fState".
+// (The four rounds, message-word schedule, shift amounts, and additive constants
+// follow the MD5 specification; do not alter them.)
+void MD5Context::transform64Bytes(unsigned char const block[64]) {
+ u_int32_t a = fState[0], b = fState[1], c = fState[2], d = fState[3];
+
+ // Begin by packing "block" into an array ("x") of 16 32-bit values (in little-endian order):
+ u_int32_t x[16];
+ for (unsigned i = 0, j = 0; i < 16; ++i, j += 4) {
+ x[i] = ((u_int32_t)block[j]) | (((u_int32_t)block[j+1]) << 8) | (((u_int32_t)block[j+2]) << 16) | (((u_int32_t)block[j+3]) << 24);
+ }
+
+ // Now, perform the transform on the array "x":
+
+ // Round 1
+ FF(a, b, c, d, x[0], S11, 0xd76aa478); // 1
+ FF(d, a, b, c, x[1], S12, 0xe8c7b756); // 2
+ FF(c, d, a, b, x[2], S13, 0x242070db); // 3
+ FF(b, c, d, a, x[3], S14, 0xc1bdceee); // 4
+ FF(a, b, c, d, x[4], S11, 0xf57c0faf); // 5
+ FF(d, a, b, c, x[5], S12, 0x4787c62a); // 6
+ FF(c, d, a, b, x[6], S13, 0xa8304613); // 7
+ FF(b, c, d, a, x[7], S14, 0xfd469501); // 8
+ FF(a, b, c, d, x[8], S11, 0x698098d8); // 9
+ FF(d, a, b, c, x[9], S12, 0x8b44f7af); // 10
+ FF(c, d, a, b, x[10], S13, 0xffff5bb1); // 11
+ FF(b, c, d, a, x[11], S14, 0x895cd7be); // 12
+ FF(a, b, c, d, x[12], S11, 0x6b901122); // 13
+ FF(d, a, b, c, x[13], S12, 0xfd987193); // 14
+ FF(c, d, a, b, x[14], S13, 0xa679438e); // 15
+ FF(b, c, d, a, x[15], S14, 0x49b40821); // 16
+
+ // Round 2
+ GG(a, b, c, d, x[1], S21, 0xf61e2562); // 17
+ GG(d, a, b, c, x[6], S22, 0xc040b340); // 18
+ GG(c, d, a, b, x[11], S23, 0x265e5a51); // 19
+ GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); // 20
+ GG(a, b, c, d, x[5], S21, 0xd62f105d); // 21
+ GG(d, a, b, c, x[10], S22, 0x2441453); // 22
+ GG(c, d, a, b, x[15], S23, 0xd8a1e681); // 23
+ GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); // 24
+ GG(a, b, c, d, x[9], S21, 0x21e1cde6); // 25
+ GG(d, a, b, c, x[14], S22, 0xc33707d6); // 26
+ GG(c, d, a, b, x[3], S23, 0xf4d50d87); // 27
+ GG(b, c, d, a, x[8], S24, 0x455a14ed); // 28
+ GG(a, b, c, d, x[13], S21, 0xa9e3e905); // 29
+ GG(d, a, b, c, x[2], S22, 0xfcefa3f8); // 30
+ GG(c, d, a, b, x[7], S23, 0x676f02d9); // 31
+ GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); // 32
+
+ // Round 3
+ HH(a, b, c, d, x[5], S31, 0xfffa3942); // 33
+ HH(d, a, b, c, x[8], S32, 0x8771f681); // 34
+ HH(c, d, a, b, x[11], S33, 0x6d9d6122); // 35
+ HH(b, c, d, a, x[14], S34, 0xfde5380c); // 36
+ HH(a, b, c, d, x[1], S31, 0xa4beea44); // 37
+ HH(d, a, b, c, x[4], S32, 0x4bdecfa9); // 38
+ HH(c, d, a, b, x[7], S33, 0xf6bb4b60); // 39
+ HH(b, c, d, a, x[10], S34, 0xbebfbc70); // 40
+ HH(a, b, c, d, x[13], S31, 0x289b7ec6); // 41
+ HH(d, a, b, c, x[0], S32, 0xeaa127fa); // 42
+ HH(c, d, a, b, x[3], S33, 0xd4ef3085); // 43
+ HH(b, c, d, a, x[6], S34, 0x4881d05); // 44
+ HH(a, b, c, d, x[9], S31, 0xd9d4d039); // 45
+ HH(d, a, b, c, x[12], S32, 0xe6db99e5); // 46
+ HH(c, d, a, b, x[15], S33, 0x1fa27cf8); // 47
+ HH(b, c, d, a, x[2], S34, 0xc4ac5665); // 48
+
+ // Round 4
+ II(a, b, c, d, x[0], S41, 0xf4292244); // 49
+ II(d, a, b, c, x[7], S42, 0x432aff97); // 50
+ II(c, d, a, b, x[14], S43, 0xab9423a7); // 51
+ II(b, c, d, a, x[5], S44, 0xfc93a039); // 52
+ II(a, b, c, d, x[12], S41, 0x655b59c3); // 53
+ II(d, a, b, c, x[3], S42, 0x8f0ccc92); // 54
+ II(c, d, a, b, x[10], S43, 0xffeff47d); // 55
+ II(b, c, d, a, x[1], S44, 0x85845dd1); // 56
+ II(a, b, c, d, x[8], S41, 0x6fa87e4f); // 57
+ II(d, a, b, c, x[15], S42, 0xfe2ce6e0); // 58
+ II(c, d, a, b, x[6], S43, 0xa3014314); // 59
+ II(b, c, d, a, x[13], S44, 0x4e0811a1); // 60
+ II(a, b, c, d, x[4], S41, 0xf7537e82); // 61
+ II(d, a, b, c, x[11], S42, 0xbd3af235); // 62
+ II(c, d, a, b, x[2], S43, 0x2ad7d2bb); // 63
+ II(b, c, d, a, x[9], S44, 0xeb86d391); // 64
+
+ fState[0] += a; fState[1] += b; fState[2] += c; fState[3] += d;
+
+ // Zeroize sensitive information.
+ for (unsigned k = 0; k < 16; ++k) x[k] = 0;
+}
diff --git a/liveMedia/rtcp_from_spec.c b/liveMedia/rtcp_from_spec.c
new file mode 100644
index 0000000..a828ec7
--- /dev/null
+++ b/liveMedia/rtcp_from_spec.c
@@ -0,0 +1,289 @@
+/* RTCP code taken directly from the most recent RTP specification:
+ * RFC 3550
+ * Implementation
+ */
+
+#include "rtcp_from_spec.h"
+
+/*****
+
+A.7 Computing the RTCP Transmission Interval
+
+ The following functions implement the RTCP transmission and reception
+ rules described in Section 6.2. These rules are coded in several
+ functions:
+
+ o rtcp_interval() computes the deterministic calculated
+ interval, measured in seconds. The parameters are defined in
+ Section 6.3.
+
+ o OnExpire() is called when the RTCP transmission timer expires.
+
+ o OnReceive() is called whenever an RTCP packet is received.
+
+ Both OnExpire() and OnReceive() have event e as an argument. This is
+ the next scheduled event for that participant, either an RTCP report
+ or a BYE packet. It is assumed that the following functions are
+ available:
+
+ o Schedule(time t, event e) schedules an event e to occur at
+ time t. When time t arrives, the function OnExpire is called
+ with e as an argument.
+
+ o Reschedule(time t, event e) reschedules a previously scheduled
+ event e for time t.
+
+ o SendRTCPReport(event e) sends an RTCP report.
+
+ o SendBYEPacket(event e) sends a BYE packet.
+
+ o TypeOfEvent(event e) returns EVENT_BYE if the event being
+ processed is for a BYE packet to be sent, else it returns
+ EVENT_REPORT.
+
+ o PacketType(p) returns PACKET_RTCP_REPORT if packet p is an
+ RTCP report (not BYE), PACKET_BYE if its a BYE RTCP packet,
+ and PACKET_RTP if its a regular RTP data packet.
+
+ o ReceivedPacketSize() and SentPacketSize() return the size of
+ the referenced packet in octets.
+
+ o NewMember(p) returns a 1 if the participant who sent packet p
+ is not currently in the member list, 0 otherwise. Note this
+ function is not sufficient for a complete implementation
+ because each CSRC identifier in an RTP packet and each SSRC in
+ a BYE packet should be processed.
+
+ o NewSender(p) returns a 1 if the participant who sent packet p
+ is not currently in the sender sublist of the member list, 0
+ otherwise.
+
+ o AddMember() and RemoveMember() to add and remove participants
+ from the member list.
+
+ o AddSender() and RemoveSender() to add and remove participants
+ from the sender sublist of the member list.
+*****/
+
+
+ /* Computes the deterministic RTCP transmission interval (in seconds),
+ * per RFC 3550 Section 6.3 / Appendix A.7.  This code is taken verbatim
+ * from the specification; keep it in sync with the RFC.
+ * (Note: "drand48" is #define'd to "drand30" in rtcp_from_spec.h.) */
+ double rtcp_interval(int members,
+ int senders,
+ double rtcp_bw,
+ int we_sent,
+ double avg_rtcp_size,
+ int initial)
+ {
+ /*
+ * Minimum average time between RTCP packets from this site (in
+ * seconds). This time prevents the reports from `clumping' when
+ * sessions are small and the law of large numbers isn't helping
+ * to smooth out the traffic. It also keeps the report interval
+ * from becoming ridiculously small during transient outages like
+ * a network partition.
+ */
+ double const RTCP_MIN_TIME = 5.;
+ /*
+ * Fraction of the RTCP bandwidth to be shared among active
+ * senders. (This fraction was chosen so that in a typical
+ * session with one or two active senders, the computed report
+ * time would be roughly equal to the minimum report time so that
+ * we don't unnecessarily slow down receiver reports.) The
+ * receiver fraction must be 1 - the sender fraction.
+ */
+ double const RTCP_SENDER_BW_FRACTION = 0.25;
+ double const RTCP_RCVR_BW_FRACTION = (1-RTCP_SENDER_BW_FRACTION);
+ /*
+ * To compensate for "unconditional reconsideration" converging to a
+ * value below the intended average.
+ */
+ double const COMPENSATION = 2.71828 - 1.5;
+
+ double t; /* interval */
+ double rtcp_min_time = RTCP_MIN_TIME;
+ int n; /* no. of members for computation */
+
+ /*
+ * Very first call at application start-up uses half the min
+ * delay for quicker notification while still allowing some time
+ * before reporting for randomization and to learn about other
+ * sources so the report interval will converge to the correct
+ * interval more quickly.
+ */
+ if (initial) {
+ rtcp_min_time /= 2;
+ }
+
+ /*
+ * If there were active senders, give them at least a minimum
+ * share of the RTCP bandwidth. Otherwise all participants share
+ * the RTCP bandwidth equally.
+ */
+ n = members;
+ if (senders > 0 && senders < members * RTCP_SENDER_BW_FRACTION) {
+ if (we_sent) {
+ rtcp_bw *= RTCP_SENDER_BW_FRACTION;
+ n = senders;
+ } else {
+ rtcp_bw *= RTCP_RCVR_BW_FRACTION;
+ n -= senders;
+ }
+ }
+
+ /*
+ * The effective number of sites times the average packet size is
+ * the total number of octets sent when each site sends a report.
+ * Dividing this by the effective bandwidth gives the time
+ * interval over which those packets must be sent in order to
+ * meet the bandwidth target, with a minimum enforced. In that
+ * time interval we send one report so this time is also our
+ * average time between reports.
+ */
+ t = avg_rtcp_size * n / rtcp_bw;
+ if (t < rtcp_min_time) t = rtcp_min_time;
+
+ /*
+ * To avoid traffic bursts from unintended synchronization with
+ * other sites, we then pick our actual next report interval as a
+ * random number uniformly distributed between 0.5*t and 1.5*t.
+ */
+ t = t * (drand48() + 0.5);
+ t = t / COMPENSATION;
+ return t;
+ }
+
+ /* Called when the RTCP transmission timer expires (RFC 3550 Appendix A.7).
+ * This code is taken verbatim from the specification; keep it in sync with the RFC. */
+ void OnExpire(event e,
+ int members,
+ int senders,
+ double rtcp_bw,
+ int we_sent,
+ double *avg_rtcp_size,
+ int *initial,
+ time_tp tc,
+ time_tp *tp,
+ int *pmembers)
+ {
+ /* This function is responsible for deciding whether to send
+ * an RTCP report or BYE packet now, or to reschedule transmission.
+ * It is also responsible for updating the pmembers, initial, tp,
+ * and avg_rtcp_size state variables. This function should be called
+ * upon expiration of the event timer used by Schedule(). */
+
+ double t; /* Interval */
+ double tn; /* Next transmit time */
+
+ /* In the case of a BYE, we use "unconditional reconsideration" to
+ * reschedule the transmission of the BYE if necessary */
+
+ if (TypeOfEvent(e) == EVENT_BYE) {
+ t = rtcp_interval(members,
+ senders,
+ rtcp_bw,
+ we_sent,
+ *avg_rtcp_size,
+ *initial);
+ tn = *tp + t;
+ if (tn <= tc) {
+ SendBYEPacket(e);
+ exit(1); /* note: "exit()" is #define'd to a no-op in rtcp_from_spec.h */
+ } else {
+ Schedule(tn, e);
+ }
+
+ } else if (TypeOfEvent(e) == EVENT_REPORT) {
+ t = rtcp_interval(members,
+ senders,
+ rtcp_bw,
+ we_sent,
+ *avg_rtcp_size,
+ *initial);
+ tn = *tp + t;
+
+ if (tn <= tc) {
+ SendRTCPReport(e);
+ *avg_rtcp_size = (1./16.)*SentPacketSize(e) +
+ (15./16.)*(*avg_rtcp_size);
+ *tp = tc;
+
+ /* We must redraw the interval. Don't reuse the
+ one computed above, since its not actually
+ distributed the same, as we are conditioned
+ on it being small enough to cause a packet to
+ be sent */
+
+ t = rtcp_interval(members,
+ senders,
+ rtcp_bw,
+ we_sent,
+ *avg_rtcp_size,
+ *initial);
+
+ Schedule(t+tc,e);
+ *initial = 0;
+ } else {
+ Schedule(tn, e);
+ }
+ *pmembers = members;
+ }
+ }
+
+
+ /* Called whenever an RTP or RTCP packet is received (RFC 3550 Appendix A.7);
+ * maintains the member/sender lists and implements "reverse reconsideration"
+ * when the member count shrinks due to BYEs.
+ * This code is taken verbatim from the specification; keep it in sync with the RFC. */
+ void OnReceive(packet p,
+ event e,
+ int *members,
+ int *pmembers,
+ int *senders,
+ double *avg_rtcp_size,
+ double *tp,
+ double tc,
+ double tn)
+ {
+ /* What we do depends on whether we have left the group, and
+ * are waiting to send a BYE (TypeOfEvent(e) == EVENT_BYE) or
+ * an RTCP report. p represents the packet that was just received. */
+
+ if (PacketType(p) == PACKET_RTCP_REPORT) {
+ if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
+ AddMember(p);
+ *members += 1;
+ }
+ *avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) +
+ (15./16.)*(*avg_rtcp_size);
+ } else if (PacketType(p) == PACKET_RTP) {
+ if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
+ AddMember(p);
+ *members += 1;
+ }
+ if (NewSender(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
+ AddSender(p);
+ *senders += 1;
+ }
+ } else if (PacketType(p) == PACKET_BYE) {
+ *avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) +
+ (15./16.)*(*avg_rtcp_size);
+
+ if (TypeOfEvent(e) == EVENT_REPORT) {
+ if (NewSender(p) == FALSE) {
+ RemoveSender(p);
+ *senders -= 1;
+ }
+
+ if (NewMember(p) == FALSE) {
+ RemoveMember(p);
+ *members -= 1;
+ }
+
+ if(*members < *pmembers) {
+ tn = tc + (((double) *members)/(*pmembers))*(tn - tc);
+ *tp = tc - (((double) *members)/(*pmembers))*(tc - *tp);
+
+ /* Reschedule the next report for time tn */
+
+ Reschedule(tn, e);
+ *pmembers = *members;
+ }
+
+ } else if (TypeOfEvent(e) == EVENT_BYE) {
+ *members += 1;
+ }
+ }
+ }
diff --git a/liveMedia/rtcp_from_spec.h b/liveMedia/rtcp_from_spec.h
new file mode 100644
index 0000000..629971f
--- /dev/null
+++ b/liveMedia/rtcp_from_spec.h
@@ -0,0 +1,82 @@
/* RTCP code taken directly from the most recent RTP specification:
 * draft-ietf-avt-rtp-new-11.txt
 * C header
 */

#ifndef _RTCP_FROM_SPEC_H
#define _RTCP_FROM_SPEC_H

#include <stdlib.h>

/* Definitions of _ANSI_ARGS and EXTERN that will work in either
 C or C++ code:
 */
#undef _ANSI_ARGS_
#if ((defined(__STDC__) || defined(SABER)) && !defined(NO_PROTOTYPE)) || defined(__cplusplus) || defined(USE_PROTOTYPE)
# define _ANSI_ARGS_(x) x
#else
# define _ANSI_ARGS_(x) ()
#endif
#ifdef __cplusplus
# define EXTERN extern "C"
#else
# define EXTERN extern
#endif

/* The code from the spec assumes a type "event"; make this a void*: */
typedef void* event;

/* Values returned by TypeOfEvent(): */
#define EVENT_UNKNOWN 0
#define EVENT_REPORT 1
#define EVENT_BYE 2

/* The code from the spec assumes a type "time_tp"; make this a double: */
typedef double time_tp;

/* The code from the spec assumes a type "packet"; make this a void*: */
typedef void* packet;

/* Values returned by PacketType(): */
#define PACKET_UNKNOWN_TYPE 0
#define PACKET_RTP 1
#define PACKET_RTCP_REPORT 2
#define PACKET_BYE 3
#define PACKET_RTCP_APP 4

/* The code from the spec calls drand48(), but we have drand30() instead */
#define drand48 drand30

/* The code calls "exit()", but we don't want to exit, so make it a noop: */
#define exit(n) do {} while (0)

#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif

/* EXPORTS: */
/* (implemented by the spec code itself, in "rtcp_from_spec.c") */

EXTERN void OnExpire _ANSI_ARGS_((event, int, int, double, int, double*, int*, time_tp, time_tp*, int*));

EXTERN void OnReceive _ANSI_ARGS_((packet, event, int*, int*, int*, double*, double*, double, double));

/* IMPORTS: */
/* (must be supplied by the surrounding implementation that uses the spec code) */

EXTERN void Schedule _ANSI_ARGS_((double,event));
EXTERN void Reschedule _ANSI_ARGS_((double,event));
EXTERN void SendRTCPReport _ANSI_ARGS_((event));
EXTERN void SendBYEPacket _ANSI_ARGS_((event));
EXTERN int TypeOfEvent _ANSI_ARGS_((event));
EXTERN int SentPacketSize _ANSI_ARGS_((event));
EXTERN int PacketType _ANSI_ARGS_((packet));
EXTERN int ReceivedPacketSize _ANSI_ARGS_((packet));
EXTERN int NewMember _ANSI_ARGS_((packet));
EXTERN int NewSender _ANSI_ARGS_((packet));
EXTERN void AddMember _ANSI_ARGS_((packet));
EXTERN void AddSender _ANSI_ARGS_((packet));
EXTERN void RemoveMember _ANSI_ARGS_((packet));
EXTERN void RemoveSender _ANSI_ARGS_((packet));
EXTERN double drand30 _ANSI_ARGS_((void));

#endif
diff --git a/liveMedia/uLawAudioFilter.cpp b/liveMedia/uLawAudioFilter.cpp
new file mode 100644
index 0000000..1954b47
--- /dev/null
+++ b/liveMedia/uLawAudioFilter.cpp
@@ -0,0 +1,431 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// "liveMedia"
+// Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved.
+// Filters for converting between raw PCM audio and uLaw
+// Implementation
+
+#include "uLawAudioFilter.hh"
+
+////////// 16-bit PCM (in various byte orders) -> 8-bit u-Law //////////
+
+uLawFromPCMAudioSource* uLawFromPCMAudioSource
+::createNew(UsageEnvironment& env, FramedSource* inputSource, int byteOrdering) {
+ // "byteOrdering" must be 0, 1, or 2:
+ if (byteOrdering < 0 || byteOrdering > 2) {
+ env.setResultMsg("uLawFromPCMAudioSource::createNew(): bad \"byteOrdering\" parameter");
+ return NULL;
+ }
+ return new uLawFromPCMAudioSource(env, inputSource, byteOrdering);
+}
+
// Constructor: called only from "createNew()".  No staging buffer is
// allocated yet; "doGetNextFrame()" grows it on demand.
uLawFromPCMAudioSource
::uLawFromPCMAudioSource(UsageEnvironment& env, FramedSource* inputSource,
                         int byteOrdering)
  : FramedFilter(env, inputSource),
    fByteOrdering(byteOrdering), fInputBuffer(NULL), fInputBufferSize(0) {
}
+
uLawFromPCMAudioSource::~uLawFromPCMAudioSource() {
  delete[] fInputBuffer; // release the 16-bit-PCM staging buffer
}
+
+void uLawFromPCMAudioSource::doGetNextFrame() {
+ // Figure out how many bytes of input data to ask for, and increase
+ // our input buffer if necessary:
+ unsigned bytesToRead = fMaxSize*2; // because we're converting 16 bits->8
+ if (bytesToRead > fInputBufferSize) {
+ delete[] fInputBuffer; fInputBuffer = new unsigned char[bytesToRead];
+ fInputBufferSize = bytesToRead;
+ }
+
+ // Arrange to read samples into the input buffer:
+ fInputSource->getNextFrame(fInputBuffer, bytesToRead,
+ afterGettingFrame, this,
+ FramedSource::handleClosure, this);
+}
+
+void uLawFromPCMAudioSource
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ uLawFromPCMAudioSource* source = (uLawFromPCMAudioSource*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
#define BIAS 0x84 // the add-in bias for 16 bit samples
#define CLIP 32635

// Encode one 16-bit linear PCM sample as an 8-bit u-law (G.711) byte,
// using the classic biased segment-lookup implementation.
static unsigned char uLawFrom16BitLinear(u_int16_t sample) {
  // exp_lut[i] is the u-law 'segment' number for a magnitude whose
  // high-order byte (after biasing) is i:
  static int const exp_lut[256] = {0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,
                                   4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
                                   5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
                                   5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
                                   6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
                                   6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
                                   6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
                                   6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
                                   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7};

  // Split off the sign bit, then work on the clipped, biased magnitude:
  unsigned char const sign = (sample >> 8) & 0x80;
  if (sign != 0) sample = -sample;
  if (sample > CLIP) sample = CLIP;
  sample += BIAS;

  unsigned char const segment = exp_lut[(sample>>7) & 0xFF];
  unsigned char const mantissa = (sample >> (segment+3)) & 0x0F;
  unsigned char encoded = ~(sign | (segment << 4) | mantissa);
  if (encoded == 0) encoded = 0x02; // CCITT trap: never emit the all-zero code

  return encoded;
}
+
+void uLawFromPCMAudioSource
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ // Translate raw 16-bit PCM samples (in the input buffer)
+ // into uLaw samples (in the output buffer).
+ unsigned numSamples = frameSize/2;
+ switch (fByteOrdering) {
+ case 0: { // host order
+ u_int16_t* inputSample = (u_int16_t*)fInputBuffer;
+ for (unsigned i = 0; i < numSamples; ++i) {
+ fTo[i] = uLawFrom16BitLinear(inputSample[i]);
+ }
+ break;
+ }
+ case 1: { // little-endian order
+ for (unsigned i = 0; i < numSamples; ++i) {
+ u_int16_t const newValue = (fInputBuffer[2*i+1]<<8)|fInputBuffer[2*i];
+ fTo[i] = uLawFrom16BitLinear(newValue);
+ }
+ break;
+ }
+ case 2: { // network (i.e., big-endian) order
+ for (unsigned i = 0; i < numSamples; ++i) {
+ u_int16_t const newValue = (fInputBuffer[2*i]<<8)|fInputBuffer[2*i+i];
+ fTo[i] = uLawFrom16BitLinear(newValue);
+ }
+ break;
+ }
+ }
+
+ // Complete delivery to the client:
+ fFrameSize = numSamples;
+ fNumTruncatedBytes = numTruncatedBytes;
+ fPresentationTime = presentationTime;
+ fDurationInMicroseconds = durationInMicroseconds;
+ afterGetting(this);
+}
+
+
+////////// u-Law -> 16-bit PCM (in host order) //////////
+
+PCMFromuLawAudioSource* PCMFromuLawAudioSource
+::createNew(UsageEnvironment& env, FramedSource* inputSource) {
+ return new PCMFromuLawAudioSource(env, inputSource);
+}
+
// Constructor: called only from "createNew()".  The u-law staging buffer is
// allocated lazily by "doGetNextFrame()".
PCMFromuLawAudioSource
::PCMFromuLawAudioSource(UsageEnvironment& env,
                         FramedSource* inputSource)
  : FramedFilter(env, inputSource),
    fInputBuffer(NULL), fInputBufferSize(0) {
}
+
PCMFromuLawAudioSource::~PCMFromuLawAudioSource() {
  delete[] fInputBuffer; // release the u-law staging buffer
}
+
+void PCMFromuLawAudioSource::doGetNextFrame() {
+ // Figure out how many bytes of input data to ask for, and increase
+ // our input buffer if necessary:
+ unsigned bytesToRead = fMaxSize/2; // because we're converting 8 bits->16
+ if (bytesToRead > fInputBufferSize) {
+ delete[] fInputBuffer; fInputBuffer = new unsigned char[bytesToRead];
+ fInputBufferSize = bytesToRead;
+ }
+
+ // Arrange to read samples into the input buffer:
+ fInputSource->getNextFrame(fInputBuffer, bytesToRead,
+ afterGettingFrame, this,
+ FramedSource::handleClosure, this);
+}
+
+void PCMFromuLawAudioSource
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ PCMFromuLawAudioSource* source = (PCMFromuLawAudioSource*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
// Decode one 8-bit u-law (G.711) byte to a 16-bit linear PCM sample.
static u_int16_t linear16FromuLaw(unsigned char uLawByte) {
  // Per-segment base magnitudes: exp_lut[e] == (0x84 << e) - 0x84.
  static int const exp_lut[8] = {0,132,396,924,1980,4092,8316,16764};

  unsigned char const decoded = ~uLawByte; // u-law bytes are stored complemented
  unsigned char const segment = (decoded>>4) & 0x07;
  unsigned char const mantissa = decoded & 0x0F;

  u_int16_t const magnitude = exp_lut[segment] + (mantissa << (segment+3));
  return ((decoded & 0x80) != 0) ? (u_int16_t)(-magnitude) : magnitude;
}
+
void PCMFromuLawAudioSource
::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                     struct timeval presentationTime,
                     unsigned durationInMicroseconds) {
  // Translate uLaw samples (in the input buffer)
  // into 16-bit PCM samples (in the output buffer), in host order.
  unsigned numSamples = frameSize;
  u_int16_t* outputSample = (u_int16_t*)fTo;
  for (unsigned i = 0; i < numSamples; ++i) {
    outputSample[i] = linear16FromuLaw(fInputBuffer[i]);
  }

  // Complete delivery to the client:
  fFrameSize = numSamples*2; // "doGetNextFrame()" sized the read so this fits in fMaxSize
  fNumTruncatedBytes = numTruncatedBytes;
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;
  afterGetting(this);
}
+
+
+////////// 16-bit values (in host order) -> 16-bit network order //////////
+
+NetworkFromHostOrder16* NetworkFromHostOrder16
+::createNew(UsageEnvironment& env, FramedSource* inputSource) {
+ return new NetworkFromHostOrder16(env, inputSource);
+}
+
// Constructor: called only from "createNew()".  This filter keeps no state
// of its own; it swaps bytes in place in the client's buffer.
NetworkFromHostOrder16
::NetworkFromHostOrder16(UsageEnvironment& env,
                         FramedSource* inputSource)
  : FramedFilter(env, inputSource) {
}
+
// No resources to release; the base class handles the input source.
NetworkFromHostOrder16::~NetworkFromHostOrder16() {
}
+
void NetworkFromHostOrder16::doGetNextFrame() {
  // Arrange to read data directly into the client's buffer:
  // (the byte swapping in "afterGettingFrame1()" is done in place)
  fInputSource->getNextFrame(fTo, fMaxSize,
                             afterGettingFrame, this,
                             FramedSource::handleClosure, this);
}
+
+void NetworkFromHostOrder16
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ NetworkFromHostOrder16* source = (NetworkFromHostOrder16*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
+void NetworkFromHostOrder16
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ // Translate the 16-bit values that we have just read from host
+ // to network order (in-place)
+ unsigned numValues = frameSize/2;
+ u_int16_t* value = (u_int16_t*)fTo;
+ for (unsigned i = 0; i < numValues; ++i) {
+ value[i] = htons(value[i]);
+ }
+
+ // Complete delivery to the client:
+ fFrameSize = numValues*2;
+ fNumTruncatedBytes = numTruncatedBytes;
+ fPresentationTime = presentationTime;
+ fDurationInMicroseconds = durationInMicroseconds;
+ afterGetting(this);
+}
+
+
+////////// 16-bit values (in network order) -> 16-bit host order //////////
+
+HostFromNetworkOrder16* HostFromNetworkOrder16
+::createNew(UsageEnvironment& env, FramedSource* inputSource) {
+ return new HostFromNetworkOrder16(env, inputSource);
+}
+
// Constructor: called only from "createNew()".  This filter keeps no state
// of its own; it swaps bytes in place in the client's buffer.
HostFromNetworkOrder16
::HostFromNetworkOrder16(UsageEnvironment& env,
                         FramedSource* inputSource)
  : FramedFilter(env, inputSource) {
}
+
// No resources to release; the base class handles the input source.
HostFromNetworkOrder16::~HostFromNetworkOrder16() {
}
+
void HostFromNetworkOrder16::doGetNextFrame() {
  // Arrange to read data directly into the client's buffer:
  // (the byte swapping in "afterGettingFrame1()" is done in place)
  fInputSource->getNextFrame(fTo, fMaxSize,
                             afterGettingFrame, this,
                             FramedSource::handleClosure, this);
}
+
+void HostFromNetworkOrder16
+::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ HostFromNetworkOrder16* source = (HostFromNetworkOrder16*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
+void HostFromNetworkOrder16
+::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ // Translate the 16-bit values that we have just read from network
+ // to host order (in-place):
+ unsigned numValues = frameSize/2;
+ u_int16_t* value = (u_int16_t*)fTo;
+ for (unsigned i = 0; i < numValues; ++i) {
+ value[i] = ntohs(value[i]);
+ }
+
+ // Complete delivery to the client:
+ fFrameSize = numValues*2;
+ fNumTruncatedBytes = numTruncatedBytes;
+ fPresentationTime = presentationTime;
+ fDurationInMicroseconds = durationInMicroseconds;
+ afterGetting(this);
+}
+
+
+////////// 16-bit values: little-endian <-> big-endian //////////
+
+EndianSwap16*
+EndianSwap16::createNew(UsageEnvironment& env, FramedSource* inputSource) {
+ return new EndianSwap16(env, inputSource);
+}
+
// Constructor: called only from "createNew()"; stateless in-place filter.
EndianSwap16::EndianSwap16(UsageEnvironment& env,
                           FramedSource* inputSource)
  : FramedFilter(env, inputSource) {
}
+
// No resources to release; the base class handles the input source.
EndianSwap16::~EndianSwap16() {
}
+
void EndianSwap16::doGetNextFrame() {
  // Arrange to read data directly into the client's buffer:
  // (the byte swapping in "afterGettingFrame1()" is done in place)
  fInputSource->getNextFrame(fTo, fMaxSize,
                             afterGettingFrame, this,
                             FramedSource::handleClosure, this);
}
+
+void EndianSwap16::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ EndianSwap16* source = (EndianSwap16*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
void EndianSwap16::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                                      struct timeval presentationTime,
                                      unsigned durationInMicroseconds) {
  // Swap the byte order of the 16-bit values that we have just read (in place):
  unsigned numValues = frameSize/2;
  u_int16_t* value = (u_int16_t*)fTo;
  for (unsigned i = 0; i < numValues; ++i) {
    u_int16_t const orig = value[i];
    value[i] = ((orig&0xFF)<<8) | ((orig&0xFF00)>>8);
  }

  // Complete delivery to the client:
  fFrameSize = numValues*2;
  // If "frameSize" was odd, the trailing byte is not delivered; count it as truncated:
  fNumTruncatedBytes = numTruncatedBytes + (frameSize - fFrameSize);
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;
  afterGetting(this);
}
+
+
+////////// 24-bit values: little-endian <-> big-endian //////////
+
+EndianSwap24*
+EndianSwap24::createNew(UsageEnvironment& env, FramedSource* inputSource) {
+ return new EndianSwap24(env, inputSource);
+}
+
// Constructor: called only from "createNew()"; stateless in-place filter.
EndianSwap24::EndianSwap24(UsageEnvironment& env,
                           FramedSource* inputSource)
  : FramedFilter(env, inputSource) {
}
+
// No resources to release; the base class handles the input source.
EndianSwap24::~EndianSwap24() {
}
+
void EndianSwap24::doGetNextFrame() {
  // Arrange to read data directly into the client's buffer:
  // (the byte swapping in "afterGettingFrame1()" is done in place)
  fInputSource->getNextFrame(fTo, fMaxSize,
                             afterGettingFrame, this,
                             FramedSource::handleClosure, this);
}
+
+void EndianSwap24::afterGettingFrame(void* clientData, unsigned frameSize,
+ unsigned numTruncatedBytes,
+ struct timeval presentationTime,
+ unsigned durationInMicroseconds) {
+ EndianSwap24* source = (EndianSwap24*)clientData;
+ source->afterGettingFrame1(frameSize, numTruncatedBytes,
+ presentationTime, durationInMicroseconds);
+}
+
void EndianSwap24::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes,
                                      struct timeval presentationTime,
                                      unsigned durationInMicroseconds) {
  // Swap the byte order of the 24-bit values that we have just read (in place):
  // for each 3-byte value, exchange the first and third bytes.
  unsigned const numValues = frameSize/3;
  u_int8_t* p = fTo;
  for (unsigned i = 0; i < numValues; ++i) {
    u_int8_t tmp = p[0];
    p[0] = p[2];
    p[2] = tmp;
    p += 3;
  }

  // Complete delivery to the client:
  fFrameSize = numValues*3;
  // Any 1-2 leftover bytes (not a whole 24-bit value) are counted as truncated:
  fNumTruncatedBytes = numTruncatedBytes + (frameSize - fFrameSize);
  fPresentationTime = presentationTime;
  fDurationInMicroseconds = durationInMicroseconds;
  afterGetting(this);
}
diff --git a/mediaServer/COPYING b/mediaServer/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/mediaServer/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/mediaServer/COPYING.LESSER b/mediaServer/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/mediaServer/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/mediaServer/DynamicRTSPServer.cpp b/mediaServer/DynamicRTSPServer.cpp
new file mode 100644
index 0000000..e787089
--- /dev/null
+++ b/mediaServer/DynamicRTSPServer.cpp
@@ -0,0 +1,240 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A subclass of "RTSPServer" that creates "ServerMediaSession"s on demand,
+// based on whether or not the specified stream name exists as a file
+// Implementation
+
+#include "DynamicRTSPServer.hh"
+#include <liveMedia.hh>
+#include <string.h>
+
+DynamicRTSPServer*
+DynamicRTSPServer::createNew(UsageEnvironment& env, Port ourPort,
+ UserAuthenticationDatabase* authDatabase,
+ unsigned reclamationTestSeconds) {
+ int ourSocket = setUpOurSocket(env, ourPort);
+ if (ourSocket == -1) return NULL;
+
+ return new DynamicRTSPServer(env, ourSocket, ourPort, authDatabase, reclamationTestSeconds);
+}
+
// Constructor: called only from "createNew()"; all work is done by the base class.
DynamicRTSPServer::DynamicRTSPServer(UsageEnvironment& env, int ourSocket,
                                     Port ourPort,
                                     UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds)
  : RTSPServerSupportingHTTPStreaming(env, ourSocket, ourPort, authDatabase, reclamationTestSeconds) {
}
+
// No resources of our own to release.
DynamicRTSPServer::~DynamicRTSPServer() {
}
+
+static ServerMediaSession* createNewSMS(UsageEnvironment& env,
+ char const* fileName, FILE* fid); // forward
+
// Looks up (creating on demand) the "ServerMediaSession" for "streamName".
// Returns NULL if "streamName" does not exist as a local file.
ServerMediaSession* DynamicRTSPServer
::lookupServerMediaSession(char const* streamName, Boolean isFirstLookupInSession) {
  // First, check whether the specified "streamName" exists as a local file:
  FILE* fid = fopen(streamName, "rb");
  Boolean const fileExists = fid != NULL;

  // Next, check whether we already have a "ServerMediaSession" for this file:
  ServerMediaSession* sms = RTSPServer::lookupServerMediaSession(streamName);
  Boolean const smsExists = sms != NULL;

  // Handle the four possibilities for "fileExists" and "smsExists":
  if (!fileExists) {
    if (smsExists) {
      // "sms" was created for a file that no longer exists. Remove it:
      removeServerMediaSession(sms);
      sms = NULL;
    }

    return NULL;
  } else {
    if (smsExists && isFirstLookupInSession) {
      // Remove the existing "ServerMediaSession" and create a new one, in case the underlying
      // file has changed in some way:
      removeServerMediaSession(sms);
      sms = NULL;
    }

    if (sms == NULL) {
      // NOTE(review): "createNewSMS()" returns NULL for an unrecognized file
      // extension, in which case NULL is passed to "addServerMediaSession()"
      // and returned to our caller -- presumably both tolerate NULL; confirm.
      sms = createNewSMS(envir(), streamName, fid);
      addServerMediaSession(sms);
    }

    fclose(fid);
    return sms;
  }
}
+
+// Special code for handling Matroska files:
// State shared with "onMatroskaDemuxCreation()" while we wait (in the event
// loop) for the asynchronous demux creation to complete:
struct MatroskaDemuxCreationState {
  MatroskaFileServerDemux* demux; // set by the creation callback
  char watchVariable; // set to nonzero to break out of "doEventLoop()"
};
// Callback invoked when "MatroskaFileServerDemux::createNew()" completes:
// records the new demux and signals the waiting event loop.
static void onMatroskaDemuxCreation(MatroskaFileServerDemux* newDemux, void* clientData) {
  MatroskaDemuxCreationState* creationState = (MatroskaDemuxCreationState*)clientData;
  creationState->demux = newDemux;
  creationState->watchVariable = 1;
}
+// END Special code for handling Matroska files:
+
+// Special code for handling Ogg files:
// State shared with "onOggDemuxCreation()" while we wait (in the event
// loop) for the asynchronous demux creation to complete:
struct OggDemuxCreationState {
  OggFileServerDemux* demux; // set by the creation callback
  char watchVariable; // set to nonzero to break out of "doEventLoop()"
};
// Callback invoked when "OggFileServerDemux::createNew()" completes:
// records the new demux and signals the waiting event loop.
static void onOggDemuxCreation(OggFileServerDemux* newDemux, void* clientData) {
  OggDemuxCreationState* creationState = (OggDemuxCreationState*)clientData;
  creationState->demux = newDemux;
  creationState->watchVariable = 1;
}
+// END Special code for handling Ogg files:
+
// Helper macro used by "createNewSMS()" below: creates a "ServerMediaSession"
// with a description built from "description", assigning it to the caller's
// "sms" variable (it also uses "env" and "fileName" from the calling scope).
#define NEW_SMS(description) do {\
char const* descStr = description\
 ", streamed by the LIVE555 Media Server";\
sms = ServerMediaSession::createNew(env, fileName, fileName, descStr);\
} while(0)
+
// Creates a new "ServerMediaSession" appropriate for "fileName", chosen by
// its file-name extension; returns NULL if the extension is missing or
// unrecognized.  ("fid" is unused; the subsessions reopen the file themselves.)
// NOTE: several branches assign the global "OutPacketBuffer::maxSize"; that
// side effect persists across calls.
static ServerMediaSession* createNewSMS(UsageEnvironment& env,
                                        char const* fileName, FILE* /*fid*/) {
  // Use the file name extension to determine the type of "ServerMediaSession":
  char const* extension = strrchr(fileName, '.');
  if (extension == NULL) return NULL;

  ServerMediaSession* sms = NULL;
  Boolean const reuseSource = False;
  if (strcmp(extension, ".aac") == 0) {
    // Assumed to be an AAC Audio (ADTS format) file:
    NEW_SMS("AAC Audio");
    sms->addSubsession(ADTSAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".amr") == 0) {
    // Assumed to be an AMR Audio file:
    NEW_SMS("AMR Audio");
    sms->addSubsession(AMRAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".ac3") == 0) {
    // Assumed to be an AC-3 Audio file:
    NEW_SMS("AC-3 Audio");
    sms->addSubsession(AC3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".m4e") == 0) {
    // Assumed to be a MPEG-4 Video Elementary Stream file:
    NEW_SMS("MPEG-4 Video");
    sms->addSubsession(MPEG4VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".264") == 0) {
    // Assumed to be a H.264 Video Elementary Stream file:
    NEW_SMS("H.264 Video");
    OutPacketBuffer::maxSize = 100000; // allow for some possibly large H.264 frames
    sms->addSubsession(H264VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".265") == 0) {
    // Assumed to be a H.265 Video Elementary Stream file:
    NEW_SMS("H.265 Video");
    OutPacketBuffer::maxSize = 100000; // allow for some possibly large H.265 frames
    sms->addSubsession(H265VideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".mp3") == 0) {
    // Assumed to be a MPEG-1 or 2 Audio file:
    NEW_SMS("MPEG-1 or 2 Audio");
    // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
//#define STREAM_USING_ADUS 1
    // To also reorder ADUs before streaming, uncomment the following:
//#define INTERLEAVE_ADUS 1
    // (For more information about ADUs and interleaving,
    //  see <http://www.live555.com/rtp-mp3/>)
    Boolean useADUs = False;
    Interleaving* interleaving = NULL;
#ifdef STREAM_USING_ADUS
    useADUs = True;
#ifdef INTERLEAVE_ADUS
    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
    unsigned const interleaveCycleSize
      = (sizeof interleaveCycle)/(sizeof (unsigned char));
    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
#endif
#endif
    sms->addSubsession(MP3AudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, useADUs, interleaving));
  } else if (strcmp(extension, ".mpg") == 0) {
    // Assumed to be a MPEG-1 or 2 Program Stream (audio+video) file:
    NEW_SMS("MPEG-1 or 2 Program Stream");
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession());
    sms->addSubsession(demux->newAudioServerMediaSubsession());
  } else if (strcmp(extension, ".vob") == 0) {
    // Assumed to be a VOB (MPEG-2 Program Stream, with AC-3 audio) file:
    NEW_SMS("VOB (MPEG-2 video with AC-3 audio)");
    MPEG1or2FileServerDemux* demux
      = MPEG1or2FileServerDemux::createNew(env, fileName, reuseSource);
    sms->addSubsession(demux->newVideoServerMediaSubsession());
    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
  } else if (strcmp(extension, ".ts") == 0) {
    // Assumed to be a MPEG Transport Stream file:
    // Use an index file name that's the same as the TS file name, except with ".tsx":
    unsigned indexFileNameLen = strlen(fileName) + 2; // allow for trailing "x\0"
    char* indexFileName = new char[indexFileNameLen];
    sprintf(indexFileName, "%sx", fileName);
    NEW_SMS("MPEG Transport Stream");
    sms->addSubsession(MPEG2TransportFileServerMediaSubsession::createNew(env, fileName, indexFileName, reuseSource));
    delete[] indexFileName;
  } else if (strcmp(extension, ".wav") == 0) {
    // Assumed to be a WAV Audio file:
    NEW_SMS("WAV Audio Stream");
    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
    // change the following to True:
    Boolean convertToULaw = False;
    sms->addSubsession(WAVAudioFileServerMediaSubsession::createNew(env, fileName, reuseSource, convertToULaw));
  } else if (strcmp(extension, ".dv") == 0) {
    // Assumed to be a DV Video file
    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
    OutPacketBuffer::maxSize = 300000;

    NEW_SMS("DV Video");
    sms->addSubsession(DVVideoFileServerMediaSubsession::createNew(env, fileName, reuseSource));
  } else if (strcmp(extension, ".mkv") == 0 || strcmp(extension, ".webm") == 0) {
    // Assumed to be a Matroska file (note that WebM ('.webm') files are also Matroska files)
    OutPacketBuffer::maxSize = 300000; // allow for some possibly large VP8 or VP9 frames
    NEW_SMS("Matroska video+audio+(optional)subtitles");

    // Create a Matroska file server demultiplexor for the specified file.
    // (We enter the event loop to wait for this to complete.)
    MatroskaDemuxCreationState creationState;
    creationState.watchVariable = 0;
    MatroskaFileServerDemux::createNew(env, fileName, onMatroskaDemuxCreation, &creationState);
    env.taskScheduler().doEventLoop(&creationState.watchVariable);

    // Add one subsession per track found by the demux:
    ServerMediaSubsession* smss;
    while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
    }
  } else if (strcmp(extension, ".ogg") == 0 || strcmp(extension, ".ogv") == 0 || strcmp(extension, ".opus") == 0) {
    // Assumed to be an Ogg file
    NEW_SMS("Ogg video and/or audio");

    // Create a Ogg file server demultiplexor for the specified file.
    // (We enter the event loop to wait for this to complete.)
    OggDemuxCreationState creationState;
    creationState.watchVariable = 0;
    OggFileServerDemux::createNew(env, fileName, onOggDemuxCreation, &creationState);
    env.taskScheduler().doEventLoop(&creationState.watchVariable);

    // Add one subsession per track found by the demux:
    ServerMediaSubsession* smss;
    while ((smss = creationState.demux->newServerMediaSubsession()) != NULL) {
      sms->addSubsession(smss);
    }
  }

  return sms;
}
diff --git a/mediaServer/DynamicRTSPServer.hh b/mediaServer/DynamicRTSPServer.hh
new file mode 100644
index 0000000..73c119f
--- /dev/null
+++ b/mediaServer/DynamicRTSPServer.hh
@@ -0,0 +1,45 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A subclass of "RTSPServer" that creates "ServerMediaSession"s on demand,
+// based on whether or not the specified stream name exists as a file
+// Header file
+
+#ifndef _DYNAMIC_RTSP_SERVER_HH
+#define _DYNAMIC_RTSP_SERVER_HH
+
+#ifndef _RTSP_SERVER_SUPPORTING_HTTP_STREAMING_HH
+#include "RTSPServerSupportingHTTPStreaming.hh"
+#endif
+
// An RTSP server that creates each "ServerMediaSession" on demand, the first
// time a stream name is requested, based on a local file of that name.
// (RTSP-over-HTTP tunneling support comes from the parent class.)
class DynamicRTSPServer: public RTSPServerSupportingHTTPStreaming {
public:
  // Creates the server socket on "ourPort"; returns NULL on failure.
  // "authDatabase" may be NULL (no client access control).
  static DynamicRTSPServer* createNew(UsageEnvironment& env, Port ourPort,
                                      UserAuthenticationDatabase* authDatabase,
                                      unsigned reclamationTestSeconds = 65);

protected:
  DynamicRTSPServer(UsageEnvironment& env, int ourSocket, Port ourPort,
                    UserAuthenticationDatabase* authDatabase, unsigned reclamationTestSeconds);
  // called only by createNew();
  virtual ~DynamicRTSPServer();

protected: // redefined virtual functions
  // Maps "streamName" to an existing or newly-created session; see the .cpp file.
  virtual ServerMediaSession*
  lookupServerMediaSession(char const* streamName, Boolean isFirstLookupInSession);
};
+
+#endif
diff --git a/mediaServer/Makefile.head b/mediaServer/Makefile.head
new file mode 100644
index 0000000..e81e1ba
--- /dev/null
+++ b/mediaServer/Makefile.head
@@ -0,0 +1,7 @@
# Search paths for the public headers of the four LIVE555 libraries:
INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include
# Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later.
libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX)
libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX)
##### Change the following for your environment:
diff --git a/mediaServer/Makefile.tail b/mediaServer/Makefile.tail
new file mode 100644
index 0000000..292ebbb
--- /dev/null
+++ b/mediaServer/Makefile.tail
@@ -0,0 +1,41 @@
##### End of variables to change

MEDIA_SERVER = live555MediaServer$(EXE)

PREFIX = /usr/local
ALL = $(MEDIA_SERVER)
all: $(ALL)

# Suffix rules: compile C and C++ sources with the configured compilers.
.$(C).$(OBJ):
	$(C_COMPILER) -c $(C_FLAGS) $<
.$(CPP).$(OBJ):
	$(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<

MEDIA_SERVER_OBJS = live555MediaServer.$(OBJ) DynamicRTSPServer.$(OBJ)

# Header dependencies for the two source files:
live555MediaServer.$(CPP): DynamicRTSPServer.hh version.hh
DynamicRTSPServer.$(CPP): DynamicRTSPServer.hh

# Locations of the four LIVE555 libraries we link against:
USAGE_ENVIRONMENT_DIR = ../UsageEnvironment
USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX)
BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment
BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX)
LIVEMEDIA_DIR = ../liveMedia
LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX)
GROUPSOCK_DIR = ../groupsock
GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX)
LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
	     $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB)
LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION)

live555MediaServer$(EXE): $(MEDIA_SERVER_OBJS) $(LOCAL_LIBS)
	$(LINK)$@ $(CONSOLE_LINK_OPTS) $(MEDIA_SERVER_OBJS) $(LIBS)

clean:
	-rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~

install: $(MEDIA_SERVER)
	  install -d $(DESTDIR)$(PREFIX)/bin
	  install -m 755 $(MEDIA_SERVER) $(DESTDIR)$(PREFIX)/bin

##### Any additional, platform-specific rules come here:
diff --git a/mediaServer/live555MediaServer.cpp b/mediaServer/live555MediaServer.cpp
new file mode 100644
index 0000000..a131779
--- /dev/null
+++ b/mediaServer/live555MediaServer.cpp
@@ -0,0 +1,92 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// LIVE555 Media Server
+// main program
+
+#include <BasicUsageEnvironment.hh>
+#include "DynamicRTSPServer.hh"
+#include "version.hh"
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
+
+ UserAuthenticationDatabase* authDB = NULL;
+#ifdef ACCESS_CONTROL
+ // To implement client access control to the RTSP server, do the following:
+ authDB = new UserAuthenticationDatabase;
+ authDB->addUserRecord("username1", "password1"); // replace these with real strings
+ // Repeat the above with each <username>, <password> that you wish to allow
+ // access to the server.
+#endif
+
+ // Create the RTSP server. Try first with the default port number (554),
+ // and then with the alternative port number (8554):
+ RTSPServer* rtspServer;
+ portNumBits rtspServerPortNum = 554;
+ rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
+ if (rtspServer == NULL) {
+ rtspServerPortNum = 8554;
+ rtspServer = DynamicRTSPServer::createNew(*env, rtspServerPortNum, authDB);
+ }
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+
+ *env << "LIVE555 Media Server\n";
+ *env << "\tversion " << MEDIA_SERVER_VERSION_STRING
+ << " (LIVE555 Streaming Media library version "
+ << LIVEMEDIA_LIBRARY_VERSION_STRING << ").\n";
+
+ char* urlPrefix = rtspServer->rtspURLPrefix();
+ *env << "Play streams from this server using the URL\n\t"
+ << urlPrefix << "<filename>\nwhere <filename> is a file present in the current directory.\n";
+ *env << "Each file's type is inferred from its name suffix:\n";
+ *env << "\t\".264\" => a H.264 Video Elementary Stream file\n";
+ *env << "\t\".265\" => a H.265 Video Elementary Stream file\n";
+ *env << "\t\".aac\" => an AAC Audio (ADTS format) file\n";
+ *env << "\t\".ac3\" => an AC-3 Audio file\n";
+ *env << "\t\".amr\" => an AMR Audio file\n";
+ *env << "\t\".dv\" => a DV Video file\n";
+ *env << "\t\".m4e\" => a MPEG-4 Video Elementary Stream file\n";
+ *env << "\t\".mkv\" => a Matroska audio+video+(optional)subtitles file\n";
+ *env << "\t\".mp3\" => a MPEG-1 or 2 Audio file\n";
+ *env << "\t\".mpg\" => a MPEG-1 or 2 Program Stream (audio+video) file\n";
+ *env << "\t\".ogg\" or \".ogv\" or \".opus\" => an Ogg audio and/or video file\n";
+ *env << "\t\".ts\" => a MPEG Transport Stream file\n";
+ *env << "\t\t(a \".tsx\" index file - if present - provides server 'trick play' support)\n";
+ *env << "\t\".vob\" => a VOB (MPEG-2 video with AC-3 audio) file\n";
+ *env << "\t\".wav\" => a WAV Audio file\n";
+ *env << "\t\".webm\" => a WebM audio(Vorbis)+video(VP8) file\n";
+ *env << "See http://www.live555.com/mediaServer/ for additional documentation.\n";
+
+ // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
+ // Try first with the default HTTP port (80), and then with the alternative HTTP
+ // port numbers (8000 and 8080).
+
+ if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
+ *env << "(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling, or for HTTP live streaming (for indexed Transport Stream files only).)\n";
+ } else {
+ *env << "(RTSP-over-HTTP tunneling is not available.)\n";
+ }
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
diff --git a/mediaServer/version.hh b/mediaServer/version.hh
new file mode 100644
index 0000000..929b0c0
--- /dev/null
+++ b/mediaServer/version.hh
@@ -0,0 +1,10 @@
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// Version information for the LIVE555 Media Server application
+// Header file
+
+#ifndef _MEDIA_SERVER_VERSION_HH
+#define _MEDIA_SERVER_VERSION_HH
+
+#define MEDIA_SERVER_VERSION_STRING "0.99"
+
+#endif
diff --git a/proxyServer/COPYING b/proxyServer/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/proxyServer/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/proxyServer/COPYING.LESSER b/proxyServer/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/proxyServer/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/proxyServer/Makefile.head b/proxyServer/Makefile.head
new file mode 100644
index 0000000..e81e1ba
--- /dev/null
+++ b/proxyServer/Makefile.head
@@ -0,0 +1,7 @@
+INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include
+# Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later.
+libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX)
+libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
+libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
+libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX)
+##### Change the following for your environment:
diff --git a/proxyServer/Makefile.tail b/proxyServer/Makefile.tail
new file mode 100644
index 0000000..fa9e999
--- /dev/null
+++ b/proxyServer/Makefile.tail
@@ -0,0 +1,38 @@
+##### End of variables to change
+
+PROXY_SERVER = live555ProxyServer$(EXE)
+
+PREFIX = /usr/local
+ALL = $(PROXY_SERVER)
+all: $(ALL)
+
+.$(C).$(OBJ):
+ $(C_COMPILER) -c $(C_FLAGS) $<
+.$(CPP).$(OBJ):
+ $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+PROXY_SERVER_OBJS = live555ProxyServer.$(OBJ)
+
+USAGE_ENVIRONMENT_DIR = ../UsageEnvironment
+USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX)
+BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment
+BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX)
+LIVEMEDIA_DIR = ../liveMedia
+LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX)
+GROUPSOCK_DIR = ../groupsock
+GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX)
+LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
+ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB)
+LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION)
+
+live555ProxyServer$(EXE): $(PROXY_SERVER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(PROXY_SERVER_OBJS) $(LIBS)
+
+clean:
+ -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+install: $(PROXY_SERVER)
+ install -d $(DESTDIR)$(PREFIX)/bin
+ install -m 755 $(PROXY_SERVER) $(DESTDIR)$(PREFIX)/bin
+
+##### Any additional, platform-specific rules come here:
diff --git a/proxyServer/live555ProxyServer.cpp b/proxyServer/live555ProxyServer.cpp
new file mode 100644
index 0000000..27cc58b
--- /dev/null
+++ b/proxyServer/live555ProxyServer.cpp
@@ -0,0 +1,251 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// LIVE555 Proxy Server
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+char const* progName;
+UsageEnvironment* env;
+UserAuthenticationDatabase* authDB = NULL;
+UserAuthenticationDatabase* authDBForREGISTER = NULL;
+
+// Default values of command-line parameters:
+int verbosityLevel = 0;
+Boolean streamRTPOverTCP = False;
+portNumBits tunnelOverHTTPPortNum = 0;
+portNumBits rtspServerPortNum = 554;
+char* username = NULL;
+char* password = NULL;
+Boolean proxyREGISTERRequests = False;
+char* usernameForREGISTER = NULL;
+char* passwordForREGISTER = NULL;
+
+static RTSPServer* createRTSPServer(Port port) {
+ if (proxyREGISTERRequests) {
+ return RTSPServerWithREGISTERProxying::createNew(*env, port, authDB, authDBForREGISTER, 65, streamRTPOverTCP, verbosityLevel, username, password);
+ } else {
+ return RTSPServer::createNew(*env, port, authDB);
+ }
+}
+
+void usage() {
+ *env << "Usage: " << progName
+ << " [-v|-V]"
+ << " [-t|-T <http-port>]"
+ << " [-p <rtspServer-port>]"
+ << " [-u <username> <password>]"
+ << " [-R] [-U <username-for-REGISTER> <password-for-REGISTER>]"
+ << " <rtsp-url-1> ... <rtsp-url-n>\n";
+ exit(1);
+}
+
+int main(int argc, char** argv) {
+ // Increase the maximum size of video frames that we can 'proxy' without truncation.
+ // (Such frames are unreasonably large; the back-end servers should really not be sending frames this large!)
+ OutPacketBuffer::maxSize = 100000; // bytes
+
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ *env << "LIVE555 Proxy Server\n"
+ << "\t(LIVE555 Streaming Media library version "
+ << LIVEMEDIA_LIBRARY_VERSION_STRING
+ << "; licensed under the GNU LGPL)\n\n";
+
+ // Check command-line arguments: optional parameters, then one or more rtsp:// URLs (of streams to be proxied):
+ progName = argv[0];
+ if (argc < 2) usage();
+ while (argc > 1) {
+ // Process initial command-line options (beginning with "-"):
+ char* const opt = argv[1];
+ if (opt[0] != '-') break; // the remaining parameters are assumed to be "rtsp://" URLs
+
+ switch (opt[1]) {
+ case 'v': { // verbose output
+ verbosityLevel = 1;
+ break;
+ }
+
+ case 'V': { // more verbose output
+ verbosityLevel = 2;
+ break;
+ }
+
+ case 't': {
+ // Stream RTP and RTCP over the TCP 'control' connection.
+ // (This is for the 'back end' (i.e., proxied) stream only.)
+ streamRTPOverTCP = True;
+ break;
+ }
+
+ case 'T': {
+ // stream RTP and RTCP over a HTTP connection
+ if (argc > 2 && argv[2][0] != '-') {
+ // The next argument is the HTTP server port number:
+ if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1
+ && tunnelOverHTTPPortNum > 0) {
+ ++argv; --argc;
+ break;
+ }
+ }
+
+ // If we get here, the option was specified incorrectly:
+ usage();
+ break;
+ }
+
+ case 'p': {
+ // specify a rtsp server port number
+ if (argc > 2 && argv[2][0] != '-') {
+ // The next argument is the rtsp server port number:
+ if (sscanf(argv[2], "%hu", &rtspServerPortNum) == 1
+ && rtspServerPortNum > 0) {
+ ++argv; --argc;
+ break;
+ }
+ }
+
+ // If we get here, the option was specified incorrectly:
+ usage();
+ break;
+ }
+
+ case 'u': { // specify a username and password (to be used if the 'back end' (i.e., proxied) stream requires authentication)
+ if (argc < 4) usage(); // there's no argv[3] (for the "password")
+ username = argv[2];
+ password = argv[3];
+ argv += 2; argc -= 2;
+ break;
+ }
+
+ case 'U': { // specify a username and password to use to authenticate incoming "REGISTER" commands
+ if (argc < 4) usage(); // there's no argv[3] (for the "password")
+ usernameForREGISTER = argv[2];
+ passwordForREGISTER = argv[3];
+
+ if (authDBForREGISTER == NULL) authDBForREGISTER = new UserAuthenticationDatabase;
+ authDBForREGISTER->addUserRecord(usernameForREGISTER, passwordForREGISTER);
+ argv += 2; argc -= 2;
+ break;
+ }
+
+ case 'R': { // Handle incoming "REGISTER" requests by proxying the specified stream:
+ proxyREGISTERRequests = True;
+ break;
+ }
+
+ default: {
+ usage();
+ break;
+ }
+ }
+
+ ++argv; --argc;
+ }
+ if (argc < 2 && !proxyREGISTERRequests) usage(); // there must be at least one URL at the end
+ // Make sure that the remaining arguments appear to be "rtsp://" (or "rtsps://") URLs:
+ int i;
+ for (i = 1; i < argc; ++i) {
+ if (strncmp(argv[i], "rtsp://", 7) != 0 && strncmp(argv[i], "rtsps://", 8) != 0) usage();
+ }
+ // Do some additional checking for invalid command-line argument combinations:
+ if (authDBForREGISTER != NULL && !proxyREGISTERRequests) {
+ *env << "The '-U <username-for-REGISTER> <password-for-REGISTER>' option can be used only with -R\n";
+ usage();
+ }
+ if (streamRTPOverTCP) {
+ if (tunnelOverHTTPPortNum > 0) {
+ *env << "The -t and -T options cannot both be used!\n";
+ usage();
+ } else {
+ tunnelOverHTTPPortNum = (portNumBits)(~0); // hack to tell "ProxyServerMediaSession" to stream over TCP, but not using HTTP
+ }
+ }
+
+#ifdef ACCESS_CONTROL
+ // To implement client access control to the RTSP server, do the following:
+ authDB = new UserAuthenticationDatabase;
+ authDB->addUserRecord("username1", "password1"); // replace these with real strings
+ // Repeat this line with each <username>, <password> that you wish to allow access to the server.
+#endif
+
+ // Create the RTSP server. Try first with the configured port number,
+ // and then with the default port number (554) if different,
+ // and then with the alternative port number (8554):
+ RTSPServer* rtspServer;
+ rtspServer = createRTSPServer(rtspServerPortNum);
+ if (rtspServer == NULL) {
+ if (rtspServerPortNum != 554) {
+ *env << "Unable to create a RTSP server with port number " << rtspServerPortNum << ": " << env->getResultMsg() << "\n";
+ *env << "Trying instead with the standard port numbers (554 and 8554)...\n";
+
+ rtspServerPortNum = 554;
+ rtspServer = createRTSPServer(rtspServerPortNum);
+ }
+ }
+ if (rtspServer == NULL) {
+ rtspServerPortNum = 8554;
+ rtspServer = createRTSPServer(rtspServerPortNum);
+ }
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+
+ // Create a proxy for each "rtsp://" URL specified on the command line:
+ for (i = 1; i < argc; ++i) {
+ char const* proxiedStreamURL = argv[i];
+ char streamName[30];
+ if (argc == 2) {
+ sprintf(streamName, "%s", "proxyStream"); // there's just one stream; give it this name
+ } else {
+ sprintf(streamName, "proxyStream-%d", i); // there's more than one stream; distinguish them by name
+ }
+ ServerMediaSession* sms
+ = ProxyServerMediaSession::createNew(*env, rtspServer,
+ proxiedStreamURL, streamName,
+ username, password, tunnelOverHTTPPortNum, verbosityLevel);
+ rtspServer->addServerMediaSession(sms);
+
+ char* proxyStreamURL = rtspServer->rtspURL(sms);
+ *env << "RTSP stream, proxying the stream \"" << proxiedStreamURL << "\"\n";
+ *env << "\tPlay this stream using the URL: " << proxyStreamURL << "\n";
+ delete[] proxyStreamURL;
+ }
+
+ if (proxyREGISTERRequests) {
+ *env << "(We handle incoming \"REGISTER\" requests on port " << rtspServerPortNum << ")\n";
+ }
+
+ // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
+ // Try first with the default HTTP port (80), and then with the alternative HTTP
+ // port numbers (8000 and 8080).
+
+ if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
+ *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
+ } else {
+ *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
+ }
+
+ // Now, enter the event loop:
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
diff --git a/testProgs/COPYING b/testProgs/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/testProgs/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/testProgs/COPYING.LESSER b/testProgs/COPYING.LESSER
new file mode 120000
index 0000000..ce9a3ce
--- /dev/null
+++ b/testProgs/COPYING.LESSER
@@ -0,0 +1 @@
+../COPYING.LESSER
\ No newline at end of file
diff --git a/testProgs/MPEG2TransportStreamIndexer.cpp b/testProgs/MPEG2TransportStreamIndexer.cpp
new file mode 100644
index 0000000..a913b98
--- /dev/null
+++ b/testProgs/MPEG2TransportStreamIndexer.cpp
@@ -0,0 +1,90 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that reads an existing MPEG-2 Transport Stream file,
+// and generates a separate index file that can be used - by our RTSP server
+// implementation - to support 'trick play' operations when streaming the
+// Transport Stream file.
+// main program
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+
+void afterPlaying(void* clientData); // forward
+
+UsageEnvironment* env;
+char const* programName;
+
+void usage() {
+ *env << "usage: " << programName << " <transport-stream-file-name>\n";
+ *env << "\twhere <transport-stream-file-name> ends with \".ts\"\n";
+ exit(1);
+}
+
+int main(int argc, char const** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Parse the command line:
+ programName = argv[0];
+ if (argc != 2) usage();
+
+ char const* inputFileName = argv[1];
+ // Check whether the input file name ends with ".ts":
+ int len = strlen(inputFileName);
+ if (len < 4 || strcmp(&inputFileName[len-3], ".ts") != 0) {
+ *env << "ERROR: input file name \"" << inputFileName
+ << "\" does not end with \".ts\"\n";
+ usage();
+ }
+
+ // Open the input file (as a 'byte stream file source'):
+ FramedSource* input
+ = ByteStreamFileSource::createNew(*env, inputFileName, TRANSPORT_PACKET_SIZE);
+ if (input == NULL) {
+ *env << "Failed to open input file \"" << inputFileName << "\" (does it exist?)\n";
+ exit(1);
+ }
+
+ // Create a filter that indexes the input Transport Stream data:
+ FramedSource* indexer
+ = MPEG2IFrameIndexFromTransportStream::createNew(*env, input);
+
+ // The output file name is the same as the input file name, except with suffix ".tsx":
+ char* outputFileName = new char[len+2]; // allow for trailing x\0
+ sprintf(outputFileName, "%sx", inputFileName);
+
+ // Open the output file (for writing), as a 'file sink':
+ MediaSink* output = FileSink::createNew(*env, outputFileName);
+ if (output == NULL) {
+ *env << "Failed to open output file \"" << outputFileName << "\"\n";
+ exit(1);
+ }
+
+ // Start playing, to generate the output index file:
+ *env << "Writing index file \"" << outputFileName << "\"...";
+ output->startPlaying(*indexer, afterPlaying, NULL);
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done\n";
+ exit(0);
+}
diff --git a/testProgs/Makefile.head b/testProgs/Makefile.head
new file mode 100644
index 0000000..e81e1ba
--- /dev/null
+++ b/testProgs/Makefile.head
@@ -0,0 +1,7 @@
+INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include
+# Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later.
+libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX)
+libBasicUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
+libUsageEnvironment_LIB_SUFFIX = $(LIB_SUFFIX)
+libgroupsock_LIB_SUFFIX = $(LIB_SUFFIX)
+##### Change the following for your environment:
diff --git a/testProgs/Makefile.tail b/testProgs/Makefile.tail
new file mode 100644
index 0000000..bab4cc5
--- /dev/null
+++ b/testProgs/Makefile.tail
@@ -0,0 +1,159 @@
+##### End of variables to change
+
+MULTICAST_STREAMER_APPS = testMP3Streamer$(EXE) testMPEG1or2VideoStreamer$(EXE) testMPEG1or2AudioVideoStreamer$(EXE) testMPEG2TransportStreamer$(EXE) testMPEG4VideoStreamer$(EXE) testH264VideoStreamer$(EXE) testH265VideoStreamer$(EXE) testDVVideoStreamer$(EXE) testWAVAudioStreamer$(EXE) testAMRAudioStreamer$(EXE) testMKVStreamer$(EXE) testOggStreamer$(EXE) vobStreamer$(EXE)
+MULTICAST_RECEIVER_APPS = testMP3Receiver$(EXE) testMPEG1or2VideoReceiver$(EXE) testMPEG2TransportReceiver$(EXE) sapWatch$(EXE)
+MULTICAST_MISC_APPS = testRelay$(EXE) testReplicator$(EXE)
+MULTICAST_APPS = $(MULTICAST_STREAMER_APPS) $(MULTICAST_RECEIVER_APPS) $(MULTICAST_MISC_APPS)
+
+UNICAST_STREAMER_APPS = testOnDemandRTSPServer$(EXE)
+UNICAST_RECEIVER_APPS = testRTSPClient$(EXE) openRTSP$(EXE) playSIP$(EXE)
+UNICAST_APPS = $(UNICAST_STREAMER_APPS) $(UNICAST_RECEIVER_APPS)
+
+HLS_APPS = testH264VideoToHLSSegments$(EXE)
+
+MISC_APPS = testMPEG1or2Splitter$(EXE) testMPEG1or2ProgramToTransportStream$(EXE) testH264VideoToTransportStream$(EXE) testH265VideoToTransportStream$(EXE) MPEG2TransportStreamIndexer$(EXE) testMPEG2TransportStreamTrickPlay$(EXE) registerRTSPStream$(EXE) testMKVSplitter$(EXE) testMPEG2TransportStreamSplitter$(EXE) mikeyParse$(EXE)
+
+PREFIX = /usr/local
+ALL = $(MULTICAST_APPS) $(UNICAST_APPS) $(HLS_APPS) $(MISC_APPS)
+all: $(ALL)
+
+extra: testGSMStreamer$(EXE)
+
+.$(C).$(OBJ):
+ $(C_COMPILER) -c $(C_FLAGS) $<
+.$(CPP).$(OBJ):
+ $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $<
+
+MP3_STREAMER_OBJS = testMP3Streamer.$(OBJ)
+MP3_RECEIVER_OBJS = testMP3Receiver.$(OBJ)
+RELAY_OBJS = testRelay.$(OBJ)
+REPLICATOR_OBJS = testReplicator.$(OBJ)
+H264_VIDEO_TO_HLS_SEGMENTS_OBJS = testH264VideoToHLSSegments.$(OBJ)
+MPEG_1OR2_SPLITTER_OBJS = testMPEG1or2Splitter.$(OBJ)
+MPEG_1OR2_VIDEO_STREAMER_OBJS = testMPEG1or2VideoStreamer.$(OBJ)
+MPEG_1OR2_VIDEO_RECEIVER_OBJS = testMPEG1or2VideoReceiver.$(OBJ)
+MPEG2_TRANSPORT_RECEIVER_OBJS = testMPEG2TransportReceiver.$(OBJ)
+MPEG_1OR2_AUDIO_VIDEO_STREAMER_OBJS = testMPEG1or2AudioVideoStreamer.$(OBJ)
+MPEG2_TRANSPORT_STREAMER_OBJS = testMPEG2TransportStreamer.$(OBJ)
+MPEG4_VIDEO_STREAMER_OBJS = testMPEG4VideoStreamer.$(OBJ)
+H264_VIDEO_STREAMER_OBJS = testH264VideoStreamer.$(OBJ)
+H265_VIDEO_STREAMER_OBJS = testH265VideoStreamer.$(OBJ)
+DV_VIDEO_STREAMER_OBJS = testDVVideoStreamer.$(OBJ)
+WAV_AUDIO_STREAMER_OBJS = testWAVAudioStreamer.$(OBJ)
+AMR_AUDIO_STREAMER_OBJS = testAMRAudioStreamer.$(OBJ)
+ON_DEMAND_RTSP_SERVER_OBJS = testOnDemandRTSPServer.$(OBJ)
+MKV_STREAMER_OBJS = testMKVStreamer.$(OBJ)
+OGG_STREAMER_OBJS = testOggStreamer.$(OBJ)
+VOB_STREAMER_OBJS = vobStreamer.$(OBJ)
+TEST_RTSP_CLIENT_OBJS = testRTSPClient.$(OBJ)
+OPEN_RTSP_OBJS = openRTSP.$(OBJ) playCommon.$(OBJ)
+PLAY_SIP_OBJS = playSIP.$(OBJ) playCommon.$(OBJ)
+SAP_WATCH_OBJS = sapWatch.$(OBJ)
+MPEG_1OR2_PROGRAM_TO_TRANSPORT_STREAM_OBJS = testMPEG1or2ProgramToTransportStream.$(OBJ)
+H264_VIDEO_TO_TRANSPORT_STREAM_OBJS = testH264VideoToTransportStream.$(OBJ)
+H265_VIDEO_TO_TRANSPORT_STREAM_OBJS = testH265VideoToTransportStream.$(OBJ)
+MPEG2_TRANSPORT_STREAM_INDEXER_OBJS = MPEG2TransportStreamIndexer.$(OBJ)
+MPEG2_TRANSPORT_STREAM_TRICK_PLAY_OBJS = testMPEG2TransportStreamTrickPlay.$(OBJ)
+REGISTER_RTSP_STREAM_OBJS = registerRTSPStream.$(OBJ)
+TEST_MKV_SPLITTER_OBJS = testMKVSplitter.$(OBJ)
+TEST_MPEG2_TRANSPORT_STREAM_SPLITTER_OBJS = testMPEG2TransportStreamSplitter.$(OBJ)
+MIKEY_PARSE_OBJS = mikeyParse.$(OBJ)
+
+GSM_STREAMER_OBJS = testGSMStreamer.$(OBJ) testGSMEncoder.$(OBJ)
+
+openRTSP.$(CPP): playCommon.hh
+playCommon.$(CPP): playCommon.hh
+playSIP.$(CPP): playCommon.hh
+
+USAGE_ENVIRONMENT_DIR = ../UsageEnvironment
+USAGE_ENVIRONMENT_LIB = $(USAGE_ENVIRONMENT_DIR)/libUsageEnvironment.$(libUsageEnvironment_LIB_SUFFIX)
+BASIC_USAGE_ENVIRONMENT_DIR = ../BasicUsageEnvironment
+BASIC_USAGE_ENVIRONMENT_LIB = $(BASIC_USAGE_ENVIRONMENT_DIR)/libBasicUsageEnvironment.$(libBasicUsageEnvironment_LIB_SUFFIX)
+LIVEMEDIA_DIR = ../liveMedia
+LIVEMEDIA_LIB = $(LIVEMEDIA_DIR)/libliveMedia.$(libliveMedia_LIB_SUFFIX)
+GROUPSOCK_DIR = ../groupsock
+GROUPSOCK_LIB = $(GROUPSOCK_DIR)/libgroupsock.$(libgroupsock_LIB_SUFFIX)
+LOCAL_LIBS = $(LIVEMEDIA_LIB) $(GROUPSOCK_LIB) \
+ $(BASIC_USAGE_ENVIRONMENT_LIB) $(USAGE_ENVIRONMENT_LIB)
+LIBS = $(LOCAL_LIBS) $(LIBS_FOR_CONSOLE_APPLICATION)
+
+testMP3Streamer$(EXE): $(MP3_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MP3_STREAMER_OBJS) $(LIBS)
+testMP3Receiver$(EXE): $(MP3_RECEIVER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MP3_RECEIVER_OBJS) $(LIBS)
+testRelay$(EXE): $(RELAY_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(RELAY_OBJS) $(LIBS)
+testReplicator$(EXE): $(REPLICATOR_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(REPLICATOR_OBJS) $(LIBS)
+testH264VideoToHLSSegments$(EXE): $(H264_VIDEO_TO_HLS_SEGMENTS_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H264_VIDEO_TO_HLS_SEGMENTS_OBJS) $(LIBS)
+testMPEG1or2Splitter$(EXE): $(MPEG_1OR2_SPLITTER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_SPLITTER_OBJS) $(LIBS)
+testMPEG1or2VideoStreamer$(EXE): $(MPEG_1OR2_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_VIDEO_STREAMER_OBJS) $(LIBS)
+testMPEG1or2VideoReceiver$(EXE): $(MPEG_1OR2_VIDEO_RECEIVER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_VIDEO_RECEIVER_OBJS) $(LIBS)
+testMPEG1or2AudioVideoStreamer$(EXE): $(MPEG_1OR2_AUDIO_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_AUDIO_VIDEO_STREAMER_OBJS) $(LIBS)
+testMPEG2TransportStreamer$(EXE): $(MPEG2_TRANSPORT_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_STREAMER_OBJS) $(LIBS)
+testMPEG2TransportReceiver$(EXE): $(MPEG2_TRANSPORT_RECEIVER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_RECEIVER_OBJS) $(LIBS)
+testMPEG4VideoStreamer$(EXE): $(MPEG4_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG4_VIDEO_STREAMER_OBJS) $(LIBS)
+testH264VideoStreamer$(EXE): $(H264_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H264_VIDEO_STREAMER_OBJS) $(LIBS)
+testH265VideoStreamer$(EXE): $(H265_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H265_VIDEO_STREAMER_OBJS) $(LIBS)
+testDVVideoStreamer$(EXE): $(DV_VIDEO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(DV_VIDEO_STREAMER_OBJS) $(LIBS)
+testWAVAudioStreamer$(EXE): $(WAV_AUDIO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(WAV_AUDIO_STREAMER_OBJS) $(LIBS)
+testAMRAudioStreamer$(EXE): $(AMR_AUDIO_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(AMR_AUDIO_STREAMER_OBJS) $(LIBS)
+testOnDemandRTSPServer$(EXE): $(ON_DEMAND_RTSP_SERVER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(ON_DEMAND_RTSP_SERVER_OBJS) $(LIBS)
+testMKVStreamer$(EXE): $(MKV_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MKV_STREAMER_OBJS) $(LIBS)
+testOggStreamer$(EXE): $(OGG_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(OGG_STREAMER_OBJS) $(LIBS)
+vobStreamer$(EXE): $(VOB_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(VOB_STREAMER_OBJS) $(LIBS)
+testRTSPClient$(EXE): $(TEST_RTSP_CLIENT_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(TEST_RTSP_CLIENT_OBJS) $(LIBS)
+openRTSP$(EXE): $(OPEN_RTSP_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(OPEN_RTSP_OBJS) $(LIBS)
+playSIP$(EXE): $(PLAY_SIP_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(PLAY_SIP_OBJS) $(LIBS)
+sapWatch$(EXE): $(SAP_WATCH_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(SAP_WATCH_OBJS) $(LIBS)
+testMPEG1or2ProgramToTransportStream$(EXE): $(MPEG_1OR2_PROGRAM_TO_TRANSPORT_STREAM_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG_1OR2_PROGRAM_TO_TRANSPORT_STREAM_OBJS) $(LIBS)
+testH264VideoToTransportStream$(EXE): $(H264_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H264_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LIBS)
+testH265VideoToTransportStream$(EXE): $(H265_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(H265_VIDEO_TO_TRANSPORT_STREAM_OBJS) $(LIBS)
+MPEG2TransportStreamIndexer$(EXE): $(MPEG2_TRANSPORT_STREAM_INDEXER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_STREAM_INDEXER_OBJS) $(LIBS)
+testMPEG2TransportStreamTrickPlay$(EXE): $(MPEG2_TRANSPORT_STREAM_TRICK_PLAY_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MPEG2_TRANSPORT_STREAM_TRICK_PLAY_OBJS) $(LIBS)
+registerRTSPStream$(EXE): $(REGISTER_RTSP_STREAM_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(REGISTER_RTSP_STREAM_OBJS) $(LIBS)
+testMKVSplitter$(EXE): $(TEST_MKV_SPLITTER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(TEST_MKV_SPLITTER_OBJS) $(LIBS)
+testMPEG2TransportStreamSplitter$(EXE): $(TEST_MPEG2_TRANSPORT_STREAM_SPLITTER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(TEST_MPEG2_TRANSPORT_STREAM_SPLITTER_OBJS) $(LIBS)
+mikeyParse$(EXE): $(MIKEY_PARSE_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(MIKEY_PARSE_OBJS) $(LIBS)
+
+testGSMStreamer$(EXE): $(GSM_STREAMER_OBJS) $(LOCAL_LIBS)
+ $(LINK)$@ $(CONSOLE_LINK_OPTS) $(GSM_STREAMER_OBJS) $(LIBS)
+
+clean:
+ -rm -rf *.$(OBJ) $(ALL) core *.core *~ include/*~
+
+install: $(ALL)
+ install -d $(DESTDIR)$(PREFIX)/bin
+ install -m 755 $(ALL) $(DESTDIR)$(PREFIX)/bin
+
+##### Any additional, platform-specific rules come here:
diff --git a/testProgs/mikeyParse.cpp b/testProgs/mikeyParse.cpp
new file mode 100644
index 0000000..47931df
--- /dev/null
+++ b/testProgs/mikeyParse.cpp
@@ -0,0 +1,414 @@
+// Parses MIKEY data (from Base64)
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <Base64.hh>
+#include <NetCommon.h>
+
// Reads a 32-bit big-endian value at "ptr", advancing "ptr" past it.
static u_int32_t get4Bytes(u_int8_t const*& ptr) {
  u_int32_t value = 0;
  for (unsigned k = 0; k < 4; ++k) {
    value = (value << 8) | *ptr++;
  }
  return value;
}
+
// Reads a 16-bit big-endian value at "ptr", advancing "ptr" past it.
static u_int16_t get2Bytes(u_int8_t const*& ptr) {
  u_int16_t const hi = *ptr++;
  u_int16_t const lo = *ptr++;
  return (hi << 8) | lo;
}
+
// Consumes and returns a single byte, advancing "ptr" past it.
static u_int8_t getByte(u_int8_t const*& ptr) {
  return *ptr++;
}
+
+Boolean parseMikeyUnknown(u_int8_t const*& /*ptr*/, u_int8_t const* /*endPtr*/, u_int8_t& /*nextPayloadType*/) {
+ fprintf(stderr, "\tUnknown or unhandled payload type\n");
+ return False;
+}
+
// Human-readable names for each of the 256 possible MIKEY payload types
// (filled in by main(); unset entries read "unknown or unhandled"):
char const* payloadTypeName[256];
// Human-readable descriptions of the HDR "data type" field values
// (also filled in by main()):
char const* dataTypeComment[256];
// Bounds check used by all parsers: abort (return False from the enclosing
// function) if fewer than (n) bytes remain before "endPtr":
#define testSize(n) do {if (ptr + (n) > endPtr) return False; } while (0)
+
+Boolean parseMikeyHDR(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ testSize(10); // up to the start of "CS ID map info"
+
+ fprintf(stderr, "\tversion: %d\n", getByte(ptr));
+
+ u_int8_t const dataType = getByte(ptr);
+ fprintf(stderr, "\tdata type: %d (%s)\n", dataType, dataTypeComment[dataType]);
+
+ nextPayloadType = getByte(ptr);
+ fprintf(stderr, "\tnext payload: %d (%s)\n", nextPayloadType, payloadTypeName[nextPayloadType]);
+
+ u_int8_t const V_PRF = getByte(ptr);
+ u_int8_t const PRF = V_PRF&0x7F;
+ fprintf(stderr, "\tV:%d; PRF:%d (%s)\n", V_PRF>>7, PRF, PRF == 0 ? "MIKEY-1" : "unknown");
+
+ fprintf(stderr, "\tCSB ID:0x%08x\n", get4Bytes(ptr));
+
+ u_int8_t numCryptoSessions = getByte(ptr);
+ fprintf(stderr, "\t#CS:%d\n", numCryptoSessions);
+
+ u_int8_t const CS_ID_map_type = getByte(ptr);
+ fprintf(stderr, "\tCS ID map type:%d (%s)\n",
+ CS_ID_map_type, CS_ID_map_type == 0 ? "SRTP-ID" : "unknown");
+ if (CS_ID_map_type != 0) return False;
+
+ fprintf(stderr, "\tCS ID map info:\n");
+ testSize(numCryptoSessions * (1+4+4)); // the size of the "CS ID map info"
+ for (u_int8_t i = 1; i <= numCryptoSessions; ++i) {
+ fprintf(stderr, "\tPolicy_no_%d: %d;\tSSRC_%d: 0x%08x; ROC_%d: 0x%08x\n",
+ i, getByte(ptr),
+ i, get4Bytes(ptr),
+ i, get4Bytes(ptr));
+ }
+
+ return True;
+}
+
// Parses one "Key data" sub-payload within a KEMAC payload, printing its
// fields to stderr.  Here "endPtr" bounds just the KEMAC's "Encr data"
// region (not the whole message).  Returns False on truncated input or an
// unknown key "Type".
static Boolean parseKeyDataSubPayload(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
  fprintf(stderr, "\tEncr data:\n");
  testSize(4); // up to the start of "Key data"

  nextPayloadType = getByte(ptr);
  fprintf(stderr, "\t\tnext payload: %d (%s)\n", nextPayloadType, payloadTypeName[nextPayloadType]);

  // The key "Type" (upper nibble) and "KV" key-validity type (lower nibble)
  // share a single byte:
  u_int8_t Type_KV = getByte(ptr);
  u_int8_t Type = Type_KV>>4;
  u_int8_t KV = Type_KV&0x0F;
  fprintf(stderr, "\t\tType: %d (%s)\n", Type,
	  Type == 0 ? "TGK" : Type == 1 ? "TGK+SALT" : Type == 2 ? "TEK" : Type == 3 ? "TEK+SALT" : "unknown");
  if (Type > 3) return False;
  Boolean hasSalt = Type == 1 || Type == 3; // the "+SALT" key types carry a trailing salt

  fprintf(stderr, "\t\tKey Validity: %d (%s)\n", KV,
	  KV == 0 ? "NULL" : KV == 1 ? "SPI/MKI" : KV == 2 ? "Interval" : "unknown");
  Boolean hasKV = KV != 0; // note: for an unknown KV (>2) neither branch below runs

  u_int16_t keyDataLen = get2Bytes(ptr);
  fprintf(stderr, "\t\tKey data len: %d\n", keyDataLen);

  testSize(keyDataLen);
  fprintf(stderr, "\t\tKey data: ");
  for (unsigned i = 0; i < keyDataLen; ++i) fprintf(stderr, ":%02x", getByte(ptr));
  fprintf(stderr, "\n");

  // Optional salt: a 2-byte length followed by the salt bytes:
  if (hasSalt) {
    testSize(2);
    u_int16_t saltLen = get2Bytes(ptr);
    fprintf(stderr, "\t\tSalt len: %d\n", saltLen);

    testSize(saltLen);
    fprintf(stderr, "\t\tSalt data: ");
    for (unsigned i = 0; i < saltLen; ++i) fprintf(stderr, ":%02x", getByte(ptr));
    fprintf(stderr, "\n");
  }

  // Optional key-validity data; its format depends on the "KV" type:
  if (hasKV) {
    fprintf(stderr, "\t\tKV (key validity) data:\n");
    if (KV == 1) { // SPI/MKI: a 1-byte length, then the SPI bytes
      testSize(1);
      u_int8_t SPILength = getByte(ptr);
      fprintf(stderr, "\t\t\tSPI Length: %d\n", SPILength);

      testSize(SPILength);
      fprintf(stderr, "\t\t\tSPI: ");
      for (unsigned i = 0; i < SPILength; ++i) fprintf(stderr, ":%02x", getByte(ptr));
      fprintf(stderr, "\n");
    } else if (KV == 2) { // Interval: length-prefixed "VF" then "VT" values
      testSize(1);
      u_int8_t VFLength = getByte(ptr);
      fprintf(stderr, "\t\t\tVF Length: %d\n", VFLength);

      testSize(VFLength);
      fprintf(stderr, "\t\t\tVF: ");
      for (unsigned i = 0; i < VFLength; ++i) fprintf(stderr, ":%02x", getByte(ptr));
      fprintf(stderr, "\n");

      testSize(1);
      u_int8_t VTLength = getByte(ptr);
      fprintf(stderr, "\t\t\tVT Length: %d\n", VTLength);

      testSize(VTLength);
      fprintf(stderr, "\t\t\tVT: ");
      for (unsigned i = 0; i < VTLength; ++i) fprintf(stderr, ":%02x", getByte(ptr));
      fprintf(stderr, "\n");
    }
  }

  return True;
}
+
+Boolean parseMikeyKEMAC(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ testSize(4); // up to the start of "Encr data"
+
+ nextPayloadType = getByte(ptr);
+ fprintf(stderr, "\tnext payload: %d (%s)\n", nextPayloadType, payloadTypeName[nextPayloadType]);
+
+ u_int8_t encrAlg = getByte(ptr);
+ fprintf(stderr, "\tEncr alg: %d (%s)\n", encrAlg,
+ encrAlg == 0 ? "NULL" : encrAlg == 1 ? "AES-CM-128" : encrAlg == 2 ? "AES-KW-128" : "unknown");
+
+ u_int16_t encrDataLen = get2Bytes(ptr);
+ fprintf(stderr, "\tencr data len: %d\n", encrDataLen);
+
+ testSize(encrDataLen + 1/*allow for "Mac alg"*/);
+ u_int8_t const* endOfKeyData = ptr + encrDataLen;
+
+ // Allow for multiple key data sub-payloads
+ while (ptr < endOfKeyData) {
+ if (!parseKeyDataSubPayload(ptr, endOfKeyData, nextPayloadType)) return False;
+ }
+
+ u_int8_t macAlg = getByte(ptr);
+ fprintf(stderr, "\tMAC alg: %d (%s)\n", macAlg,
+ macAlg == 0 ? "NULL" : macAlg == 1 ? "HMAC-SHA-1-160" : "unknown");
+ if (macAlg > 1) return False;
+ if (macAlg == 1) { // HMAC-SHA-1-160
+ unsigned const macLen = 160/8; // bytes
+ fprintf(stderr, "\t\tMAC: ");
+ for (unsigned i = 0; i < macLen; ++i) fprintf(stderr, ":%02x", getByte(ptr));
+ fprintf(stderr, "\n");
+ }
+
+ return True;
+}
+
+Boolean parseMikeyT(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ testSize(2); // up to the start of "TS value"
+
+ nextPayloadType = getByte(ptr);
+ fprintf(stderr, "\tnext payload: %d (%s)\n", nextPayloadType, payloadTypeName[nextPayloadType]);
+
+ u_int8_t TS_type = getByte(ptr);
+ unsigned TS_value_len;
+ fprintf(stderr, "\tTS type: %d (", TS_type);
+ switch (TS_type) {
+ case 0: {
+ fprintf(stderr, "NTP-UTC)\n");
+ TS_value_len = 8; // 64 bits
+ break;
+ }
+ case 1: {
+ fprintf(stderr, "NTP)\n");
+ TS_value_len = 8; // 64 bits
+ break;
+ }
+ case 2: {
+ fprintf(stderr, "COUNTER)\n");
+ TS_value_len = 4; // 32 bits
+ break;
+ }
+ default: {
+ fprintf(stderr, "unknown)\n");
+ return False;
+ }
+ }
+
+ testSize(TS_value_len);
+ fprintf(stderr, "\tTS value:");
+ for (unsigned i = 0; i < TS_value_len; ++i) fprintf(stderr, ":%02x", getByte(ptr));
+ fprintf(stderr, "\n");
+
+ return True;
+}
+
// The highest SRTP policy-param "type" number that has an explanation below:
#define MAX_SRTP_POLICY_PARAM_TYPE 12
// Names of the SRTP policy-param types, indexed by type number (0..12):
static char const* SRTPPolicyParamTypeExplanation[] = {
  "Encryption algorithm",
  "Session Encryption key length",
  "Authentication algorithm",
  "Session Authentication key length",
  "Session Salt key length",
  "SRTP Pseudo Random Function",
  "Key derivation rate",
  "SRTP encryption off/on",
  "SRTCP encryption off/on",
  "Sender's FEC order",
  "SRTP authentication off/on",
  "Authentication tag length",
  "SRTP prefix length",
};
+
// Parses the "Policy param" region of a SP (Security Policy) payload: a
// sequence of {type, length, value} triples, printed to stderr.  Here
// "endPtr" bounds just the policy-param region, not the whole message.
// Returns False on truncated input.
static Boolean parseSRTPPolicyParam(u_int8_t const*& ptr, u_int8_t const* endPtr) {
  fprintf(stderr, "\tPolicy param:\n");
  while (ptr < endPtr) {
    testSize(2); // the "type" and "length" bytes

    u_int8_t ppType = getByte(ptr);
    fprintf(stderr, "\t\ttype: %d (%s); ", ppType,
	    ppType > MAX_SRTP_POLICY_PARAM_TYPE ? "unknown" : SRTPPolicyParamTypeExplanation[ppType]);

    u_int8_t ppLen = getByte(ptr);
    fprintf(stderr, "length: %d; value: ", ppLen);

    testSize(ppLen);
    // Single-byte values are printed in decimal (and may be explained by the
    // switch below); longer values are hex-dumped.  0xFF is a sentinel for
    // 'not a single-byte value', making the switch print "unknown" instead
    // of a bogus explanation.
    u_int8_t ppVal = 0xFF;
    if (ppLen == 1) {
      ppVal = getByte(ptr);
      fprintf(stderr, "%d", ppVal);
    } else {
      for (unsigned j = 0; j < ppLen; ++j) fprintf(stderr, ":%02x", getByte(ptr));
    }

    // For a few parameter types, also explain what the value means:
    switch (ppType) {
      case 0: { // Encryption algorithm
	fprintf(stderr, " (%s)",
		ppVal == 0 ? "NULL" : ppVal == 1 ? "AES-CM" : ppVal == 2 ? "AES-F8" : "unknown");
	break;
      }
      case 2: { // Authentication algorithm
	fprintf(stderr, " (%s)",
		ppVal == 0 ? "NULL" : ppVal == 1 ? "HMAC-SHA-1" : "unknown");
	break;
      }
      case 5: { // SRTP Pseudo Random Function
	fprintf(stderr, " (%s)",
		ppVal == 0 ? "AES-CM" : "unknown");
	break;
      }
      case 9: { // sender's FEC order
	fprintf(stderr, " (%s)",
		ppVal == 0 ? "First FEC, then SRTP" : "unknown");
	break;
      }
    }
    fprintf(stderr, "\n");
  }

  return True;
}
+
+Boolean parseMikeySP(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ testSize(2); // up to the start of "Policy param"
+
+ nextPayloadType = getByte(ptr);
+ fprintf(stderr, "\tnext payload: %d (%s)\n", nextPayloadType, payloadTypeName[nextPayloadType]);
+
+ fprintf(stderr, "\tPolicy number: %d\n", getByte(ptr));
+
+ u_int8_t protocolType = getByte(ptr);
+ fprintf(stderr, "\tProtocol type: %d (%s)\n", protocolType, protocolType == 0 ? "SRTP" : "unknown");
+ if (protocolType != 0) return False;
+
+ u_int16_t policyParam_len = get2Bytes(ptr);
+ fprintf(stderr, "\tPolicy param len: %d\n", policyParam_len);
+
+ testSize(policyParam_len);
+ return parseSRTPPolicyParam(ptr, ptr + policyParam_len);
+}
+
+Boolean parseMikeyRAND(u_int8_t const*& ptr, u_int8_t const* endPtr, u_int8_t& nextPayloadType) {
+ testSize(2); // up to the start of "RAND"
+
+ nextPayloadType = getByte(ptr);
+ fprintf(stderr, "\tnext payload: %d (%s)\n", nextPayloadType, payloadTypeName[nextPayloadType]);
+
+ u_int8_t RAND_len = getByte(ptr);
+ fprintf(stderr, "\tRAND len: %d", RAND_len);
+
+ testSize(RAND_len);
+ fprintf(stderr, "\tRAND:");
+ for (unsigned i = 0; i < RAND_len; ++i) fprintf(stderr, ":%02x", getByte(ptr));
+ fprintf(stderr, "\n");
+
+ return True;
+}
+
// Signature shared by all payload parsers: consume one payload at "ptr"
// (bounded by "endPtr"), set "nextPayloadType" from the payload's first
// field, and return False to abort parsing.
typedef Boolean (parseMikeyPayloadFunc)(u_int8_t const*& ptr, u_int8_t const* endPtr,
					u_int8_t& nextPayloadType);
// Parser dispatch table, indexed by payload type (populated in main()):
parseMikeyPayloadFunc* payloadParser[256];
+
+int main(int argc, char** argv) {
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <base64Data>\n", argv[0]);
+ exit(1);
+ }
+ char const* base64Data = argv[1];
+
+ unsigned mikeyDataSize;
+ u_int8_t* mikeyData = base64Decode(base64Data, mikeyDataSize);
+
+ fprintf(stderr, "Base64Data \"%s\" produces %d bytes of MIKEY data:\n", base64Data, mikeyDataSize);
+ for (unsigned i = 0; i < mikeyDataSize; ++i) fprintf(stderr, ":%02x", mikeyData[i]);
+ fprintf(stderr, "\n");
+
+ for (unsigned i = 0; i < 256; ++i) {
+ payloadTypeName[i] = "unknown or unhandled";
+ payloadParser[i] = parseMikeyUnknown;
+
+ dataTypeComment[i] = "unknown";
+ }
+
+ // Populate known payload types:
+ payloadTypeName[0] = "Last payload";
+
+ payloadTypeName[1] = "KEMAC";
+ payloadParser[1] = parseMikeyKEMAC;
+
+ payloadTypeName[2] = "PKE";
+
+ payloadTypeName[3] = "DH";
+
+ payloadTypeName[4] = "SIGN";
+
+ payloadTypeName[5] = "T";
+ payloadParser[5] = parseMikeyT;
+
+ payloadTypeName[6] = "ID";
+
+ payloadTypeName[7] = "CERT";
+
+ payloadTypeName[8] = "CHASH";
+
+ payloadTypeName[9] = "V";
+
+ payloadTypeName[10] = "SP";
+ payloadParser[10] = parseMikeySP;
+
+ payloadTypeName[11] = "RAND";
+ payloadParser[11] = parseMikeyRAND;
+
+ payloadTypeName[12] = "ERR";
+
+ payloadTypeName[20] = "Key data";
+
+ payloadTypeName[21] = "General Ext.";
+
+ // Populate known data types:
+ dataTypeComment[0] = "Initiator's pre-shared key message";
+ dataTypeComment[1] = "Verification message of a pre-shared key message";
+ dataTypeComment[2] = "Initiator's public-key transport message";
+ dataTypeComment[3] = "Verification message of a public-key message";
+ dataTypeComment[4] = "Initiator's DH exchange message";
+ dataTypeComment[5] = "Responder's DH exchange message";
+ dataTypeComment[6] = "Error message";
+
+ u_int8_t const* ptr = mikeyData;
+ u_int8_t* const endPtr = &mikeyData[mikeyDataSize];
+ u_int8_t nextPayloadType;
+
+ do {
+ // Begin by parsing an initial "HDR":
+ fprintf(stderr, "HDR:\n");
+ if (!parseMikeyHDR(ptr, endPtr, nextPayloadType)) break;
+
+ // Then parse each successive payload:
+ while (nextPayloadType != 0 /* Last payload */) {
+ fprintf(stderr, "%s:\n", payloadTypeName[nextPayloadType]);
+ if (!(*payloadParser[nextPayloadType])(ptr, endPtr, nextPayloadType)) break;
+ }
+ } while (0);
+
+ if (ptr < endPtr) {
+ fprintf(stderr, "+%ld bytes of unparsed data: ", endPtr-ptr);
+ while (ptr < endPtr) fprintf(stderr, ":%02x", *ptr++);
+ fprintf(stderr, "\n");
+ }
+
+ delete[] mikeyData;
+ return 0;
+}
diff --git a/testProgs/openRTSP.cpp b/testProgs/openRTSP.cpp
new file mode 100644
index 0000000..d57a96c
--- /dev/null
+++ b/testProgs/openRTSP.cpp
@@ -0,0 +1,68 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A RTSP client application that opens a RTSP URL argument,
+// and extracts and records the data from each incoming RTP stream.
+//
+// NOTE: If you want to develop your own RTSP client application (or embed RTSP client functionality into your own application),
+// then we don't recommend using this code as a model, because it is too complex (with many options).
+// Instead, we recommend using the "testRTSPClient" application code as a model.
+
+#include "playCommon.hh"
+
// The RTSP client object used for the whole session (created/assigned below):
RTSPClient* ourRTSPClient = NULL;
// Creates (and remembers) the "RTSPClient" for the given URL.
// "tunnelOverHTTPPortNum" is defined in "playCommon.cpp" and passed through
// to "RTSPClient::createNew()".
Medium* createClient(UsageEnvironment& env, char const* url, int verbosityLevel, char const* applicationName) {
  extern portNumBits tunnelOverHTTPPortNum;
  return ourRTSPClient = RTSPClient::createNew(env, url, verbosityLevel, applicationName, tunnelOverHTTPPortNum);
}
+
// Records an externally-created client object as our "RTSPClient".
void assignClient(Medium* client) {
  ourRTSPClient = (RTSPClient*)client;
}
+
// Sends an RTSP "OPTIONS" request; "afterFunc" is invoked with the response.
void getOptions(RTSPClient::responseHandler* afterFunc) {
  ourRTSPClient->sendOptionsCommand(afterFunc, ourAuthenticator);
}
+
// Sends an RTSP "DESCRIBE" request, to fetch the stream's SDP description.
void getSDPDescription(RTSPClient::responseHandler* afterFunc) {
  ourRTSPClient->sendDescribeCommand(afterFunc, ourAuthenticator);
}
+
// Sends an RTSP "SETUP" request for one subsession.
// NOTE(review): the positional "False" argument's meaning comes from the
// "RTSPClient::sendSetupCommand()" API - confirm against "RTSPClient.hh".
void setupSubsession(MediaSubsession* subsession, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified, RTSPClient::responseHandler* afterFunc) {

  ourRTSPClient->sendSetupCommand(*subsession, afterFunc, False, streamUsingTCP, forceMulticastOnUnspecified, ourAuthenticator);
}
+
// Sends an RTSP "PLAY" request for the whole session, using numeric
// "start"/"end" times and the given playback "scale".
void startPlayingSession(MediaSession* session, double start, double end, float scale, RTSPClient::responseHandler* afterFunc) {
  ourRTSPClient->sendPlayCommand(*session, afterFunc, start, end, scale, ourAuthenticator);
}
+
// Overload of the above, taking absolute (string-valued) start/end times
// instead of numeric offsets.
void startPlayingSession(MediaSession* session, char const* absStartTime, char const* absEndTime, float scale, RTSPClient::responseHandler* afterFunc) {
  ourRTSPClient->sendPlayCommand(*session, afterFunc, absStartTime, absEndTime, scale, ourAuthenticator);
}
+
// Sends an RTSP "TEARDOWN" request, ending the session.
void tearDownSession(MediaSession* session, RTSPClient::responseHandler* afterFunc) {
  ourRTSPClient->sendTeardownCommand(*session, afterFunc, ourAuthenticator);
}
+
// Sets the user-agent string that the client uses in outgoing requests.
void setUserAgentString(char const* userAgentString) {
  ourRTSPClient->setUserAgentString(userAgentString);
}
+
// Application-specific configuration, read by the shared "playCommon.cpp" code:
Boolean allowProxyServers = False; // disables the <proxy-server> arguments to "-u"
Boolean controlConnectionUsesTCP = True; // enables the "-t"/"-T" options
Boolean supportCodecSelection = False; // disables the "-A"/"-M" options
char const* clientProtocolName = "RTSP";
diff --git a/testProgs/playCommon.cpp b/testProgs/playCommon.cpp
new file mode 100644
index 0000000..5dde5cf
--- /dev/null
+++ b/testProgs/playCommon.cpp
@@ -0,0 +1,1544 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A common framework, used for the "openRTSP" and "playSIP" applications
+// Implementation
+//
+// NOTE: If you want to develop your own RTSP client application (or embed RTSP client functionality into your own application),
+// then we don't recommend using this code as a model, because it is too complex (with many options).
+// Instead, we recommend using the "testRTSPClient" application code as a model.
+
+#include "playCommon.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+#if defined(__WIN32__) || defined(_WIN32)
+#define snprintf _snprintf
+#else
+#include <signal.h>
+#define USE_SIGNALS 1
+#endif
+
+// Forward function definitions:
+void continueAfterClientCreation0(RTSPClient* client, Boolean requestStreamingOverTCP);
+void continueAfterClientCreation1();
+void continueAfterOPTIONS(RTSPClient* client, int resultCode, char* resultString);
+void continueAfterDESCRIBE(RTSPClient* client, int resultCode, char* resultString);
+void continueAfterSETUP(RTSPClient* client, int resultCode, char* resultString);
+void continueAfterPLAY(RTSPClient* client, int resultCode, char* resultString);
+void continueAfterTEARDOWN(RTSPClient* client, int resultCode, char* resultString);
+
+void createOutputFiles(char const* periodicFilenameSuffix);
+void createPeriodicOutputFiles();
+void setupStreams();
+void closeMediaSinks();
+void subsessionAfterPlaying(void* clientData);
+void subsessionByeHandler(void* clientData, char const* reason);
+void sessionAfterPlaying(void* clientData = NULL);
+void sessionTimerHandler(void* clientData);
+void periodicFileOutputTimerHandler(void* clientData);
+void shutdown(int exitCode = 1);
+void signalHandlerShutdown(int sig);
+void checkForPacketArrival(void* clientData);
+void checkInterPacketGaps(void* clientData);
+void checkSessionTimeoutBrokenServer(void* clientData);
+void beginQOSMeasurement();
+
// Global state shared between this file and the application-specific front
// end ("openRTSP.cpp" or "playSIP.cpp"):
char const* progName;
UsageEnvironment* env;
Medium* ourClient = NULL;
Authenticator* ourAuthenticator = NULL;
char const* streamURL = NULL;
MediaSession* session = NULL;
// Handles for pending delayed tasks (kept so they can be (un)scheduled):
TaskToken sessionTimerTask = NULL;
TaskToken sessionTimeoutBrokenServerTask = NULL;
TaskToken arrivalCheckTimerTask = NULL;
TaskToken interPacketGapCheckTimerTask = NULL;
TaskToken qosMeasurementTimerTask = NULL;
TaskToken periodicFileOutputTask = NULL;
// Output-mode settings (set from command-line options in main()):
Boolean createReceivers = True;
Boolean outputQuickTimeFile = False;
Boolean generateMP4Format = False;
QuickTimeFileSink* qtOut = NULL;
Boolean outputAVIFile = False;
AVIFileSink* aviOut = NULL;
Boolean audioOnly = False;
Boolean videoOnly = False;
char const* singleMedium = NULL;
int verbosityLevel = 1; // by default, print verbose output
// Playback timing / trick-play settings:
double duration = 0;
double durationSlop = -1.0; // extra seconds to play at the end
double initialSeekTime = 0.0f;
char* initialAbsoluteSeekTime = NULL;
char* initialAbsoluteSeekEndTime = NULL;
float scale = 1.0f;
double endTime;
unsigned interPacketGapMaxTime = 0;
unsigned totNumPacketsReceived = ~0; // used if checking inter-packet gaps
Boolean playContinuously = False;
int simpleRTPoffsetArg = -1;
Boolean sendOptionsRequest = True;
Boolean sendOptionsRequestOnly = False;
Boolean oneFilePerFrame = False;
Boolean notifyOnPacketArrival = False;
Boolean sendKeepAlivesToBrokenServers = False;
unsigned sessionTimeoutParameter = 0;
// Transport / authentication settings:
Boolean streamUsingTCP = False;
Boolean forceMulticastOnUnspecified = False;
unsigned short desiredPortNum = 0;
portNumBits tunnelOverHTTPPortNum = 0;
char* username = NULL;
char* password = NULL;
char* proxyServerName = NULL;
unsigned short proxyServerPortNum = 0;
unsigned char desiredAudioRTPPayloadFormat = 0;
char* mimeSubtype = NULL;
// Movie parameters, used for composite (QuickTime/AVI) output files:
unsigned short movieWidth = 240; // default
Boolean movieWidthOptionSet = False;
unsigned short movieHeight = 180; // default
Boolean movieHeightOptionSet = False;
unsigned movieFPS = 15; // default
Boolean movieFPSOptionSet = False;
char const* fileNamePrefix = "";
unsigned fileSinkBufferSize = 100000;
unsigned socketInputBufferSize = 0;
Boolean packetLossCompensate = False;
Boolean syncStreams = False;
Boolean generateHintTracks = False;
Boolean waitForResponseToTEARDOWN = True;
unsigned qosMeasurementIntervalMS = 0; // 0 means: Don't output QOS data
char* userAgent = NULL;
unsigned fileOutputInterval = 0; // seconds
unsigned fileOutputSecondsSoFar = 0; // seconds
// Optional server for handling incoming "REGISTER" commands (the "-R" option):
Boolean createHandlerServerForREGISTERCommand = False;
portNumBits handlerServerForREGISTERCommandPortNum = 0;
HandlerServerForREGISTERCommand* handlerServerForREGISTERCommand; // NOTE(review): no explicit initializer, unlike the other globals - confirm it's assigned before use
char* usernameForREGISTER = NULL;
char* passwordForREGISTER = NULL;
UserAuthenticationDatabase* authDBForREGISTER = NULL;

struct timeval startTime; // set at program start (via gettimeofday() in main())
+
+void usage() {
+ *env << "Usage: " << progName
+ << " [-p <startPortNum>] [-r|-q|-4|-i] [-a|-v] [-V] [-d <duration>] [-D <max-inter-packet-gap-time> [-c] [-S <offset>] [-n] [-O]"
+ << (controlConnectionUsesTCP ? " [-t|-T <http-port>]" : "")
+ << " [-u <username> <password>"
+ << (allowProxyServers ? " [<proxy-server> [<proxy-server-port>]]" : "")
+ << "]" << (supportCodecSelection ? " [-A <audio-codec-rtp-payload-format-code>|-M <mime-subtype-name>]" : "")
+ << " [-s <initial-seek-time>]|[-U <absolute-seek-time>] [-E <absolute-seek-end-time>] [-z <scale>] [-g user-agent]"
+ << " [-k <username-for-REGISTER> <password-for-REGISTER>]"
+ << " [-P <interval-in-seconds>] [-K]"
+ << " [-w <width> -h <height>] [-f <frames-per-second>] [-y] [-H] [-Q [<measurement-interval>]] [-F <filename-prefix>] [-b <file-sink-buffer-size>] [-B <input-socket-buffer-size>] [-I <input-interface-ip-address>] [-m] [<url>|-R [<port-num>]] (or " << progName << " -o [-V] <url>)\n";
+ shutdown();
+}
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ progName = argv[0];
+
+ gettimeofday(&startTime, NULL);
+
+#ifdef USE_SIGNALS
+ // Allow ourselves to be shut down gracefully by a SIGHUP or a SIGUSR1:
+ signal(SIGHUP, signalHandlerShutdown);
+ signal(SIGUSR1, signalHandlerShutdown);
+#endif
+
+ // unfortunately we can't use getopt() here, as Windoze doesn't have it
+ while (argc > 1) {
+ char* const opt = argv[1];
+ if (opt[0] != '-') {
+ if (argc == 2) break; // only the URL is left
+ usage();
+ }
+
+ switch (opt[1]) {
+ case 'p': { // specify start port number
+ int portArg;
+ if (sscanf(argv[2], "%d", &portArg) != 1) {
+ usage();
+ }
+ if (portArg <= 0 || portArg >= 65536 || portArg&1) {
+ *env << "bad port number: " << portArg
+ << " (must be even, and in the range (0,65536))\n";
+ usage();
+ }
+ desiredPortNum = (unsigned short)portArg;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'r': { // do not receive data (instead, just 'play' the stream(s))
+ createReceivers = False;
+ break;
+ }
+
+ case 'q': { // output a QuickTime file (to stdout)
+ outputQuickTimeFile = True;
+ break;
+ }
+
+ case '4': { // output a 'mp4'-format file (to stdout)
+ outputQuickTimeFile = True;
+ generateMP4Format = True;
+ break;
+ }
+
+ case 'i': { // output an AVI file (to stdout)
+ outputAVIFile = True;
+ break;
+ }
+
+ case 'I': { // specify input interface...
+ NetAddressList addresses(argv[2]);
+ if (addresses.numAddresses() == 0) {
+ *env << "Failed to find network address for \"" << argv[2] << "\"";
+ break;
+ }
+ ReceivingInterfaceAddr = *(unsigned*)(addresses.firstAddress()->data());
+ ++argv; --argc;
+ break;
+ }
+
+ case 'a': { // receive/record an audio stream only
+ audioOnly = True;
+ singleMedium = "audio";
+ break;
+ }
+
+ case 'v': { // receive/record a video stream only
+ videoOnly = True;
+ singleMedium = "video";
+ break;
+ }
+
+ case 'V': { // disable verbose output
+ verbosityLevel = 0;
+ break;
+ }
+
+ case 'd': { // specify duration, or how much to delay after end time
+ float arg;
+ if (sscanf(argv[2], "%g", &arg) != 1) {
+ usage();
+ }
+ if (argv[2][0] == '-') { // not "arg<0", in case argv[2] was "-0"
+ // a 'negative' argument was specified; use this for "durationSlop":
+ duration = 0; // use whatever's in the SDP
+ durationSlop = -arg;
+ } else {
+ duration = arg;
+ durationSlop = 0;
+ }
+ ++argv; --argc;
+ break;
+ }
+
+ case 'D': { // specify maximum number of seconds to wait for packets:
+ if (sscanf(argv[2], "%u", &interPacketGapMaxTime) != 1) {
+ usage();
+ }
+ ++argv; --argc;
+ break;
+ }
+
+ case 'c': { // play continuously
+ playContinuously = True;
+ break;
+ }
+
+ case 'S': { // specify an offset to use with "SimpleRTPSource"s
+ if (sscanf(argv[2], "%d", &simpleRTPoffsetArg) != 1) {
+ usage();
+ }
+ if (simpleRTPoffsetArg < 0) {
+ *env << "offset argument to \"-S\" must be >= 0\n";
+ usage();
+ }
+ ++argv; --argc;
+ break;
+ }
+
+ case 'm': { // output multiple files - one for each frame
+ oneFilePerFrame = True;
+ break;
+ }
+
+ case 'n': { // notify the user when the first data packet arrives
+ notifyOnPacketArrival = True;
+ break;
+ }
+
+ case 'O': { // Don't send an "OPTIONS" request before "DESCRIBE"
+ sendOptionsRequest = False;
+ break;
+ }
+
+ case 'o': { // Send only the "OPTIONS" request to the server
+ sendOptionsRequestOnly = True;
+ break;
+ }
+
+ case 'P': { // specify an interval (in seconds) between writing successive output files
+ int fileOutputIntervalInt;
+ if (sscanf(argv[2], "%d", &fileOutputIntervalInt) != 1 || fileOutputIntervalInt <= 0) {
+ usage();
+ }
+ fileOutputInterval = (unsigned)fileOutputIntervalInt;
+ ++argv; --argc;
+ break;
+ }
+
+ case 't': {
+ // stream RTP and RTCP over the TCP 'control' connection
+ if (controlConnectionUsesTCP) {
+ streamUsingTCP = True;
+ } else {
+ usage();
+ }
+ break;
+ }
+
+ case 'T': {
+ // stream RTP and RTCP over a HTTP connection
+ if (controlConnectionUsesTCP) {
+ if (argc > 3 && argv[2][0] != '-') {
+ // The next argument is the HTTP server port number:
+ if (sscanf(argv[2], "%hu", &tunnelOverHTTPPortNum) == 1
+ && tunnelOverHTTPPortNum > 0) {
+ ++argv; --argc;
+ break;
+ }
+ }
+ }
+
+ // If we get here, the option was specified incorrectly:
+ usage();
+ break;
+ }
+
+ case 'u': { // specify a username and password
+ if (argc < 4) usage(); // there's no argv[3] (for the "password")
+ username = argv[2];
+ password = argv[3];
+ argv+=2; argc-=2;
+ if (allowProxyServers && argc > 3 && argv[2][0] != '-') {
+ // The next argument is the name of a proxy server:
+ proxyServerName = argv[2];
+ ++argv; --argc;
+
+ if (argc > 3 && argv[2][0] != '-') {
+ // The next argument is the proxy server port number:
+ if (sscanf(argv[2], "%hu", &proxyServerPortNum) != 1) {
+ usage();
+ }
+ ++argv; --argc;
+ }
+ }
+
+ ourAuthenticator = new Authenticator(username, password);
+ break;
+ }
+
+ case 'k': { // specify a username and password to be used to authentication an incoming "REGISTER" command (for use with -R)
+ if (argc < 4) usage(); // there's no argv[3] (for the "password")
+ usernameForREGISTER = argv[2];
+ passwordForREGISTER = argv[3];
+ argv+=2; argc-=2;
+
+ if (authDBForREGISTER == NULL) authDBForREGISTER = new UserAuthenticationDatabase;
+ authDBForREGISTER->addUserRecord(usernameForREGISTER, passwordForREGISTER);
+ break;
+ }
+
+ case 'K': { // Send periodic 'keep-alive' requests to keep broken server sessions alive
+ sendKeepAlivesToBrokenServers = True;
+ break;
+ }
+
+ case 'A': { // specify a desired audio RTP payload format
+ unsigned formatArg;
+ if (sscanf(argv[2], "%u", &formatArg) != 1
+ || formatArg >= 96) {
+ usage();
+ }
+ desiredAudioRTPPayloadFormat = (unsigned char)formatArg;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'M': { // specify a MIME subtype for a dynamic RTP payload type
+ mimeSubtype = argv[2];
+ if (desiredAudioRTPPayloadFormat==0) desiredAudioRTPPayloadFormat =96;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'w': { // specify a width (pixels) for an output QuickTime or AVI movie
+ if (sscanf(argv[2], "%hu", &movieWidth) != 1) {
+ usage();
+ }
+ movieWidthOptionSet = True;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'h': { // specify a height (pixels) for an output QuickTime or AVI movie
+ if (sscanf(argv[2], "%hu", &movieHeight) != 1) {
+ usage();
+ }
+ movieHeightOptionSet = True;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'f': { // specify a frame rate (per second) for an output QT or AVI movie
+ if (sscanf(argv[2], "%u", &movieFPS) != 1) {
+ usage();
+ }
+ movieFPSOptionSet = True;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'F': { // specify a prefix for the audio and video output files
+ fileNamePrefix = argv[2];
+ ++argv; --argc;
+ break;
+ }
+
+ case 'g': { // specify a user agent name to use in outgoing requests
+ userAgent = argv[2];
+ ++argv; --argc;
+ break;
+ }
+
+ case 'b': { // specify the size of buffers for "FileSink"s
+ if (sscanf(argv[2], "%u", &fileSinkBufferSize) != 1) {
+ usage();
+ }
+ ++argv; --argc;
+ break;
+ }
+
+ case 'B': { // specify the size of input socket buffers
+ if (sscanf(argv[2], "%u", &socketInputBufferSize) != 1) {
+ usage();
+ }
+ ++argv; --argc;
+ break;
+ }
+
+ // Note: The following option is deprecated, and may someday be removed:
+ case 'l': { // try to compensate for packet loss by repeating frames
+ packetLossCompensate = True;
+ break;
+ }
+
+ case 'y': { // synchronize audio and video streams
+ syncStreams = True;
+ break;
+ }
+
+ case 'H': { // generate hint tracks (as well as the regular data tracks)
+ generateHintTracks = True;
+ break;
+ }
+
+ case 'Q': { // output QOS measurements
+ qosMeasurementIntervalMS = 1000; // default: 1 second
+
+ if (argc > 3 && argv[2][0] != '-') {
+ // The next argument is the measurement interval,
+ // in multiples of 100 ms
+ if (sscanf(argv[2], "%u", &qosMeasurementIntervalMS) != 1) {
+ usage();
+ }
+ qosMeasurementIntervalMS *= 100;
+ ++argv; --argc;
+ }
+ break;
+ }
+
+ case 's': { // specify initial seek time (trick play)
+ double arg;
+ if (sscanf(argv[2], "%lg", &arg) != 1 || arg < 0) {
+ usage();
+ }
+ initialSeekTime = arg;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'U': {
+ // specify initial absolute seek time (trick play), using a string of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z"
+ initialAbsoluteSeekTime = argv[2];
+ ++argv; --argc;
+ break;
+ }
+
+ case 'E': {
+ // specify initial absolute seek END time (trick play), using a string of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z"
+ initialAbsoluteSeekEndTime = argv[2];
+ ++argv; --argc;
+ break;
+ }
+ case 'z': { // scale (trick play)
+ float arg;
+ if (sscanf(argv[2], "%g", &arg) != 1 || arg == 0.0f) {
+ usage();
+ }
+ scale = arg;
+ ++argv; --argc;
+ break;
+ }
+
+ case 'R': {
+ // set up a handler server for incoming "REGISTER" commands
+ createHandlerServerForREGISTERCommand = True;
+ if (argc > 2 && argv[2][0] != '-') {
+ // The next argument is the REGISTER handler server port number:
+ if (sscanf(argv[2], "%hu", &handlerServerForREGISTERCommandPortNum) == 1 && handlerServerForREGISTERCommandPortNum > 0) {
+ ++argv; --argc;
+ break;
+ }
+ }
+ break;
+ }
+
+ case 'C': {
+ forceMulticastOnUnspecified = True;
+ break;
+ }
+
+ default: {
+ *env << "Invalid option: " << opt << "\n";
+ usage();
+ break;
+ }
+ }
+
+ ++argv; --argc;
+ }
+
+ // There must be exactly one "rtsp://" URL at the end (unless '-R' was used, in which case there's no URL)
+ if (!( (argc == 2 && !createHandlerServerForREGISTERCommand) || (argc == 1 && createHandlerServerForREGISTERCommand) )) usage();
+ if (outputQuickTimeFile && outputAVIFile) {
+ *env << "The -i and -q (or -4) options cannot both be used!\n";
+ usage();
+ }
+ Boolean outputCompositeFile = outputQuickTimeFile || outputAVIFile;
+ if (!createReceivers && (outputCompositeFile || oneFilePerFrame || fileOutputInterval > 0)) {
+ *env << "The -r option cannot be used with -q, -4, -i, -m, or -P!\n";
+ usage();
+ }
+ if (oneFilePerFrame && fileOutputInterval > 0) {
+ *env << "The -m and -P options cannot both be used!\n";
+ usage();
+ }
+ if (outputCompositeFile && !movieWidthOptionSet) {
+ *env << "Warning: The -q, -4 or -i option was used, but not -w. Assuming a video width of "
+ << movieWidth << " pixels\n";
+ }
+ if (outputCompositeFile && !movieHeightOptionSet) {
+ *env << "Warning: The -q, -4 or -i option was used, but not -h. Assuming a video height of "
+ << movieHeight << " pixels\n";
+ }
+ if (outputCompositeFile && !movieFPSOptionSet) {
+ *env << "Warning: The -q, -4 or -i option was used, but not -f. Assuming a video frame rate of "
+ << movieFPS << " frames-per-second\n";
+ }
+ if (audioOnly && videoOnly) {
+ *env << "The -a and -v options cannot both be used!\n";
+ usage();
+ }
+ if (sendOptionsRequestOnly && !sendOptionsRequest) {
+ *env << "The -o and -O options cannot both be used!\n";
+ usage();
+ }
+ if (initialAbsoluteSeekTime != NULL && initialSeekTime != 0.0f) {
+ *env << "The -s and -U options cannot both be used!\n";
+ usage();
+ }
+ if (initialAbsoluteSeekTime == NULL && initialAbsoluteSeekEndTime != NULL) {
+ *env << "The -E option requires the -U option!\n";
+ usage();
+ }
+ if (authDBForREGISTER != NULL && !createHandlerServerForREGISTERCommand) {
+ *env << "If \"-k <username> <password>\" is used, then -R (or \"-R <port-num>\") must also be used!\n";
+ usage();
+ }
+ if (tunnelOverHTTPPortNum > 0) {
+ if (streamUsingTCP) {
+ *env << "The -t and -T options cannot both be used!\n";
+ usage();
+ } else {
+ streamUsingTCP = True;
+ }
+ }
+ if (!createReceivers && notifyOnPacketArrival) {
+ *env << "Warning: Because we're not receiving stream data, the -n flag has no effect\n";
+ }
+ if (durationSlop < 0) {
+ // This parameter wasn't set, so use a default value.
+ // If we're measuring QOS stats, then don't add any slop, to avoid
+ // having 'empty' measurement intervals at the end.
+ durationSlop = qosMeasurementIntervalMS > 0 ? 0.0 : 5.0;
+ }
+
+ streamURL = argv[1];
+
+ // Create (or arrange to create) our client object:
+ if (createHandlerServerForREGISTERCommand) {
+ handlerServerForREGISTERCommand
+ = HandlerServerForREGISTERCommand::createNew(*env, continueAfterClientCreation0,
+ handlerServerForREGISTERCommandPortNum, authDBForREGISTER,
+ verbosityLevel, progName);
+ if (handlerServerForREGISTERCommand == NULL) {
+ *env << "Failed to create a server for handling incoming \"REGISTER\" commands: " << env->getResultMsg() << "\n";
+ shutdown();
+ } else {
+ *env << "Awaiting an incoming \"REGISTER\" command on port " << handlerServerForREGISTERCommand->serverPortNum() << "\n";
+ }
+ } else {
+ ourClient = createClient(*env, streamURL, verbosityLevel, progName);
+ if (ourClient == NULL) {
+ *env << "Failed to create " << clientProtocolName << " client: " << env->getResultMsg() << "\n";
+ shutdown();
+ }
+ continueAfterClientCreation1();
+ }
+
+ // All subsequent activity takes place within the event loop:
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void continueAfterClientCreation0(RTSPClient* newRTSPClient, Boolean requestStreamingOverTCP) {
+ if (newRTSPClient == NULL) return;
+
+ streamUsingTCP = requestStreamingOverTCP;
+
+ assignClient(ourClient = newRTSPClient);
+ streamURL = newRTSPClient->url();
+
+ // Having handled one "REGISTER" command (giving us a "rtsp://" URL to stream from), we don't handle any more:
+ Medium::close(handlerServerForREGISTERCommand); handlerServerForREGISTERCommand = NULL;
+
+ continueAfterClientCreation1();
+}
+
+void continueAfterClientCreation1() {
+ setUserAgentString(userAgent);
+
+ if (sendOptionsRequest) {
+ // Begin by sending an "OPTIONS" command:
+ getOptions(continueAfterOPTIONS);
+ } else {
+ continueAfterOPTIONS(NULL, 0, NULL);
+ }
+}
+
+void continueAfterOPTIONS(RTSPClient*, int resultCode, char* resultString) {
+ if (sendOptionsRequestOnly) {
+ if (resultCode != 0) {
+ *env << clientProtocolName << " \"OPTIONS\" request failed: " << resultString << "\n";
+ } else {
+ *env << clientProtocolName << " \"OPTIONS\" request returned: " << resultString << "\n";
+ }
+ shutdown();
+ }
+ delete[] resultString;
+
+ // Next, get a SDP description for the stream:
+ getSDPDescription(continueAfterDESCRIBE);
+}
+
// Handles the response to our "DESCRIBE" request.  On success, creates a
// "MediaSession" from the returned SDP description, then creates a receiver
// (or, with '-r', just checks the client port) for each media subsession.
// NOTE(review): the error paths rely on "shutdown()" not returning (it ends
// by calling exit() when no session exists yet) — confirm before reordering.
void continueAfterDESCRIBE(RTSPClient*, int resultCode, char* resultString) {
  if (resultCode != 0) {
    *env << "Failed to get a SDP description for the URL \"" << streamURL << "\": " << resultString << "\n";
    delete[] resultString;
    shutdown();
  }

  char* sdpDescription = resultString;
  *env << "Opened URL \"" << streamURL << "\", returning a SDP description:\n" << sdpDescription << "\n";

  // Create a media session object from this SDP description:
  session = MediaSession::createNew(*env, sdpDescription);
  delete[] sdpDescription; // the session keeps its own copy of what it needs
  if (session == NULL) {
    *env << "Failed to create a MediaSession object from the SDP description: " << env->getResultMsg() << "\n";
    shutdown();
  } else if (!session->hasSubsessions()) {
    *env << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
    shutdown();
  }

  // Then, setup the "RTPSource"s for the session:
  MediaSubsessionIterator iter(*session);
  MediaSubsession *subsession;
  Boolean madeProgress = False;
  char const* singleMediumToTest = singleMedium;
  while ((subsession = iter.next()) != NULL) {
    // If we've asked to receive only a single medium, then check this now:
    if (singleMediumToTest != NULL) {
      if (strcmp(subsession->mediumName(), singleMediumToTest) != 0) {
	*env << "Ignoring \"" << subsession->mediumName()
	     << "/" << subsession->codecName()
	     << "\" subsession, because we've asked to receive a single " << singleMedium
	     << " session only\n";
	continue;
      } else {
	// Receive this subsession only
	singleMediumToTest = "xxxxx";
	// this hack ensures that we get only 1 subsession of this type
      }
    }

    // '-p <portNum>': assign consecutive even port numbers to successive subsessions:
    if (desiredPortNum != 0) {
      subsession->setClientPortNum(desiredPortNum);
      desiredPortNum += 2;
    }

    if (createReceivers) {
      if (!subsession->initiate(simpleRTPoffsetArg)) {
	*env << "Unable to create receiver for \"" << subsession->mediumName()
	     << "/" << subsession->codecName()
	     << "\" subsession: " << env->getResultMsg() << "\n";
      } else {
	*env << "Created receiver for \"" << subsession->mediumName()
	     << "/" << subsession->codecName() << "\" subsession (";
	if (subsession->rtcpIsMuxed()) {
	  *env << "client port " << subsession->clientPortNum();
	} else {
	  // RTP and RTCP use adjacent port numbers:
	  *env << "client ports " << subsession->clientPortNum()
	       << "-" << subsession->clientPortNum()+1;
	}
	*env << ")\n";
	madeProgress = True;

	if (subsession->rtpSource() != NULL) {
	  // Because we're saving the incoming data, rather than playing
	  // it in real time, allow an especially large time threshold
	  // (1 second) for reordering misordered incoming packets:
	  unsigned const thresh = 1000000; // 1 second
	  subsession->rtpSource()->setPacketReorderingThresholdTime(thresh);

	  // Set the RTP source's OS socket buffer size as appropriate - either if we were explicitly asked (using -B),
	  // or if the desired FileSink buffer size happens to be larger than the current OS socket buffer size.
	  // (The latter case is a heuristic, on the assumption that if the user asked for a large FileSink buffer size,
	  // then the input data rate may be large enough to justify increasing the OS socket buffer size also.)
	  int socketNum = subsession->rtpSource()->RTPgs()->socketNum();
	  unsigned curBufferSize = getReceiveBufferSize(*env, socketNum);
	  if (socketInputBufferSize > 0 || fileSinkBufferSize > curBufferSize) {
	    unsigned newBufferSize = socketInputBufferSize > 0 ? socketInputBufferSize : fileSinkBufferSize;
	    newBufferSize = setReceiveBufferTo(*env, socketNum, newBufferSize);
	    if (socketInputBufferSize > 0) { // The user explicitly asked for the new socket buffer size; announce it:
	      *env << "Changed socket receive buffer size for the \""
		   << subsession->mediumName()
		   << "/" << subsession->codecName()
		   << "\" subsession from "
		   << curBufferSize << " to "
		   << newBufferSize << " bytes\n";
	    }
	  }
	}
      }
    } else {
      // '-r': we don't create receivers, but each subsession still needs a port:
      if (subsession->clientPortNum() == 0) {
	*env << "No client port was specified for the \""
	     << subsession->mediumName()
	     << "/" << subsession->codecName()
	     << "\" subsession. (Try adding the \"-p <portNum>\" option.)\n";
      } else {
	madeProgress = True;
      }
    }
  }
  if (!madeProgress) shutdown();

  // Perform additional 'setup' on each subsession, before playing them:
  setupStreams();
}
+
// Iteration state shared between "setupStreams()" and "continueAfterSETUP()":
MediaSubsession *subsession; // the subsession currently being "SETUP"
Boolean madeProgress = False; // True once at least one subsession was set up OK
+void continueAfterSETUP(RTSPClient* client, int resultCode, char* resultString) {
+ if (resultCode == 0) {
+ *env << "Setup \"" << subsession->mediumName()
+ << "/" << subsession->codecName()
+ << "\" subsession (";
+ if (subsession->rtcpIsMuxed()) {
+ *env << "client port " << subsession->clientPortNum();
+ } else {
+ *env << "client ports " << subsession->clientPortNum()
+ << "-" << subsession->clientPortNum()+1;
+ }
+ *env << ")\n";
+ madeProgress = True;
+ } else {
+ *env << "Failed to setup \"" << subsession->mediumName()
+ << "/" << subsession->codecName()
+ << "\" subsession: " << resultString << "\n";
+ }
+ delete[] resultString;
+
+ if (client != NULL) sessionTimeoutParameter = client->sessionTimeoutParameter();
+
+ // Set up the next subsession, if any:
+ setupStreams();
+}
+
// Creates the output sink(s) for the session.  With '-q'/'-4'/'-i' a single
// composite QuickTime/MP4/AVI file is created; otherwise one "FileSink" per
// initiated subsession, choosing a codec-specific sink subclass where needed.
// "periodicFilenameSuffix" is "" normally, or a "-<start>-<end>" interval tag
// when the '-P <interval-in-seconds>' option is in effect.
void createOutputFiles(char const* periodicFilenameSuffix) {
  char outFileName[1000];

  if (outputQuickTimeFile || outputAVIFile) {
    if (periodicFilenameSuffix[0] == '\0') {
      // Normally (unless the '-P <interval-in-seconds>' option was given) we output to 'stdout':
      sprintf(outFileName, "stdout");
    } else {
      // Otherwise output to a type-specific file name, containing "periodicFilenameSuffix":
      char const* prefix = fileNamePrefix[0] == '\0' ? "output" : fileNamePrefix;
      snprintf(outFileName, sizeof outFileName, "%s%s.%s", prefix, periodicFilenameSuffix,
	       outputAVIFile ? "avi" : generateMP4Format ? "mp4" : "mov");
    }

    if (outputQuickTimeFile) {
      qtOut = QuickTimeFileSink::createNew(*env, *session, outFileName,
					   fileSinkBufferSize,
					   movieWidth, movieHeight,
					   movieFPS,
					   packetLossCompensate,
					   syncStreams,
					   generateHintTracks,
					   generateMP4Format);
      if (qtOut == NULL) {
	*env << "Failed to create a \"QuickTimeFileSink\" for outputting to \""
	     << outFileName << "\": " << env->getResultMsg() << "\n";
	shutdown();
      } else {
	*env << "Outputting to the file: \"" << outFileName << "\"\n";
      }

      qtOut->startPlaying(sessionAfterPlaying, NULL);
    } else { // outputAVIFile
      aviOut = AVIFileSink::createNew(*env, *session, outFileName,
				      fileSinkBufferSize,
				      movieWidth, movieHeight,
				      movieFPS,
				      packetLossCompensate);
      if (aviOut == NULL) {
	*env << "Failed to create an \"AVIFileSink\" for outputting to \""
	     << outFileName << "\": " << env->getResultMsg() << "\n";
	shutdown();
      } else {
	*env << "Outputting to the file: \"" << outFileName << "\"\n";
      }

      aviOut->startPlaying(sessionAfterPlaying, NULL);
    }
  } else {
    // Create and start "FileSink"s for each subsession:
    madeProgress = False;
    MediaSubsessionIterator iter(*session);
    while ((subsession = iter.next()) != NULL) {
      if (subsession->readSource() == NULL) continue; // was not initiated

      // Create an output file for each desired stream:
      if (singleMedium == NULL || periodicFilenameSuffix[0] != '\0') {
	// Output file name is
	// "<filename-prefix><medium_name>-<codec_name>-<counter><periodicFilenameSuffix>"
	static unsigned streamCounter = 0;
	snprintf(outFileName, sizeof outFileName, "%s%s-%s-%d%s",
		 fileNamePrefix, subsession->mediumName(),
		 subsession->codecName(), ++streamCounter, periodicFilenameSuffix);
      } else {
	// When outputting a single medium only, we output to 'stdout
	// (unless the '-P <interval-in-seconds>' option was given):
	sprintf(outFileName, "stdout");
      }

      // Choose a sink type appropriate for this subsession's codec:
      FileSink* fileSink = NULL;
      Boolean createOggFileSink = False; // by default
      if (strcmp(subsession->mediumName(), "video") == 0) {
	if (strcmp(subsession->codecName(), "H264") == 0) {
	  // For H.264 video stream, we use a special sink that adds 'start codes',
	  // and (at the start) the SPS and PPS NAL units:
	  fileSink = H264VideoFileSink::createNew(*env, outFileName,
						  subsession->fmtp_spropparametersets(),
						  fileSinkBufferSize, oneFilePerFrame);
	} else if (strcmp(subsession->codecName(), "H265") == 0) {
	  // For H.265 video stream, we use a special sink that adds 'start codes',
	  // and (at the start) the VPS, SPS, and PPS NAL units:
	  fileSink = H265VideoFileSink::createNew(*env, outFileName,
						  subsession->fmtp_spropvps(),
						  subsession->fmtp_spropsps(),
						  subsession->fmtp_sproppps(),
						  fileSinkBufferSize, oneFilePerFrame);
	} else if (strcmp(subsession->codecName(), "THEORA") == 0) {
	  createOggFileSink = True;
	}
      } else if (strcmp(subsession->mediumName(), "audio") == 0) {
	if (strcmp(subsession->codecName(), "AMR") == 0 ||
	    strcmp(subsession->codecName(), "AMR-WB") == 0) {
	  // For AMR audio streams, we use a special sink that inserts AMR frame hdrs:
	  fileSink = AMRAudioFileSink::createNew(*env, outFileName,
						 fileSinkBufferSize, oneFilePerFrame);
	} else if (strcmp(subsession->codecName(), "VORBIS") == 0 ||
		   strcmp(subsession->codecName(), "OPUS") == 0) {
	  createOggFileSink = True;
	}
      }
      if (createOggFileSink) {
	// THEORA/VORBIS/OPUS get an Ogg container:
	fileSink = OggFileSink
	  ::createNew(*env, outFileName,
		      subsession->rtpTimestampFrequency(), subsession->fmtp_config());
      } else if (fileSink == NULL) {
	// Normal case:
	fileSink = FileSink::createNew(*env, outFileName,
				       fileSinkBufferSize, oneFilePerFrame);
      }
      subsession->sink = fileSink;

      if (subsession->sink == NULL) {
	*env << "Failed to create FileSink for \"" << outFileName
	     << "\": " << env->getResultMsg() << "\n";
      } else {
	if (singleMedium == NULL) {
	  *env << "Created output file: \"" << outFileName << "\"\n";
	} else {
	  *env << "Outputting data from the \"" << subsession->mediumName()
	       << "/" << subsession->codecName()
	       << "\" subsession to \"" << outFileName << "\"\n";
	}

	if (strcmp(subsession->mediumName(), "video") == 0 &&
	    strcmp(subsession->codecName(), "MP4V-ES") == 0 &&
	    subsession->fmtp_config() != NULL) {
	  // For MPEG-4 video RTP streams, the 'config' information
	  // from the SDP description contains useful VOL etc. headers.
	  // Insert this data at the front of the output file:
	  unsigned configLen;
	  unsigned char* configData
	    = parseGeneralConfigStr(subsession->fmtp_config(), configLen);
	  struct timeval timeNow;
	  gettimeofday(&timeNow, NULL);
	  fileSink->addData(configData, configLen, timeNow);
	  delete[] configData;
	}

	// Start the data flowing into this sink:
	subsession->sink->startPlaying(*(subsession->readSource()),
				       subsessionAfterPlaying,
				       subsession);

	// Also set a handler to be called if a RTCP "BYE" arrives
	// for this subsession:
	if (subsession->rtcpInstance() != NULL) {
	  subsession->rtcpInstance()->setByeWithReasonHandler(subsessionByeHandler, subsession);
	}

	madeProgress = True;
      }
    }
    if (!madeProgress) shutdown();
  }
}
+
+void createPeriodicOutputFiles() {
+ // Create a filename suffix that notes the time interval that's being recorded:
+ char periodicFileNameSuffix[100];
+ snprintf(periodicFileNameSuffix, sizeof periodicFileNameSuffix, "-%05d-%05d",
+ fileOutputSecondsSoFar, fileOutputSecondsSoFar + fileOutputInterval);
+ createOutputFiles(periodicFileNameSuffix);
+
+ // Schedule an event for writing the next output file:
+ periodicFileOutputTask
+ = env->taskScheduler().scheduleDelayedTask(fileOutputInterval*1000000,
+ (TaskFunc*)periodicFileOutputTimerHandler,
+ (void*)NULL);
+}
+
+void setupStreams() {
+ static MediaSubsessionIterator* setupIter = NULL;
+ if (setupIter == NULL) setupIter = new MediaSubsessionIterator(*session);
+ while ((subsession = setupIter->next()) != NULL) {
+ // We have another subsession left to set up:
+ if (subsession->clientPortNum() == 0) continue; // port # was not set
+
+ setupSubsession(subsession, streamUsingTCP, forceMulticastOnUnspecified, continueAfterSETUP);
+ return;
+ }
+
+ // We're done setting up subsessions.
+ delete setupIter;
+ if (!madeProgress) shutdown();
+
+ // Create output files:
+ if (createReceivers) {
+ if (fileOutputInterval > 0) {
+ createPeriodicOutputFiles();
+ } else {
+ createOutputFiles("");
+ }
+ }
+
+ // Finally, start playing each subsession, to start the data flow:
+ if (duration == 0) {
+ if (scale > 0) duration = session->playEndTime() - initialSeekTime; // use SDP end time
+ else if (scale < 0) duration = initialSeekTime;
+ }
+ if (duration < 0) duration = 0.0;
+
+ endTime = initialSeekTime;
+ if (scale > 0) {
+ if (duration <= 0) endTime = -1.0f;
+ else endTime = initialSeekTime + duration;
+ } else {
+ endTime = initialSeekTime - duration;
+ if (endTime < 0) endTime = 0.0f;
+ }
+
+ char const* absStartTime = initialAbsoluteSeekTime != NULL ? initialAbsoluteSeekTime : session->absStartTime();
+ char const* absEndTime = initialAbsoluteSeekEndTime != NULL ? initialAbsoluteSeekEndTime : session->absEndTime();
+ if (absStartTime != NULL) {
+ // Either we or the server have specified that seeking should be done by 'absolute' time:
+ startPlayingSession(session, absStartTime, absEndTime, scale, continueAfterPLAY);
+ } else {
+ // Normal case: Seek by relative time (NPT):
+ startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
+ }
+}
+
// Handles the response to our "PLAY" request.  On success, starts QOS
// measurements (if requested), schedules a session-duration timer when the
// play duration is known, and arms the periodic packet-arrival/gap/timeout
// checks.
void continueAfterPLAY(RTSPClient*, int resultCode, char* resultString) {
  if (resultCode != 0) {
    *env << "Failed to start playing session: " << resultString << "\n";
    delete[] resultString;
    shutdown();
    return;
  } else {
    *env << "Started playing session\n";
  }
  delete[] resultString;

  if (qosMeasurementIntervalMS > 0) {
    // Begin periodic QOS measurements:
    beginQOSMeasurement();
  }

  // Figure out how long to delay (if at all) before shutting down, or
  // repeating the playing
  Boolean timerIsBeingUsed = False;
  double secondsToDelay = duration;
  if (duration > 0) {
    // First, adjust "duration" based on any change to the play range (that was specified in the "PLAY" response):
    double rangeAdjustment = (session->playEndTime() - session->playStartTime()) - (endTime - initialSeekTime);
    if (duration + rangeAdjustment > 0.0) duration += rangeAdjustment;

    timerIsBeingUsed = True;
    double absScale = scale > 0 ? scale : -scale; // ASSERT: scale != 0
    // Scale wall-clock delay by the playback speed, plus some slop for wind-down:
    secondsToDelay = duration/absScale + durationSlop;

    int64_t uSecsToDelay = (int64_t)(secondsToDelay*1000000.0);
    sessionTimerTask = env->taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)sessionTimerHandler, (void*)NULL);
  }

  char const* actionString
    = createReceivers? "Receiving streamed data":"Data is being streamed";
  if (timerIsBeingUsed) {
    *env << actionString
	 << " (for up to " << secondsToDelay
	 << " seconds)...\n";
  } else {
#ifdef USE_SIGNALS
    // No timer: tell the user which signals will terminate us:
    pid_t ourPid = getpid();
    *env << actionString
	 << " (signal with \"kill -HUP " << (int)ourPid
	 << "\" or \"kill -USR1 " << (int)ourPid
	 << "\" to terminate)...\n";
#else
    *env << actionString << "...\n";
#endif
  }

  sessionTimeoutBrokenServerTask = NULL;

  // Watch for incoming packets (if desired):
  checkForPacketArrival(NULL);
  checkInterPacketGaps(NULL);
  checkSessionTimeoutBrokenServer(NULL);
}
+
+void closeMediaSinks() {
+ Medium::close(qtOut); qtOut = NULL;
+ Medium::close(aviOut); aviOut = NULL;
+
+ if (session == NULL) return;
+ MediaSubsessionIterator iter(*session);
+ MediaSubsession* subsession;
+ while ((subsession = iter.next()) != NULL) {
+ Medium::close(subsession->sink);
+ subsession->sink = NULL;
+ }
+}
+
+void subsessionAfterPlaying(void* clientData) {
+ // Begin by closing this media subsession's stream:
+ MediaSubsession* subsession = (MediaSubsession*)clientData;
+ Medium::close(subsession->sink);
+ subsession->sink = NULL;
+
+ // Next, check whether *all* subsessions' streams have now been closed:
+ MediaSession& session = subsession->parentSession();
+ MediaSubsessionIterator iter(session);
+ while ((subsession = iter.next()) != NULL) {
+ if (subsession->sink != NULL) return; // this subsession is still active
+ }
+
+ // All subsessions' streams have now been closed
+ sessionAfterPlaying();
+}
+
+void subsessionByeHandler(void* clientData, char const* reason) {
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+ unsigned secsDiff = timeNow.tv_sec - startTime.tv_sec;
+
+ MediaSubsession* subsession = (MediaSubsession*)clientData;
+ *env << "Received RTCP \"BYE\"";
+ if (reason != NULL) {
+ *env << " (reason:\"" << reason << "\")";
+ delete[] (char*)reason;
+ }
+ *env << " on \"" << subsession->mediumName()
+ << "/" << subsession->codecName()
+ << "\" subsession (after " << secsDiff
+ << " seconds)\n";
+
+ // Act now as if the subsession had closed:
+ subsessionAfterPlaying(subsession);
+}
+
+void sessionAfterPlaying(void* /*clientData*/) {
+ if (!playContinuously) {
+ shutdown(0);
+ } else {
+ // We've been asked to play the stream(s) over again.
+ // First, reset state from the current session:
+ if (env != NULL) {
+ // Keep this running: env->taskScheduler().unscheduleDelayedTask(periodicFileOutputTask);
+ env->taskScheduler().unscheduleDelayedTask(sessionTimerTask);
+ env->taskScheduler().unscheduleDelayedTask(sessionTimeoutBrokenServerTask);
+ env->taskScheduler().unscheduleDelayedTask(arrivalCheckTimerTask);
+ env->taskScheduler().unscheduleDelayedTask(interPacketGapCheckTimerTask);
+ env->taskScheduler().unscheduleDelayedTask(qosMeasurementTimerTask);
+ }
+ totNumPacketsReceived = ~0;
+
+ startPlayingSession(session, initialSeekTime, endTime, scale, continueAfterPLAY);
+ }
+}
+
+void sessionTimerHandler(void* /*clientData*/) {
+ sessionTimerTask = NULL;
+
+ sessionAfterPlaying();
+}
+
+void periodicFileOutputTimerHandler(void* /*clientData*/) {
+ periodicFileOutputTask = NULL;
+ fileOutputSecondsSoFar += fileOutputInterval;
+
+ // First, close the existing output files:
+ closeMediaSinks();
+
+ // Then, create new output files:
+ createPeriodicOutputFiles();
+}
+
// Per-RTP-source record of QOS (quality-of-service) statistics, sampled
// periodically.  Records form a singly-linked list headed by "qosRecordHead";
// deleting the head deletes the whole chain (see the destructor).
class qosMeasurementRecord {
public:
  qosMeasurementRecord(struct timeval const& startTime, RTPSource* src)
    : fSource(src), fNext(NULL),
      kbits_per_second_min(1e20), kbits_per_second_max(0),
      kBytesTotal(0.0),
      packet_loss_fraction_min(1.0), packet_loss_fraction_max(0.0),
      totNumPacketsReceived(0), totNumPacketsExpected(0) {
    measurementEndTime = measurementStartTime = startTime;

    RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB());
    // Assume that there's only one SSRC source (usually the case):
    RTPReceptionStats* stats = statsIter.next(True);
    if (stats != NULL) {
      // Baseline the counters, so later samples measure deltas from now:
      kBytesTotal = stats->totNumKBytesReceived();
      totNumPacketsReceived = stats->totNumPacketsReceived();
      totNumPacketsExpected = stats->totNumPacketsExpected();
    }
  }
  virtual ~qosMeasurementRecord() { delete fNext; } // recursively deletes the rest of the list

  // Takes one measurement sample, updating the min/max statistics below:
  void periodicQOSMeasurement(struct timeval const& timeNow);

public:
  RTPSource* fSource; // the RTP source being measured (not owned)
  qosMeasurementRecord* fNext; // next record in the list (owned)

public:
  struct timeval measurementStartTime, measurementEndTime;
  double kbits_per_second_min, kbits_per_second_max;
  double kBytesTotal;
  double packet_loss_fraction_min, packet_loss_fraction_max;
  unsigned totNumPacketsReceived, totNumPacketsExpected;
};
+
// Head of the singly-linked list of per-subsession QOS measurement records:
static qosMeasurementRecord* qosRecordHead = NULL;

static void periodicQOSMeasurement(void* clientData); // forward

// Time (in microseconds, modulo 2^32) at which the next QOS sample is due:
static unsigned nextQOSMeasurementUSecs;
+
// Schedules the next periodic QOS measurement "qosMeasurementIntervalMS"
// after the previously-scheduled instant (not after 'now'), so the sampling
// times don't drift.
// NOTE(review): this arithmetic is 32-bit unsigned and relies on modulo-2^32
// wraparound; the subtraction below yields a small (signed) delay either way.
static void scheduleNextQOSMeasurement() {
  nextQOSMeasurementUSecs += qosMeasurementIntervalMS*1000;
  struct timeval timeNow;
  gettimeofday(&timeNow, NULL);
  unsigned timeNowUSecs = timeNow.tv_sec*1000000 + timeNow.tv_usec;
  int usecsToDelay = nextQOSMeasurementUSecs - timeNowUSecs;

  qosMeasurementTimerTask = env->taskScheduler().scheduleDelayedTask(
     usecsToDelay, (TaskFunc*)periodicQOSMeasurement, (void*)NULL);
}
+
+static void periodicQOSMeasurement(void* /*clientData*/) {
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+
+ for (qosMeasurementRecord* qosRecord = qosRecordHead;
+ qosRecord != NULL; qosRecord = qosRecord->fNext) {
+ qosRecord->periodicQOSMeasurement(timeNow);
+ }
+
+ // Do this again later:
+ scheduleNextQOSMeasurement();
+}
+
// Takes one QOS sample: computes the bitrate and packet-loss fraction over
// the interval since the previous sample, and folds them into the running
// min/max statistics.
void qosMeasurementRecord
::periodicQOSMeasurement(struct timeval const& timeNow) {
  unsigned secsDiff = timeNow.tv_sec - measurementEndTime.tv_sec;
  int usecsDiff = timeNow.tv_usec - measurementEndTime.tv_usec;
  double timeDiff = secsDiff + usecsDiff/1000000.0; // seconds since the last sample
  measurementEndTime = timeNow;

  RTPReceptionStatsDB::Iterator statsIter(fSource->receptionStatsDB());
  // Assume that there's only one SSRC source (usually the case):
  RTPReceptionStats* stats = statsIter.next(True);
  if (stats != NULL) {
    // Bitrate over this interval:
    double kBytesTotalNow = stats->totNumKBytesReceived();
    double kBytesDeltaNow = kBytesTotalNow - kBytesTotal;
    kBytesTotal = kBytesTotalNow;

    double kbpsNow = timeDiff == 0.0 ? 0.0 : 8*kBytesDeltaNow/timeDiff;
    if (kbpsNow < 0.0) kbpsNow = 0.0; // in case of roundoff error
    if (kbpsNow < kbits_per_second_min) kbits_per_second_min = kbpsNow;
    if (kbpsNow > kbits_per_second_max) kbits_per_second_max = kbpsNow;

    // Packet loss over this interval:
    unsigned totReceivedNow = stats->totNumPacketsReceived();
    unsigned totExpectedNow = stats->totNumPacketsExpected();
    unsigned deltaReceivedNow = totReceivedNow - totNumPacketsReceived;
    unsigned deltaExpectedNow = totExpectedNow - totNumPacketsExpected;
    totNumPacketsReceived = totReceivedNow;
    totNumPacketsExpected = totExpectedNow;

    double lossFractionNow = deltaExpectedNow == 0 ? 0.0
      : 1.0 - deltaReceivedNow/(double)deltaExpectedNow;
    //if (lossFractionNow < 0.0) lossFractionNow = 0.0; //reordering can cause
    if (lossFractionNow < packet_loss_fraction_min) {
      packet_loss_fraction_min = lossFractionNow;
    }
    if (lossFractionNow > packet_loss_fraction_max) {
      packet_loss_fraction_max = lossFractionNow;
    }
  }
}
+
+void beginQOSMeasurement() {
+ // Set up a measurement record for each active subsession:
+ struct timeval startTime;
+ gettimeofday(&startTime, NULL);
+ nextQOSMeasurementUSecs = startTime.tv_sec*1000000 + startTime.tv_usec;
+ qosMeasurementRecord* qosRecordTail = NULL;
+ MediaSubsessionIterator iter(*session);
+ MediaSubsession* subsession;
+ while ((subsession = iter.next()) != NULL) {
+ RTPSource* src = subsession->rtpSource();
+ if (src == NULL) continue;
+
+ qosMeasurementRecord* qosRecord
+ = new qosMeasurementRecord(startTime, src);
+ if (qosRecordHead == NULL) qosRecordHead = qosRecord;
+ if (qosRecordTail != NULL) qosRecordTail->fNext = qosRecord;
+ qosRecordTail = qosRecord;
+ }
+
+ // Then schedule the first of the periodic measurements:
+ scheduleNextQOSMeasurement();
+}
+
// Prints all gathered QOS statistics — one section per subsession that has a
// RTP source — then frees the measurement records.  The records in the list
// were created in subsession-iteration order, so "curQOSRecord" is advanced
// in lock-step with the iterator.
void printQOSData(int exitCode) {
  *env << "begin_QOS_statistics\n";

  // Print out stats for each active subsession:
  qosMeasurementRecord* curQOSRecord = qosRecordHead;
  if (session != NULL) {
    MediaSubsessionIterator iter(*session);
    MediaSubsession* subsession;
    while ((subsession = iter.next()) != NULL) {
      RTPSource* src = subsession->rtpSource();
      if (src == NULL) continue;

      *env << "subsession\t" << subsession->mediumName()
	   << "/" << subsession->codecName() << "\n";

      unsigned numPacketsReceived = 0, numPacketsExpected = 0;

      if (curQOSRecord != NULL) {
	numPacketsReceived = curQOSRecord->totNumPacketsReceived;
	numPacketsExpected = curQOSRecord->totNumPacketsExpected;
      }
      *env << "num_packets_received\t" << numPacketsReceived << "\n";
      *env << "num_packets_lost\t" << int(numPacketsExpected - numPacketsReceived) << "\n";

      if (curQOSRecord != NULL) {
	// Elapsed time covered by this record's samples:
	unsigned secsDiff = curQOSRecord->measurementEndTime.tv_sec
	  - curQOSRecord->measurementStartTime.tv_sec;
	int usecsDiff = curQOSRecord->measurementEndTime.tv_usec
	  - curQOSRecord->measurementStartTime.tv_usec;
	double measurementTime = secsDiff + usecsDiff/1000000.0;
	*env << "elapsed_measurement_time\t" << measurementTime << "\n";

	*env << "kBytes_received_total\t" << curQOSRecord->kBytesTotal << "\n";

	*env << "measurement_sampling_interval_ms\t" << qosMeasurementIntervalMS << "\n";

	if (curQOSRecord->kbits_per_second_max == 0) {
	  // special case: we didn't receive any data:
	  *env <<
	    "kbits_per_second_min\tunavailable\n"
	    "kbits_per_second_ave\tunavailable\n"
	    "kbits_per_second_max\tunavailable\n";
	} else {
	  *env << "kbits_per_second_min\t" << curQOSRecord->kbits_per_second_min << "\n";
	  *env << "kbits_per_second_ave\t"
	       << (measurementTime == 0.0 ? 0.0 : 8*curQOSRecord->kBytesTotal/measurementTime) << "\n";
	  *env << "kbits_per_second_max\t" << curQOSRecord->kbits_per_second_max << "\n";
	}

	*env << "packet_loss_percentage_min\t" << 100*curQOSRecord->packet_loss_fraction_min << "\n";
	double packetLossFraction = numPacketsExpected == 0 ? 1.0
	  : 1.0 - numPacketsReceived/(double)numPacketsExpected;
	if (packetLossFraction < 0.0) packetLossFraction = 0.0;
	*env << "packet_loss_percentage_ave\t" << 100*packetLossFraction << "\n";
	*env << "packet_loss_percentage_max\t"
	     << (packetLossFraction == 1.0 ? 100.0 : 100*curQOSRecord->packet_loss_fraction_max) << "\n";

	RTPReceptionStatsDB::Iterator statsIter(src->receptionStatsDB());
	// Assume that there's only one SSRC source (usually the case):
	RTPReceptionStats* stats = statsIter.next(True);
	if (stats != NULL) {
	  *env << "inter_packet_gap_ms_min\t" << stats->minInterPacketGapUS()/1000.0 << "\n";
	  struct timeval totalGaps = stats->totalInterPacketGaps();
	  double totalGapsMS = totalGaps.tv_sec*1000.0 + totalGaps.tv_usec/1000.0;
	  unsigned totNumPacketsReceived = stats->totNumPacketsReceived();
	  *env << "inter_packet_gap_ms_ave\t"
	       << (totNumPacketsReceived == 0 ? 0.0 : totalGapsMS/totNumPacketsReceived) << "\n";
	  *env << "inter_packet_gap_ms_max\t" << stats->maxInterPacketGapUS()/1000.0 << "\n";
	}

	curQOSRecord = curQOSRecord->fNext;
      }
    }
  }

  *env << "end_QOS_statistics\n";
  delete qosRecordHead; // deletes the whole chain (see the record's destructor)
}
+
// Guards against re-entering "shutdown()" (e.g. a RTCP "BYE" arriving while a
// "TEARDOWN" is already in progress):
Boolean areAlreadyShuttingDown = False;
// Process exit code, saved here for use by "continueAfterTEARDOWN()":
int shutdownExitCode;
// Begins an orderly shutdown with the given process exit code: cancels all
// pending timers, prints QOS stats (if enabled), and tears down the session.
// The process actually exits in "continueAfterTEARDOWN()" — immediately,
// unless we're waiting for the server's "TEARDOWN" response.
void shutdown(int exitCode) {
  if (areAlreadyShuttingDown) return; // in case we're called after receiving a RTCP "BYE" while in the middle of a "TEARDOWN".
  areAlreadyShuttingDown = True;

  shutdownExitCode = exitCode; // used later by "continueAfterTEARDOWN()"
  if (env != NULL) {
    // Cancel all pending delayed tasks:
    env->taskScheduler().unscheduleDelayedTask(periodicFileOutputTask);
    env->taskScheduler().unscheduleDelayedTask(sessionTimerTask);
    env->taskScheduler().unscheduleDelayedTask(sessionTimeoutBrokenServerTask);
    env->taskScheduler().unscheduleDelayedTask(arrivalCheckTimerTask);
    env->taskScheduler().unscheduleDelayedTask(interPacketGapCheckTimerTask);
    env->taskScheduler().unscheduleDelayedTask(qosMeasurementTimerTask);
  }

  if (qosMeasurementIntervalMS > 0) {
    printQOSData(exitCode);
  }

  // Teardown, then shutdown, any outstanding RTP/RTCP subsessions
  Boolean shutdownImmediately = True; // by default
  if (session != NULL) {
    RTSPClient::responseHandler* responseHandlerForTEARDOWN = NULL; // unless:
    if (waitForResponseToTEARDOWN) {
      shutdownImmediately = False;
      responseHandlerForTEARDOWN = continueAfterTEARDOWN;
    }
    tearDownSession(session, responseHandlerForTEARDOWN);
  }

  if (shutdownImmediately) continueAfterTEARDOWN(NULL, 0, NULL);
}
+
// Final stage of shutting down: closes output files and the session, frees
// the client and authenticators, then exits with "shutdownExitCode".
// This function does not return.
void continueAfterTEARDOWN(RTSPClient*, int /*resultCode*/, char* resultString) {
  delete[] resultString;

  // Now that we've stopped any more incoming data from arriving, close our output files:
  closeMediaSinks();
  Medium::close(session);

  // Finally, shut down our client:
  delete ourAuthenticator;
  delete authDBForREGISTER;
  Medium::close(ourClient);

  // Adios...
  exit(shutdownExitCode);
}
+
// Signal handler (e.g. for SIGINT/SIGHUP): begin shutdown without waiting for
// the server's TEARDOWN response, since the process may be killed soon.
// NOTE(review): "*env <<" and the work done by "shutdown()" are not
// async-signal-safe; this is tolerable for a test program, but confirm the
// handler is only installed for termination signals.
void signalHandlerShutdown(int /*sig*/) {
  *env << "Got shutdown signal\n";
  waitForResponseToTEARDOWN = False; // to ensure that we end, even if the server does not respond to our TEARDOWN
  shutdown(0);
}
+
+void checkForPacketArrival(void* /*clientData*/) {
+ arrivalCheckTimerTask = NULL;
+ if (!notifyOnPacketArrival) return; // we're not checking
+
+ // Check each subsession, to see whether it has received data packets:
+ unsigned numSubsessionsChecked = 0;
+ unsigned numSubsessionsWithReceivedData = 0;
+ unsigned numSubsessionsThatHaveBeenSynced = 0;
+
+ MediaSubsessionIterator iter(*session);
+ MediaSubsession* subsession;
+ while ((subsession = iter.next()) != NULL) {
+ RTPSource* src = subsession->rtpSource();
+ if (src == NULL) continue;
+ ++numSubsessionsChecked;
+
+ if (src->receptionStatsDB().numActiveSourcesSinceLastReset() > 0) {
+ // At least one data packet has arrived
+ ++numSubsessionsWithReceivedData;
+ }
+ if (src->hasBeenSynchronizedUsingRTCP()) {
+ ++numSubsessionsThatHaveBeenSynced;
+ }
+ }
+
+ unsigned numSubsessionsToCheck = numSubsessionsChecked;
+ // Special case for "QuickTimeFileSink"s and "AVIFileSink"s:
+ // They might not use all of the input sources:
+ if (qtOut != NULL) {
+ numSubsessionsToCheck = qtOut->numActiveSubsessions();
+ } else if (aviOut != NULL) {
+ numSubsessionsToCheck = aviOut->numActiveSubsessions();
+ }
+
+ Boolean notifyTheUser;
+ if (!syncStreams) {
+ notifyTheUser = numSubsessionsWithReceivedData > 0; // easy case
+ } else {
+ notifyTheUser = numSubsessionsWithReceivedData >= numSubsessionsToCheck
+ && numSubsessionsThatHaveBeenSynced == numSubsessionsChecked;
+ // Note: A subsession with no active sources is considered to be synced
+ }
+ if (notifyTheUser) {
+ struct timeval timeNow;
+ gettimeofday(&timeNow, NULL);
+ char timestampStr[100];
+ sprintf(timestampStr, "%ld%03ld", timeNow.tv_sec, (long)(timeNow.tv_usec/1000));
+ *env << (syncStreams ? "Synchronized d" : "D")
+ << "ata packets have begun arriving [" << timestampStr << "]\007\n";
+ return;
+ }
+
+ // No luck, so reschedule this check again, after a delay:
+ int uSecsToDelay = 100000; // 100 ms
+ arrivalCheckTimerTask
+ = env->taskScheduler().scheduleDelayedTask(uSecsToDelay,
+ (TaskFunc*)checkForPacketArrival, NULL);
+}
+
+void checkInterPacketGaps(void* /*clientData*/) {
+ interPacketGapCheckTimerTask = NULL;
+ if (interPacketGapMaxTime == 0) return; // we're not checking
+
+ // Check each subsession, counting up how many packets have been received:
+ unsigned newTotNumPacketsReceived = 0;
+
+ MediaSubsessionIterator iter(*session);
+ MediaSubsession* subsession;
+ while ((subsession = iter.next()) != NULL) {
+ RTPSource* src = subsession->rtpSource();
+ if (src == NULL) continue;
+ newTotNumPacketsReceived += src->receptionStatsDB().totNumPacketsReceived();
+ }
+
+ if (newTotNumPacketsReceived == totNumPacketsReceived) {
+ // No additional packets have been received since the last time we
+ // checked, so end this stream:
+ *env << "Closing session, because we stopped receiving packets.\n";
+ interPacketGapCheckTimerTask = NULL;
+ sessionAfterPlaying();
+ } else {
+ totNumPacketsReceived = newTotNumPacketsReceived;
+ // Check again, after the specified delay:
+ interPacketGapCheckTimerTask
+ = env->taskScheduler().scheduleDelayedTask(interPacketGapMaxTime*1000000,
+ (TaskFunc*)checkInterPacketGaps, NULL);
+ }
+}
+
// Keep-alive task for servers that time out idle RTSP sessions: periodically
// send an "OPTIONS" request so the server keeps our session alive.
void checkSessionTimeoutBrokenServer(void* /*clientData*/) {
  if (!sendKeepAlivesToBrokenServers) return; // we're not checking

  // Send an "OPTIONS" request, starting with the second call
  // (presumably the task handle is still NULL on the first invocation, since
  //  it's only assigned below - TODO confirm against the caller):
  if (sessionTimeoutBrokenServerTask != NULL) {
    getOptions(NULL);
  }

  // Fall back to the RTSP-standard 60s timeout if the server specified none:
  unsigned sessionTimeout = sessionTimeoutParameter == 0 ? 60/*default*/ : sessionTimeoutParameter;
  unsigned secondsUntilNextKeepAlive = sessionTimeout <= 5 ? 1 : sessionTimeout - 5;
  // Reduce the interval a little, to be on the safe side

  sessionTimeoutBrokenServerTask
    = env->taskScheduler().scheduleDelayedTask(secondsUntilNextKeepAlive*1000000,
                                               (TaskFunc*)checkSessionTimeoutBrokenServer, NULL);

}
diff --git a/testProgs/playCommon.hh b/testProgs/playCommon.hh
new file mode 100644
index 0000000..b9be910
--- /dev/null
+++ b/testProgs/playCommon.hh
@@ -0,0 +1,47 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A common framework, used for the "openRTSP" and "playSIP" applications
+// Interfaces
+
#include "liveMedia.hh"

// Client-object management.  Each application ("openRTSP", "playSIP")
// provides its own implementations of "createClient()"/"assignClient()":
extern Medium* createClient(UsageEnvironment& env, char const* URL, int verbosityLevel, char const* applicationName);
extern void assignClient(Medium* client);
extern RTSPClient* ourRTSPClient;
extern SIPClient* ourSIPClient;

// Protocol operations.  Each is asynchronous: the result is delivered to the
// supplied "afterFunc" callback.

extern void getOptions(RTSPClient::responseHandler* afterFunc);

extern void getSDPDescription(RTSPClient::responseHandler* afterFunc);

extern void setupSubsession(MediaSubsession* subsession, Boolean streamUsingTCP, Boolean forceMulticastOnUnspecified, RTSPClient::responseHandler* afterFunc);

extern void startPlayingSession(MediaSession* session, double start, double end, float scale, RTSPClient::responseHandler* afterFunc);

extern void startPlayingSession(MediaSession* session, char const* absStartTime, char const* absEndTime, float scale, RTSPClient::responseHandler* afterFunc);
  // For playing by 'absolute' time (using strings of the form "YYYYMMDDTHHMMSSZ" or "YYYYMMDDTHHMMSS.<frac>Z")

extern void tearDownSession(MediaSession* session, RTSPClient::responseHandler* afterFunc);

extern void setUserAgentString(char const* userAgentString);

// Shared state, defined in the per-application source file:
extern Authenticator* ourAuthenticator;
extern Boolean allowProxyServers;
extern Boolean controlConnectionUsesTCP;
extern Boolean supportCodecSelection;
extern char const* clientProtocolName;
extern unsigned statusCode;
diff --git a/testProgs/playSIP.cpp b/testProgs/playSIP.cpp
new file mode 100644
index 0000000..45bc928
--- /dev/null
+++ b/testProgs/playSIP.cpp
@@ -0,0 +1,184 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A SIP client test program that opens a SIP URL argument,
+// and extracts the data from each incoming RTP stream.
+
+#include "playCommon.hh"
+#include "SIPClient.hh"
+
+static char* getLine(char* startOfLine) {
+ // returns the start of the next line, or NULL if none
+ for (char* ptr = startOfLine; *ptr != '\0'; ++ptr) {
+ if (*ptr == '\r' || *ptr == '\n') {
+ // We found the end of the line
+ *ptr++ = '\0';
+ if (*ptr == '\n') ++ptr;
+ return ptr;
+ }
+ }
+
+ return NULL;
+}
+
// The single SIP client object for this run (created below):
SIPClient* ourSIPClient = NULL;

// Creates the global "SIPClient".  The URL parameter is unused here; the SIP
// call setup happens later, in "getSDPDescription()".
Medium* createClient(UsageEnvironment& env, char const* /*url*/, int verbosityLevel, char const* applicationName) {
  // First, trim any directory prefixes from "applicationName":
  // (scan backwards from the terminating '\0' to the last '/' or '\\')
  char const* suffix = &applicationName[strlen(applicationName)];
  while (suffix != applicationName) {
    if (*suffix == '/' || *suffix == '\\') {
      applicationName = ++suffix;
      break;
    }
    --suffix;
  }

  // These two globals are defined in the application's main source file:
  extern unsigned char desiredAudioRTPPayloadFormat;
  extern char* mimeSubtype;
  return ourSIPClient = SIPClient::createNew(env, desiredAudioRTPPayloadFormat, mimeSubtype, verbosityLevel, applicationName);
}
+
// The following function must be implemented (it's part of the "playCommon"
// interface), but is not used for "playSIP", so it's a no-op:
void assignClient(Medium* /*client*/) {
}
+
+void getOptions(RTSPClient::responseHandler* afterFunc) {
+ ourSIPClient->envir().setResultMsg("NOT SUPPORTED IN CLIENT");
+ afterFunc(NULL, -1, strDup(ourSIPClient->envir().getResultMsg()));
+}
+
// SIP analog of RTSP "DESCRIBE": optionally configure a proxy server, then
// send an INVITE; the resulting SDP description is passed to "afterFunc"
// (resultCode 0 on success, -1 if the INVITE produced no SDP).
void getSDPDescription(RTSPClient::responseHandler* afterFunc) {
  extern char* proxyServerName;
  if (proxyServerName != NULL) {
    // Tell the SIP client about the proxy:
    NetAddressList addresses(proxyServerName);
    if (addresses.numAddresses() == 0) {
      // Lookup failed; log it, but carry on without a proxy:
      ourSIPClient->envir() << "Failed to find network address for \"" << proxyServerName << "\"\n";
    } else {
      NetAddress address = *(addresses.firstAddress());
      unsigned proxyServerAddress // later, allow for IPv6 #####
        = *(unsigned*)(address.data());
      extern unsigned short proxyServerPortNum;
      if (proxyServerPortNum == 0) proxyServerPortNum = 5060; // default

      ourSIPClient->setProxyServer(proxyServerAddress, proxyServerPortNum);
    }
  }

  extern unsigned short desiredPortNum;
  unsigned short clientStartPortNum = desiredPortNum;
  if (clientStartPortNum == 0) clientStartPortNum = 8000; // default
  ourSIPClient->setClientStartPortNum(clientStartPortNum);

  extern char const* streamURL;
  // Use the authenticated INVITE variant only if both credentials are set:
  char const* username = ourAuthenticator == NULL ? NULL : ourAuthenticator->username();
  char const* password = ourAuthenticator == NULL ? NULL : ourAuthenticator->password();
  char* result;
  if (username != NULL && password != NULL) {
    result = ourSIPClient->inviteWithPassword(streamURL, username, password);
  } else {
    result = ourSIPClient->invite(streamURL);
  }

  int resultCode = result == NULL ? -1 : 0;
  afterFunc(NULL, resultCode, strDup(result));
}
+
// SIP analog of RTSP "SETUP".  There is no real SETUP exchange in SIP, so
// instead we re-parse the SDP body of the server's INVITE response for the
// audio server port ("m=audio ...") and server address ("c=IN IP4 ..."), and
// poke them directly into the subsession.  TCP streaming and multicast
// forcing are not supported, hence the ignored parameters.
void setupSubsession(MediaSubsession* subsession, Boolean /*streamUsingTCP*/, Boolean /*forceMulticastOnUnspecified*/,RTSPClient::responseHandler* afterFunc) {
  subsession->setSessionId("mumble"); // anything that's non-NULL will work

  ////////// BEGIN hack code that should really be implemented in SIPClient //////////
  // Parse the "Transport:" header parameters:
  // We do not send audio, but we need port for RTCP
  char* serverAddressStr;
  portNumBits serverPortNum;
  unsigned char rtpChannelId, rtcpChannelId;

  // Defaults, in case the SDP lacks the lines we look for:
  rtpChannelId = rtcpChannelId = 0xff;
  serverPortNum = 0;
  serverAddressStr = NULL;

  // Work on a private copy of the SDP, because "getLine()" mutates it:
  char* sdp = strDup(ourSIPClient->getInviteSdpReply());

  char* lineStart;
  char* nextLineStart = sdp;
  while (1) {
    lineStart = nextLineStart;
    if (lineStart == NULL) {
      break;
    }
    nextLineStart = getLine(lineStart);

    // Scratch buffer, sized to hold any field scanned from this line:
    char* toTagStr = strDupSize(lineStart);

    if (sscanf(lineStart, "m=audio %[^/\r\n]", toTagStr) == 1) {
      sscanf(toTagStr, "%hu", &serverPortNum);
    } else if (sscanf(lineStart, "c=IN IP4 %[^/\r\n]", toTagStr) == 1) {
      serverAddressStr = strDup(toTagStr);
    }
    delete[] toTagStr;
  }

  // (NOTE(review): the NULL check is redundant - "delete[] NULL" is well-defined)
  if(sdp != NULL) {
    delete[] sdp;
  }

  // Hand ownership of "serverAddressStr" to the subsession:
  delete[] subsession->connectionEndpointName();
  subsession->connectionEndpointName() = serverAddressStr;
  subsession->serverPortNum = serverPortNum;
  subsession->rtpChannelId = rtpChannelId;
  subsession->rtcpChannelId = rtcpChannelId;

  // Set the RTP and RTCP sockets' destination address and port from the information in the SETUP response (if present):
  netAddressBits destAddress = subsession->connectionEndpointAddress();
  if (destAddress != 0) {
    subsession->setDestinations(destAddress);
  }
  ////////// END hack code that should really be implemented in SIPClient //////////

  afterFunc(NULL, 0, NULL);
}
+
+void startPlayingSession(MediaSession* /*session*/, double /*start*/, double /*end*/, float /*scale*/, RTSPClient::responseHandler* afterFunc) {
+ if (ourSIPClient->sendACK()) {
+ //##### This isn't quite right, because we should really be allowing
+ //##### for the possibility of this ACK getting lost, by retransmitting
+ //##### it *each time* we get a 2xx response from the server.
+ afterFunc(NULL, 0, NULL);
+ } else {
+ afterFunc(NULL, -1, strDup(ourSIPClient->envir().getResultMsg()));
+ }
+}
+void startPlayingSession(MediaSession* /*session*/, const char* /*start*/, const char* /*end*/, float /*scale*/, RTSPClient::responseHandler* afterFunc) {
+ startPlayingSession(NULL,(double)0,(double)0,0,afterFunc);
+}
+
+void tearDownSession(MediaSession* /*session*/, RTSPClient::responseHandler* afterFunc) {
+ if (ourSIPClient == NULL || ourSIPClient->sendBYE()) {
+ afterFunc(NULL, 0, NULL);
+ } else {
+ afterFunc(NULL, -1, strDup(ourSIPClient->envir().getResultMsg()));
+ }
+}
+
// Sets the "User-Agent:" header value that our SIP client will send.
// NOTE(review): unlike "tearDownSession()", this assumes "ourSIPClient" is
// non-NULL - confirm it's only called after "createClient()".
void setUserAgentString(char const* userAgentString) {
  ourSIPClient->setUserAgentString(userAgentString);
}
+
// Protocol-specific settings consumed by the common "playCommon" framework
// (declared extern in "playCommon.hh"):
Boolean allowProxyServers = True;
Boolean controlConnectionUsesTCP = False;
Boolean supportCodecSelection = True;
char const* clientProtocolName = "SIP";
diff --git a/testProgs/registerRTSPStream.cpp b/testProgs/registerRTSPStream.cpp
new file mode 100644
index 0000000..a469003
--- /dev/null
+++ b/testProgs/registerRTSPStream.cpp
@@ -0,0 +1,102 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A demonstration application that uses our custom RTSP "REGISTER" command to register a stream
+// (given by "rtsp://" URL) with a RTSP client or proxy server
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
char const* programName; // argv[0]; used in the "usage()" message
UsageEnvironment* env;

// Command-line options (see "usage()"):
Boolean requestStreamingViaTCP = False; // "-t"
char const* username = NULL;            // "-u <username> <password>"
char const* password = NULL;
+
+void registerResponseHandler(RTSPClient* rtspClient, int resultCode, char* resultString) {
+ Medium::close(rtspClient);
+
+ // We're done:
+ exit(0);
+}
+
// Prints a one-line usage summary to "env", then exits with status 1.
void usage() {
  *env << "usage: " << programName << " [-t] [-u <username> <password>] "
    "<remote-client-or-proxy-server-name-or-address> <remote-client-or-proxy-server-port-number> <rtsp-URL-to-register>"
    " [proxy-URL-suffix]\n";
  exit(1);
}
+
// Parses the command line, sends a single RTSP "REGISTER" command to the
// given remote client/proxy, and runs the event loop until the response
// handler exits the process.
int main(int argc, char const** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Parse command-line options and arguments:
  // (Unfortunately we can't use getopt() here; Windoze doesn't have it)
  programName = argv[0];
  while (argc > 2) {
    char const* const opt = argv[1];
    if (opt[0] != '-') break; // first non-option => positional args begin

    switch (opt[1]) {
      case 't': { // ask the remote client to access the stream via TCP instead of UDP
        requestStreamingViaTCP = True;
        break;
      }

      case 'u': { // specify a username and password
        if (argc < 4) usage(); // there's no argv[3] (for the "password")
        username = argv[2];
        password = argv[3];
        argv+=2; argc-=2; // consume the two extra option arguments
        break;
      }

      default: {
        usage();
        break;
      }
    }

    ++argv; --argc; // consume the option itself
  }
  // Exactly 3 positional arguments, plus an optional proxy-URL suffix:
  if (argc != 4 && argc != 5) usage();

  char const* remoteClientNameOrAddress = argv[1];

  // The port must parse as a nonzero 16-bit number (0xFFFF is reserved):
  portNumBits remoteClientPortNum;
  if (sscanf(argv[2], "%hu", &remoteClientPortNum) != 1 || remoteClientPortNum == 0 || remoteClientPortNum == 0xFFFF) usage();

  char const* rtspURLToRegister = argv[3];

  char const* proxyURLSuffix = argc == 5 ? argv[4] : NULL;

  // (Not freed explicitly; the process exits from the response handler.)
  Authenticator* ourAuthenticator = username == NULL ? NULL : new Authenticator(username, password);

  // We have the command-line arguments.  Send the command:

  RTSPRegisterSender::createNew(*env, remoteClientNameOrAddress, remoteClientPortNum, rtspURLToRegister,
                                registerResponseHandler, ourAuthenticator,
                                requestStreamingViaTCP, proxyURLSuffix, False/*reuseConnection*/,
                                1/*verbosityLevel*/, programName);
  // Note: This object will be deleted later, by the response handler

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
diff --git a/testProgs/sapWatch.cpp b/testProgs/sapWatch.cpp
new file mode 100644
index 0000000..8b5e258
--- /dev/null
+++ b/testProgs/sapWatch.cpp
@@ -0,0 +1,73 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that receives and prints SDP/SAP announcements
+// (on the default SDP/SAP directory: 224.2.127.254/9875)
+
+#include "Groupsock.hh"
+#include "GroupsockHelper.hh"
+#include "BasicUsageEnvironment.hh"
+#include <stdio.h>
+
+static unsigned const maxPacketSize = 65536;
+static unsigned char packet[maxPacketSize+1];
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
+
+
+ // Create a 'groupsock' for the input multicast group,port:
+ char const* sessionAddressStr = "224.2.127.254";
+ struct in_addr sessionAddress;
+ sessionAddress.s_addr = our_inet_addr(sessionAddressStr);
+
+ const Port port(9875);
+ const unsigned char ttl = 0; // we're only reading from this mcast group
+
+ Groupsock inputGroupsock(*env, sessionAddress, port, ttl);
+
+ // Start reading and printing incoming packets
+ // (Because this is the only thing we do, we can just do this
+ // synchronously, in a loop, so we don't need to set up an asynchronous
+ // event handler like we do in most of the other test programs.)
+ unsigned packetSize;
+ struct sockaddr_in fromAddress;
+ while (inputGroupsock.handleRead(packet, maxPacketSize,
+ packetSize, fromAddress)) {
+ printf("\n[packet from %s (%d bytes)]\n", AddressString(fromAddress).val(), packetSize);
+
+ // Ignore the first 8 bytes (SAP header).
+ if (packetSize < 8) {
+ *env << "Ignoring short packet from " << AddressString(fromAddress).val() << "%s!\n";
+ continue;
+ }
+
+ // convert "application/sdp\0" -> "application/sdp\0x20"
+ // or all other nonprintable characters to blank, except new line
+ unsigned idx = 8;
+ while (idx < packetSize) {
+ if (packet[idx] < 0x20 && packet[idx] != '\n') packet[idx] = 0x20;
+ idx++;
+ }
+
+ packet[packetSize] = '\0'; // just in case
+ printf("%s", (char*)(packet+8));
+ }
+
+ return 0; // only to prevent compiler warning
+}
diff --git a/testProgs/testAMRAudioStreamer.cpp b/testProgs/testAMRAudioStreamer.cpp
new file mode 100644
index 0000000..e5dc0af
--- /dev/null
+++ b/testProgs/testAMRAudioStreamer.cpp
@@ -0,0 +1,122 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads an AMR audio file (as defined in RFC 3267)
+// and streams it using RTP
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
UsageEnvironment* env;
char const* inputFileName = "test.amr";
// NOTE(review): ensure "play()" assigns this global (and not a shadowing
// local), since "afterPlaying()" closes the source through it:
AMRAudioFileSource* audioSource;
RTPSink* audioSink;

void play(); // forward
+
// Sets up SSM multicast RTP/RTCP for an AMR audio stream, announces it via a
// local RTSP server on port 8554, and starts the (looping) file playback.
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 16666;
  const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP convention: RTP port + 1
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'AMR Audio RTP' sink from the RTP 'groupsock':
  // (96 is the first dynamic RTP payload type number)
  audioSink = AMRAudioRTPSink::createNew(*env, &rtpGroupsock, 96);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 10; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp
    = RTCPInstance::createNew(*env, &rtcpGroupsock,
                              estimatedSessionBandwidth, CNAME,
                              audioSink, NULL /* we're a server */,
                              True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  // Create and start a RTSP server to serve this stream.
  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
                                    "Session streamed by \"testAMRAudioStreamer\"",
                                    True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
+
// Called when the sink has consumed the entire file: close the source, then
// loop the stream by calling "play()" again.
// NOTE(review): this relies on "play()" assigning the *global* "audioSource";
// if "play()" instead uses a shadowing local, this close is a no-op and each
// loop iteration leaks a file source - verify.
void afterPlaying(void* /*clientData*/) {
  *env << "...done reading from file\n";

  audioSink->stopPlaying();
  Medium::close(audioSource);
  // Note that this also closes the input file that this source read from.

  play();
}
+
+void play() {
+ // Open the input file as an 'AMR audio file source':
+ AMRAudioFileSource* audioSource
+ = AMRAudioFileSource::createNew(*env, inputFileName);
+ if (audioSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as an AMR audio file source: "
+ << env->getResultMsg() << "\n";
+ exit(1);
+ }
+
+ // Finally, start playing:
+ *env << "Beginning to read from file...\n";
+ audioSink->startPlaying(*audioSource, afterPlaying, audioSink);
+}
diff --git a/testProgs/testDVVideoStreamer.cpp b/testProgs/testDVVideoStreamer.cpp
new file mode 100644
index 0000000..7f03936
--- /dev/null
+++ b/testProgs/testDVVideoStreamer.cpp
@@ -0,0 +1,128 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a DV Video Elementary Stream file,
+// and streams it using RTP
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
UsageEnvironment* env;
char const* inputFileName = "test.dv";
// The current framer; (re)assigned by "play()" on each loop, and closed by
// "afterPlaying()":
DVVideoStreamFramer* videoSource;
RTPSink* videoSink;

void play(); // forward
+
// Sets up SSM multicast RTP/RTCP for a DV video stream, announces it via a
// local RTSP server on port 8554, and starts the (looping) file playback.
int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  env = BasicUsageEnvironment::createNew(*scheduler);

  // Create 'groupsocks' for RTP and RTCP:
  struct in_addr destinationAddress;
  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
  // Note: This is a multicast address.  If you wish instead to stream
  // using unicast, then you should use the "testOnDemandRTSPServer"
  // test program - not this test program - as a model.

  const unsigned short rtpPortNum = 18888;
  const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP convention: RTP port + 1
  const unsigned char ttl = 255;

  const Port rtpPort(rtpPortNum);
  const Port rtcpPort(rtcpPortNum);

  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
  rtpGroupsock.multicastSendOnly(); // we're a SSM source
  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
  rtcpGroupsock.multicastSendOnly(); // we're a SSM source

  // Create a 'DV Video RTP' sink from the RTP 'groupsock':
  // (But first, make sure that its buffers will be large enough to handle the huge size of DV frames (as big as 288000).)
  OutPacketBuffer::maxSize = 300000;
  videoSink = DVVideoRTPSink::createNew(*env, &rtpGroupsock, 96);

  // Create (and start) a 'RTCP instance' for this RTP sink:
  const unsigned estimatedSessionBandwidth = 50000; // in kbps; for RTCP b/w share
  const unsigned maxCNAMElen = 100;
  unsigned char CNAME[maxCNAMElen+1];
  gethostname((char*)CNAME, maxCNAMElen);
  CNAME[maxCNAMElen] = '\0'; // just in case
  RTCPInstance* rtcp
    = RTCPInstance::createNew(*env, &rtcpGroupsock,
                              estimatedSessionBandwidth, CNAME,
                              videoSink, NULL /* we're a server */,
                              True /* we're a SSM source */);
  // Note: This starts RTCP running automatically

  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
  if (rtspServer == NULL) {
    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
    exit(1);
  }
  ServerMediaSession* sms
    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
                                    "Session streamed by \"testDVVideoStreamer\"",
                                    True /*SSM*/);
  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
  rtspServer->addServerMediaSession(sms);

  char* url = rtspServer->rtspURL(sms);
  *env << "Play this stream using the URL \"" << url << "\"\n";
  delete[] url;

  // Start the streaming:
  *env << "Beginning streaming...\n";
  play();

  env->taskScheduler().doEventLoop(); // does not return

  return 0; // only to prevent compiler warning
}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done reading from file\n";
+
+ videoSink->stopPlaying();
+ Medium::close(videoSource);
+ // Note that this also closes the input file that this source read from.
+
+ // Start playing once again:
+ play();
+}
+
+void play() {
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (fileSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ FramedSource* videoES = fileSource;
+
+ // Create a framer for the Video Elementary Stream:
+ videoSource = DVVideoStreamFramer::createNew(*env, videoES);
+
+ // Finally, start playing:
+ *env << "Beginning to read from file...\n";
+ videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+}
diff --git a/testProgs/testGSMStreamer.cpp b/testProgs/testGSMStreamer.cpp
new file mode 100644
index 0000000..706fe3e
--- /dev/null
+++ b/testProgs/testGSMStreamer.cpp
@@ -0,0 +1,168 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that streams GSM audio via RTP/RTCP
+// main program
+
+// NOTE: This program assumes the existence of a (currently nonexistent)
+// function called "createNewGSMAudioSource()".
+
+#include "liveMedia.hh"
+#include "GroupsockHelper.hh"
+
+#include "BasicUsageEnvironment.hh"
+
+////////// Main program //////////
+
+// To stream using "source-specific multicast" (SSM), uncomment the following:
+//#define USE_SSM 1
+#ifdef USE_SSM
+Boolean const isSSM = True;
+#else
+Boolean const isSSM = False;
+#endif
+
+// To set up an internal RTSP server, uncomment the following:
+//#define IMPLEMENT_RTSP_SERVER 1
+// (Note that this RTSP server works for multicast only)
+
+#ifdef IMPLEMENT_RTSP_SERVER
+RTSPServer* rtspServer;
+#endif
+
+UsageEnvironment* env;
+
+void afterPlaying(void* clientData); // forward
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+ FramedSource* source;
+ RTPSink* sink;
+ RTCPInstance* rtcpInstance;
+ Groupsock* rtpGroupsock;
+ Groupsock* rtcpGroupsock;
+} sessionState;
+
+void play(); // forward
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create 'groupsocks' for RTP and RTCP:
+  char const* destinationAddressStr
+#ifdef USE_SSM
+ = "232.255.42.42";
+#else
+ = "239.255.42.42";
+ // Note: This is a multicast address. If you wish to stream using
+ // unicast instead, then replace this string with the unicast address
+ // of the (single) destination. (You may also need to make a similar
+ // change to the receiver program.)
+#endif
+ const unsigned short rtpPortNum = 6666;
+ const unsigned short rtcpPortNum = rtpPortNum+1;
+ const unsigned char ttl = 1; // low, in case routers don't admin scope
+
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = our_inet_addr(destinationAddressStr);
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+ sessionState.rtpGroupsock
+ = new Groupsock(*env, destinationAddress, rtpPort, ttl);
+ sessionState.rtcpGroupsock
+ = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
+#ifdef USE_SSM
+ sessionState.rtpGroupsock->multicastSendOnly();
+ sessionState.rtcpGroupsock->multicastSendOnly();
+#endif
+
+ // Create a 'GSM RTP' sink from the RTP 'groupsock':
+ sessionState.sink
+ = GSMAudioRTPSink::createNew(*env, sessionState.rtpGroupsock);
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen);
+ CNAME[maxCNAMElen] = '\0'; // just in case
+ sessionState.rtcpInstance
+ = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ sessionState.sink, NULL /* we're a server */,
+ isSSM);
+ // Note: This starts RTCP running automatically
+
+#ifdef IMPLEMENT_RTSP_SERVER
+ rtspServer = RTSPServer::createNew(*env, 8554);
+ if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "testStream", "GSM input",
+ "Session streamed by \"testGSMStreamer\"", isSSM);
+ sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
+ rtspServer->addServerMediaSession(sms);
+
+ char* url = rtspServer->rtspURL(sms);
+ *env << "Play this stream using the URL \"" << url << "\"\n";
+ delete[] url;
+#endif
+
+ play();
+
+ env->taskScheduler().doEventLoop(); // does not return
+ return 0; // only to prevent compiler warning
+}
+
+void play() {
+ // Open the input source:
+ extern FramedSource* createNewGSMAudioSource(UsageEnvironment&);
+ sessionState.source = createNewGSMAudioSource(*env);
+ if (sessionState.source == NULL) {
+ *env << "Failed to create GSM source\n";
+ exit(1);
+ }
+
+ // Finally, start the streaming:
+ *env << "Beginning streaming...\n";
+ sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
+}
+
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done streaming\n";
+
+ sessionState.sink->stopPlaying();
+
+ // End this loop by closing the media:
+#ifdef IMPLEMENT_RTSP_SERVER
+ Medium::close(rtspServer);
+#endif
+ Medium::close(sessionState.rtcpInstance);
+ Medium::close(sessionState.sink);
+ delete sessionState.rtpGroupsock;
+ Medium::close(sessionState.source);
+ delete sessionState.rtcpGroupsock;
+
+ // And start another loop:
+ play();
+}
diff --git a/testProgs/testH264VideoStreamer.cpp b/testProgs/testH264VideoStreamer.cpp
new file mode 100644
index 0000000..ab3b942
--- /dev/null
+++ b/testProgs/testH264VideoStreamer.cpp
@@ -0,0 +1,132 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a H.264 Elementary Stream video file
+// and streams it using RTP
+// main program
+//
+// NOTE: For this application to work, the H.264 Elementary Stream video file *must* contain SPS and PPS NAL units,
+// ideally at or near the start of the file. These SPS and PPS NAL units are used to specify 'configuration' information
+// that is set in the output stream's SDP description (by the RTSP server that is built in to this application).
+// Note also that - unlike some other "*Streamer" demo applications - the resulting stream can be received only using a
+// RTSP client (such as "openRTSP")
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+#include <GroupsockHelper.hh>
+
+UsageEnvironment* env;
+char const* inputFileName = "test.264";
+H264VideoStreamFramer* videoSource;
+RTPSink* videoSink;
+
+void play(); // forward
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create 'groupsocks' for RTP and RTCP:
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+ // Note: This is a multicast address. If you wish instead to stream
+ // using unicast, then you should use the "testOnDemandRTSPServer"
+ // test program - not this test program - as a model.
+
+ const unsigned short rtpPortNum = 18888;
+ const unsigned short rtcpPortNum = rtpPortNum+1;
+ const unsigned char ttl = 255;
+
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+ Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
+ rtpGroupsock.multicastSendOnly(); // we're a SSM source
+ Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
+ rtcpGroupsock.multicastSendOnly(); // we're a SSM source
+
+ // Create a 'H264 Video RTP' sink from the RTP 'groupsock':
+ OutPacketBuffer::maxSize = 100000;
+ videoSink = H264VideoRTPSink::createNew(*env, &rtpGroupsock, 96);
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen);
+ CNAME[maxCNAMElen] = '\0'; // just in case
+ RTCPInstance* rtcp
+ = RTCPInstance::createNew(*env, &rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ videoSink, NULL /* we're a server */,
+ True /* we're a SSM source */);
+ // Note: This starts RTCP running automatically
+
+ RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+ "Session streamed by \"testH264VideoStreamer\"",
+ True /*SSM*/);
+ sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
+ rtspServer->addServerMediaSession(sms);
+
+ char* url = rtspServer->rtspURL(sms);
+ *env << "Play this stream using the URL \"" << url << "\"\n";
+ delete[] url;
+
+ // Start the streaming:
+ *env << "Beginning streaming...\n";
+ play();
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done reading from file\n";
+ videoSink->stopPlaying();
+ Medium::close(videoSource);
+ // Note that this also closes the input file that this source read from.
+
+ // Start playing once again:
+ play();
+}
+
+void play() {
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (fileSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ FramedSource* videoES = fileSource;
+
+ // Create a framer for the Video Elementary Stream:
+ videoSource = H264VideoStreamFramer::createNew(*env, videoES);
+
+ // Finally, start playing:
+ *env << "Beginning to read from file...\n";
+ videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+}
diff --git a/testProgs/testH264VideoToHLSSegments.cpp b/testProgs/testH264VideoToHLSSegments.cpp
new file mode 100644
index 0000000..faaa44d
--- /dev/null
+++ b/testProgs/testH264VideoToHLSSegments.cpp
@@ -0,0 +1,106 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that converts a H.264 (Elementary Stream) video file into sequence of
+// HLS (HTTP Live Streaming) segments, plus a ".m3u8" file that can be accessed via a web browser.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+#define OUR_HLS_SEGMENTATION_DURATION 6
+#define OUR_HLS_FILENAME_PREFIX "hlsTest"
+char const* inputFileName = "in.264";
+FILE* ourM3U8Fid = NULL;
+
+void segmentationCallback(void* clientData, char const* segmentFileName, double segmentDuration); // forward
+void afterPlaying(void* clientData); // forward
+
+UsageEnvironment* env;
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Open the input file as a 'byte-stream file source':
+ FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (inputSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ // Create a 'framer' filter for this file source, to generate presentation times for each NAL unit:
+ H264VideoStreamFramer* framer
+ = H264VideoStreamFramer::createNew(*env, inputSource,
+ True/*includeStartCodeInOutput*/,
+ True/*insertAccessUnitDelimiters*/);
+
+ // Then create a filter that packs the H.264 video data into a Transport Stream:
+ MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env);
+ tsFrames->addNewVideoSource(framer, 5/*mpegVersion: H.264*/);
+
+ // Create a 'HLS Segmenter' as the media sink:
+ MediaSink* outputSink
+ = HLSSegmenter::createNew(*env, OUR_HLS_SEGMENTATION_DURATION, OUR_HLS_FILENAME_PREFIX,
+ segmentationCallback);
+
+ // Finally, start playing:
+ *env << "Beginning to read...\n";
+ outputSink->startPlaying(*tsFrames, afterPlaying, NULL);
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void segmentationCallback(void* /*clientData*/,
+ char const* segmentFileName, double segmentDuration) {
+ if (ourM3U8Fid == NULL) {
+ // Open our ".m3u8" file for output, and write the prefix:
+ char* ourM3U8FileName = new char[strlen(OUR_HLS_FILENAME_PREFIX) + 5/*strlen(".m3u8")*/ + 1];
+ sprintf(ourM3U8FileName, "%s.m3u8", OUR_HLS_FILENAME_PREFIX);
+ ourM3U8Fid = fopen(ourM3U8FileName, "wb");
+
+ fprintf(ourM3U8Fid,
+ "#EXTM3U\n"
+ "#EXT-X-VERSION:3\n"
+ "#EXT-X-INDEPENDENT-SEGMENTS\n"
+ "#EXT-X-TARGETDURATION:%u\n"
+ "#EXT-X-MEDIA-SEQUENCE:0\n",
+ OUR_HLS_SEGMENTATION_DURATION);
+ }
+
+ // Update our ".m3u8" file with information about this most recent segment:
+ fprintf(ourM3U8Fid,
+ "#EXTINF:%f,\n"
+ "%s\n",
+ segmentDuration,
+ segmentFileName);
+
+ fprintf(stderr, "Wrote segment \"%s\" (duration: %f seconds)\n", segmentFileName, segmentDuration);
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...Done reading\n";
+
+ // Complete and close our ".m3u8" file:
+ fprintf(ourM3U8Fid, "#EXT-X-ENDLIST\n");
+
+ fprintf(stderr, "Wrote %s.m3u8\n", OUR_HLS_FILENAME_PREFIX);
+ exit(0);
+}
diff --git a/testProgs/testH264VideoToTransportStream.cpp b/testProgs/testH264VideoToTransportStream.cpp
new file mode 100644
index 0000000..d400f0c
--- /dev/null
+++ b/testProgs/testH264VideoToTransportStream.cpp
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that converts a H.264 (Elementary Stream) video file into a Transport Stream file.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+char const* inputFileName = "in.264";
+char const* outputFileName = "out.ts";
+
+void afterPlaying(void* clientData); // forward
+
+UsageEnvironment* env;
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Open the input file as a 'byte-stream file source':
+ FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (inputSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ // Create a 'framer' filter for this file source, to generate presentation times for each NAL unit:
+ H264VideoStreamFramer* framer = H264VideoStreamFramer::createNew(*env, inputSource, True/*includeStartCodeInOutput*/);
+
+ // Then create a filter that packs the H.264 video data into a Transport Stream:
+ MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env);
+ tsFrames->addNewVideoSource(framer, 5/*mpegVersion: H.264*/);
+
+ // Open the output file as a 'file sink':
+ MediaSink* outputSink = FileSink::createNew(*env, outputFileName);
+ if (outputSink == NULL) {
+ *env << "Unable to open file \"" << outputFileName << "\" as a file sink\n";
+ exit(1);
+ }
+
+ // Finally, start playing:
+ *env << "Beginning to read...\n";
+ outputSink->startPlaying(*tsFrames, afterPlaying, NULL);
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "Done reading.\n";
+ *env << "Wrote output file: \"" << outputFileName << "\"\n";
+ exit(0);
+}
diff --git a/testProgs/testH265VideoStreamer.cpp b/testProgs/testH265VideoStreamer.cpp
new file mode 100644
index 0000000..ff6d3d5
--- /dev/null
+++ b/testProgs/testH265VideoStreamer.cpp
@@ -0,0 +1,133 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a H.265 Elementary Stream video file
+// and streams it using RTP
+// main program
+//
+// NOTE: For this application to work, the H.265 Elementary Stream video file *must* contain
+// VPS, SPS and PPS NAL units, ideally at or near the start of the file.
+// These VPS, SPS and PPS NAL units are used to specify 'configuration' information that is set in
+// the output stream's SDP description (by the RTSP server that is built in to this application).
+// Note also that - unlike some other "*Streamer" demo applications - the resulting stream can be
+// received only using a RTSP client (such as "openRTSP")
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+#include <GroupsockHelper.hh>
+
+UsageEnvironment* env;
+char const* inputFileName = "test.265";
+H265VideoStreamFramer* videoSource;
+RTPSink* videoSink;
+
+void play(); // forward
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create 'groupsocks' for RTP and RTCP:
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+ // Note: This is a multicast address. If you wish instead to stream
+ // using unicast, then you should use the "testOnDemandRTSPServer"
+ // test program - not this test program - as a model.
+
+ const unsigned short rtpPortNum = 18888;
+ const unsigned short rtcpPortNum = rtpPortNum+1;
+ const unsigned char ttl = 255;
+
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+ Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
+ rtpGroupsock.multicastSendOnly(); // we're a SSM source
+ Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
+ rtcpGroupsock.multicastSendOnly(); // we're a SSM source
+
+ // Create a 'H265 Video RTP' sink from the RTP 'groupsock':
+ OutPacketBuffer::maxSize = 100000;
+ videoSink = H265VideoRTPSink::createNew(*env, &rtpGroupsock, 96);
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen);
+ CNAME[maxCNAMElen] = '\0'; // just in case
+ RTCPInstance* rtcp
+ = RTCPInstance::createNew(*env, &rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ videoSink, NULL /* we're a server */,
+ True /* we're a SSM source */);
+ // Note: This starts RTCP running automatically
+
+ RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+ "Session streamed by \"testH265VideoStreamer\"",
+ True /*SSM*/);
+ sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
+ rtspServer->addServerMediaSession(sms);
+
+ char* url = rtspServer->rtspURL(sms);
+ *env << "Play this stream using the URL \"" << url << "\"\n";
+ delete[] url;
+
+ // Start the streaming:
+ *env << "Beginning streaming...\n";
+ play();
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done reading from file\n";
+ videoSink->stopPlaying();
+ Medium::close(videoSource);
+ // Note that this also closes the input file that this source read from.
+
+ // Start playing once again:
+ play();
+}
+
+void play() {
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (fileSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ FramedSource* videoES = fileSource;
+
+ // Create a framer for the Video Elementary Stream:
+ videoSource = H265VideoStreamFramer::createNew(*env, videoES);
+
+ // Finally, start playing:
+ *env << "Beginning to read from file...\n";
+ videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+}
diff --git a/testProgs/testH265VideoToTransportStream.cpp b/testProgs/testH265VideoToTransportStream.cpp
new file mode 100644
index 0000000..d7e21b3
--- /dev/null
+++ b/testProgs/testH265VideoToTransportStream.cpp
@@ -0,0 +1,70 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that converts a H.265 (Elementary Stream) video file into a Transport Stream file.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+char const* inputFileName = "in.265";
+char const* outputFileName = "out.ts";
+
+void afterPlaying(void* clientData); // forward
+
+UsageEnvironment* env;
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Open the input file as a 'byte-stream file source':
+ FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (inputSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ // Create a 'framer' filter for this file source, to generate presentation times for each NAL unit:
+ H265VideoStreamFramer* framer = H265VideoStreamFramer::createNew(*env, inputSource, True/*includeStartCodeInOutput*/);
+
+ // Then create a filter that packs the H.265 video data into a Transport Stream:
+ MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env);
+ tsFrames->addNewVideoSource(framer, 6/*mpegVersion: H.265*/);
+
+ // Open the output file as a 'file sink':
+ MediaSink* outputSink = FileSink::createNew(*env, outputFileName);
+ if (outputSink == NULL) {
+ *env << "Unable to open file \"" << outputFileName << "\" as a file sink\n";
+ exit(1);
+ }
+
+ // Finally, start playing:
+ *env << "Beginning to read...\n";
+ outputSink->startPlaying(*tsFrames, afterPlaying, NULL);
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "Done reading.\n";
+ *env << "Wrote output file: \"" << outputFileName << "\"\n";
+ exit(0);
+}
diff --git a/testProgs/testMKVSplitter.cpp b/testProgs/testMKVSplitter.cpp
new file mode 100644
index 0000000..2ebfb72
--- /dev/null
+++ b/testProgs/testMKVSplitter.cpp
@@ -0,0 +1,137 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a ".mkv" (i.e., Matroska) file, demultiplexes each track
+// (video, audio, subtitles), and outputs each track to a file.
+// main program
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+
+UsageEnvironment* env;
+char const* programName;
+char const* inputFileName;
+
+// An array of structures representing the state of the video, audio, and subtitle tracks:
+static struct {
+ unsigned trackNumber;
+ FramedSource* source;
+ FileSink* sink;
+} trackState[3];
+
+void onMatroskaFileCreation(MatroskaFile* newFile, void* clientData); // forward
+
+void usage() {
+ *env << "usage: " << programName << " <input-Matroska-or-WebM-file-name>\n";
+ exit(1);
+}
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Parse the command line:
+ programName = argv[0];
+ if (argc != 2) usage();
+ inputFileName = argv[1];
+
+ // Arrange to create a "MatroskaFile" object for the specified file.
+ // (Note that this object is not created immediately, but instead via a callback.)
+ MatroskaFile::createNew(*env, inputFileName, onMatroskaFileCreation, NULL);
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void play(); // forward
+
+void onMatroskaFileCreation(MatroskaFile* matroskaFile, void* /*clientData*/) {
+ // Create a new demultiplexor for the file:
+ MatroskaDemux* matroskaDemux = matroskaFile->newDemux();
+
+ // Create source streams and file sinks for each preferred track;
+
+ unsigned numActiveTracks = 0;
+ for (unsigned i = 0; i < 3; ++i) {
+ unsigned trackNumber;
+ trackState[i].source = matroskaDemux->newDemuxedTrack(trackNumber);
+ trackState[i].trackNumber = trackNumber;
+ trackState[i].sink = NULL; // by default; may get changed below
+
+ if (trackState[i].source == NULL) continue;
+
+ char const* mimeType = matroskaFile->trackMIMEType(trackNumber);
+ if (mimeType == NULL || mimeType[0] == '\0') continue;
+    fprintf(stderr, "#####@@@@@ MatroskaDemuxedTrack for mimeType %s is %p\n", mimeType, (void*)trackState[i].source);
+
+ // Create the file name from "mimeType" by replacing "/" with "-", and adding the
+ // track number at the end:
+ char* fileName = new char[strlen(mimeType) + 100/*more than enough space*/];
+    sprintf(fileName, "%s-%u", mimeType, trackNumber);
+ for (unsigned j = 0; fileName[j] != '\0'; ++j) {
+ if (fileName[j] == '/') {
+ fileName[j] = '-';
+ break;
+ }
+ }
+
+ trackState[i].sink
+ = matroskaFile->createFileSinkForTrackNumber(trackNumber, fileName);
+ if (trackState[i].sink != NULL) {
+ ++numActiveTracks;
+      fprintf(stderr, "Created output file \"%s\" for track %u\n", fileName, trackNumber);
+ }
+ }
+
+ if (numActiveTracks == 0) {
+ *env << "Error: The Matroska file \"" << inputFileName << "\" has no streamable tracks\n";
+ *env << "(Perhaps the file does not exist, or is not a 'Matroska' file.)\n";
+ exit(1);
+ }
+
+ // Start the streaming:
+ play();
+}
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done reading from file\n";
+
+ // Stop playing all sinks, then close the source streams
+ // (which will also close the demultiplexor itself):
+ unsigned i;
+ for (i = 0; i < 3; ++i) {
+ if (trackState[i].sink != NULL) trackState[i].sink->stopPlaying();
+ Medium::close(trackState[i].source); trackState[i].source = NULL;
+ }
+
+ // Finally, close the sinks:
+ for (i = 0; i < 3; ++i) Medium::close(trackState[i].sink);
+
+ exit(0);
+}
+
+void play() {
+ *env << "Beginning to read from file...\n";
+
+ // Start playing each track's RTP sink from its corresponding source:
+ for (unsigned i = 0; i < 3; ++i) {
+ if (trackState[i].sink != NULL && trackState[i].source != NULL) {
+ trackState[i].sink->startPlaying(*trackState[i].source, afterPlaying, NULL);
+ }
+ }
+}
diff --git a/testProgs/testMKVStreamer.cpp b/testProgs/testMKVStreamer.cpp
new file mode 100644
index 0000000..f46b9e2
--- /dev/null
+++ b/testProgs/testMKVStreamer.cpp
@@ -0,0 +1,178 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a ".mkv" (i.e., Matroska) file, demultiplexes each track
+// (video, audio, subtitles), and streams each track using RTP multicast.
+// main program
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+#include <GroupsockHelper.hh>
+
+// Global session state, shared by "main()", the file-creation callback,
+// and the streaming functions below:
+UsageEnvironment* env;
+char const* inputFileName = "test.mkv";
+struct in_addr destinationAddress; // multicast destination for all tracks
+RTSPServer* rtspServer;
+ServerMediaSession* sms;
+MatroskaFile* matroskaFile;
+MatroskaDemux* matroskaDemux;
+
+// An array of structures representing the state of the video, audio, and subtitle tracks:
+static struct {
+  unsigned trackNumber; // Matroska track number (treated as 'no track' when 0, in "afterPlaying()")
+  FramedSource* source; // demuxed (and possibly filtered) track data
+  RTPSink* sink;        // RTP sink that transmits this track
+  RTCPInstance* rtcp;   // RTCP instance paired with "sink"
+} trackState[3];
+
+void onMatroskaFileCreation(MatroskaFile* newFile, void* clientData); // forward
+
+// Program entry point: sets up the usage environment, the RTSP server, and
+// the "ServerMediaSession", then asynchronously opens the Matroska file.
+// The per-track setup continues in "onMatroskaFileCreation()" below.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Define our destination (multicast) IP address:
+  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+  // Note: This is a multicast address. If you wish instead to stream
+  // using unicast, then you should use the "testOnDemandRTSPServer"
+  // test program - not this test program - as a model.
+
+  // Create our RTSP server. (Receivers will need to use RTSP to access the stream.)
+  rtspServer = RTSPServer::createNew(*env, 8554); // port 8554; exits if the port is unavailable
+  if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+    exit(1);
+  }
+  sms = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+                                      "Session streamed by \"testMKVStreamer\"",
+                                      True /*SSM*/);
+
+  // Arrange to create a "MatroskaFile" object for the specified file.
+  // (Note that this object is not created immediately, but instead via a callback.)
+  MatroskaFile::createNew(*env, inputFileName, onMatroskaFileCreation, NULL);
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+void play(); // forward
+
+// Callback invoked once the "MatroskaFile" object has been created.
+// Builds up to three streamable tracks (video, audio, subtitles): for each,
+// a demuxed source, an RTP sink, and an RTCP instance, registering each
+// successfully-created track with the RTSP server's "ServerMediaSession";
+// then starts streaming.
+void onMatroskaFileCreation(MatroskaFile* newFile, void* /*clientData*/) {
+  matroskaFile = newFile;
+
+  // Create a new demultiplexor for the file:
+  matroskaDemux = matroskaFile->newDemux();
+
+  // Create source streams, "RTPSink"s, and "RTCPInstance"s for each preferred track;
+  unsigned short rtpPortNum = 44444; // each track gets rtpPortNum (RTP) and rtpPortNum+1 (RTCP)
+  const unsigned char ttl = 255;
+
+  const unsigned maxCNAMElen = 100;
+  unsigned char CNAME[maxCNAMElen+1];
+  gethostname((char*)CNAME, maxCNAMElen);
+  CNAME[maxCNAMElen] = '\0'; // just in case
+
+  for (unsigned i = 0; i < 3; ++i) {
+    unsigned trackNumber;
+    FramedSource* baseSource = matroskaDemux->newDemuxedTrack(trackNumber);
+    trackState[i].trackNumber = trackNumber;
+
+    unsigned estBitrate, numFiltersInFrontOfTrack;
+    trackState[i].source = matroskaFile
+      ->createSourceForStreaming(baseSource, trackNumber, estBitrate, numFiltersInFrontOfTrack);
+    trackState[i].sink = NULL; // by default; may get changed below
+    trackState[i].rtcp = NULL; // ditto
+
+    if (trackState[i].source != NULL) {
+      Groupsock* rtpGroupsock = new Groupsock(*env, destinationAddress, rtpPortNum, ttl);
+      Groupsock* rtcpGroupsock = new Groupsock(*env, destinationAddress, rtpPortNum+1, ttl);
+      rtpPortNum += 2;
+      // NOTE(review): if sink creation below fails, these two groupsocks are
+      // never freed; acceptable for a test program, but worth confirming.
+
+      trackState[i].sink
+	= matroskaFile->createRTPSinkForTrackNumber(trackNumber, rtpGroupsock, 96+i); // 96+i: dynamic RTP payload type
+      if (trackState[i].sink != NULL) {
+	if (trackState[i].sink->estimatedBitrate() > 0) {
+	  estBitrate = trackState[i].sink->estimatedBitrate(); // hack
+	}
+	trackState[i].rtcp
+	  = RTCPInstance::createNew(*env, rtcpGroupsock, estBitrate, CNAME,
+				    trackState[i].sink, NULL /* we're a server */,
+				    True /* we're a SSM source */);
+	// Note: This starts RTCP running automatically
+
+	// Having set up a track for streaming, add it to our RTSP server's "ServerMediaSession":
+	sms->addSubsession(PassiveServerMediaSubsession::createNew(*trackState[i].sink, trackState[i].rtcp));
+      }
+    }
+  }
+
+  // If no track at all could be streamed, the file is unusable - bail out:
+  if (sms->numSubsessions() == 0) {
+    *env << "Error: The Matroska file \"" << inputFileName << "\" has no streamable tracks\n";
+    *env << "(Perhaps the file does not exist, or is not a 'Matroska' file.)\n";
+    exit(1);
+  }
+
+  rtspServer->addServerMediaSession(sms);
+
+  // Announce the stream's access URL:
+  char* url = rtspServer->rtspURL(sms);
+  *env << "Play this stream using the URL \"" << url << "\"\n";
+  delete[] url;
+
+  // Start the streaming:
+  play();
+}
+
+// Invoked when a sink finishes consuming its track. Tears down the current
+// sources (and demultiplexor), then rebuilds fresh sources from the same
+// "MatroskaFile" so the stream loops; the RTP sinks are reused as-is.
+void afterPlaying(void* /*clientData*/) {
+  *env << "...done reading from file\n";
+
+  // Stop playing all "RTPSink"s, then close the source streams
+  // (which will also close the demultiplexor itself):
+  unsigned i;
+  for (i = 0; i < 3; ++i) {
+    if (trackState[i].sink != NULL) trackState[i].sink->stopPlaying();
+    Medium::close(trackState[i].source); trackState[i].source = NULL;
+  }
+
+  // Create a new demultiplexor from our Matroska file, then new data sources for each track:
+  matroskaDemux = matroskaFile->newDemux();
+  for (i = 0; i < 3; ++i) {
+    if (trackState[i].trackNumber != 0) { // 0 => this slot never had a track
+      FramedSource* baseSource
+	= matroskaDemux->newDemuxedTrackByTrackNumber(trackState[i].trackNumber);
+
+      unsigned estBitrate, numFiltersInFrontOfTrack;
+      trackState[i].source = matroskaFile
+	->createSourceForStreaming(baseSource, trackState[i].trackNumber,
+				   estBitrate, numFiltersInFrontOfTrack);
+    }
+  }
+
+  // Start playing once again:
+  play();
+}
+
+// Starts each fully-constructed track: every slot that has both a sink
+// and a source begins playing; incomplete slots are skipped.
+void play() {
+  *env << "Beginning to read from file...\n";
+
+  for (unsigned trk = 0; trk < 3; ++trk) {
+    if (trackState[trk].sink == NULL) continue;   // no RTP sink for this track
+    if (trackState[trk].source == NULL) continue; // no source for this track
+    trackState[trk].sink->startPlaying(*trackState[trk].source, afterPlaying, NULL);
+  }
+}
diff --git a/testProgs/testMP3-using-ADUs.sdp b/testProgs/testMP3-using-ADUs.sdp
new file mode 100644
index 0000000..d164914
--- /dev/null
+++ b/testProgs/testMP3-using-ADUs.sdp
@@ -0,0 +1,10 @@
+v=0
+o=- 49452 4 IN IP4 127.0.0.1
+s=Test MP3 session
+i=Parameters for the session streamed by "testMP3Streamer"
+t=0 0
+a=tool:testMP3Streamer
+a=type:broadcast
+m=audio 6666 RTP/AVP 96
+c=IN IP4 239.255.42.42/127
+a=rtpmap:96 mpa-robust/90000
diff --git a/testProgs/testMP3.sdp b/testProgs/testMP3.sdp
new file mode 100644
index 0000000..750ba79
--- /dev/null
+++ b/testProgs/testMP3.sdp
@@ -0,0 +1,9 @@
+v=0
+o=- 49452 4 IN IP4 127.0.0.1
+s=Test MP3 session
+i=Parameters for the session streamed by "testMP3Streamer"
+t=0 0
+a=tool:testMP3Streamer
+a=type:broadcast
+m=audio 6666 RTP/AVP 14
+c=IN IP4 239.255.42.42/127
diff --git a/testProgs/testMP3Receiver.cpp b/testProgs/testMP3Receiver.cpp
new file mode 100644
index 0000000..773e7bd
--- /dev/null
+++ b/testProgs/testMP3Receiver.cpp
@@ -0,0 +1,150 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that receives a RTP/RTCP multicast MP3 stream,
+// and outputs the resulting MP3 file stream to 'stdout'
+// main program
+
+#include "liveMedia.hh"
+#include "GroupsockHelper.hh"
+
+#include "BasicUsageEnvironment.hh"
+
+// To receive a stream of 'ADUs' rather than raw MP3 frames, uncomment this:
+//#define STREAM_USING_ADUS 1
+// (For more information about ADUs and interleaving,
+// see <http://www.live555.com/rtp-mp3/>)
+
+// To receive a "source-specific multicast" (SSM) stream, uncomment this:
+//#define USE_SSM 1
+
+void afterPlaying(void* clientData); // forward
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+  FramedSource* source;       // RTP source (possibly wrapped in ADU filters)
+  FileSink* sink;             // writes the received MP3 stream to 'stdout'
+  RTCPInstance* rtcpInstance; // RTCP instance paired with the RTP source
+} sessionState;
+
+UsageEnvironment* env; // global usage environment, shared by all functions
+
+// Program entry point: joins the multicast session, depacketizes the
+// incoming MP3 RTP stream (optionally deinterleaving/reassembling ADUs),
+// and writes the resulting MP3 data to 'stdout'.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Create the data sink for 'stdout':
+  sessionState.sink = FileSink::createNew(*env, "stdout");
+  // Note: The string "stdout" is handled as a special case.
+  // A real file name could have been used instead.
+
+  // Create 'groupsocks' for RTP and RTCP:
+  char const* sessionAddressStr
+#ifdef USE_SSM
+    = "232.255.42.42";
+#else
+    = "239.255.42.42";
+  // Note: If the session is unicast rather than multicast,
+  // then replace this string with "0.0.0.0"
+#endif
+  const unsigned short rtpPortNum = 6666;
+  const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP conventionally uses RTP port + 1
+#ifndef USE_SSM
+  const unsigned char ttl = 1; // low, in case routers don't admin scope
+#endif
+
+  struct in_addr sessionAddress;
+  sessionAddress.s_addr = our_inet_addr(sessionAddressStr);
+  const Port rtpPort(rtpPortNum);
+  const Port rtcpPort(rtcpPortNum);
+
+#ifdef USE_SSM
+  char* sourceAddressStr = "aaa.bbb.ccc.ddd";
+  // replace this with the real source address
+  struct in_addr sourceFilterAddress;
+  sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
+
+  // SSM: source-filtered groupsocks, joined only for the given sender:
+  Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort);
+  Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort);
+  rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0);
+  // our RTCP "RR"s are sent back using unicast
+#else
+  Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl);
+  Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl);
+#endif
+
+  RTPSource* rtpSource;
+#ifndef STREAM_USING_ADUS
+  // Create the data source: a "MPEG Audio RTP source"
+  rtpSource = MPEG1or2AudioRTPSource::createNew(*env, &rtpGroupsock);
+#else
+  // Create the data source: a "MP3 *ADU* RTP source"
+  unsigned char rtpPayloadFormat = 96; // a dynamic payload type
+  rtpSource
+    = MP3ADURTPSource::createNew(*env, &rtpGroupsock, rtpPayloadFormat);
+#endif
+
+  // Create (and start) a 'RTCP instance' for the RTP source:
+  const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share
+  const unsigned maxCNAMElen = 100;
+  unsigned char CNAME[maxCNAMElen+1];
+  gethostname((char*)CNAME, maxCNAMElen);
+  CNAME[maxCNAMElen] = '\0'; // just in case
+  sessionState.rtcpInstance
+    = RTCPInstance::createNew(*env, &rtcpGroupsock,
+			      estimatedSessionBandwidth, CNAME,
+			      NULL /* we're a client */, rtpSource);
+  // Note: This starts RTCP running automatically
+
+  sessionState.source = rtpSource;
+#ifdef STREAM_USING_ADUS
+  // Add a filter that deinterleaves the ADUs after depacketizing them:
+  sessionState.source
+    = MP3ADUdeinterleaver::createNew(*env, sessionState.source);
+  if (sessionState.source == NULL) {
+    *env << "Unable to create an ADU deinterleaving filter for the source\n";
+    exit(1);
+  }
+
+  // Add another filter that converts these ADUs to MP3s:
+  sessionState.source
+    = MP3FromADUSource::createNew(*env, sessionState.source);
+  if (sessionState.source == NULL) {
+    *env << "Unable to create an ADU->MP3 filter for the source\n";
+    exit(1);
+  }
+#endif
+
+  // Finally, start receiving the multicast stream:
+  *env << "Beginning receiving multicast stream...\n";
+  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+
+// Invoked when the sink has finished receiving (e.g., after a RTCP "BYE").
+// Closes all session objects; the event loop then has nothing left to do.
+void afterPlaying(void* /*clientData*/) {
+  *env << "...done receiving\n";
+
+  // End by closing the media (RTCP first, so the "BYE" goes out):
+  Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE
+  Medium::close(sessionState.sink);
+  Medium::close(sessionState.source);
+}
diff --git a/testProgs/testMP3Streamer.cpp b/testProgs/testMP3Streamer.cpp
new file mode 100644
index 0000000..c3247d1
--- /dev/null
+++ b/testProgs/testMP3Streamer.cpp
@@ -0,0 +1,199 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that streams a MP3 file via RTP/RTCP
+// main program
+
+#include "liveMedia.hh"
+#include "GroupsockHelper.hh"
+
+#include "BasicUsageEnvironment.hh"
+
+// To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
+//#define STREAM_USING_ADUS 1
+// To also reorder ADUs before streaming, uncomment the following:
+//#define INTERLEAVE_ADUS 1
+// (For more information about ADUs and interleaving,
+// see <http://www.live555.com/rtp-mp3/>)
+
+// To stream using "source-specific multicast" (SSM), uncomment the following:
+//#define USE_SSM 1
+#ifdef USE_SSM
+Boolean const isSSM = True;
+#else
+Boolean const isSSM = False;
+#endif
+
+// To set up an internal RTSP server, uncomment the following:
+//#define IMPLEMENT_RTSP_SERVER 1
+// (Note that this RTSP server works for multicast only)
+
+#ifdef IMPLEMENT_RTSP_SERVER
+RTSPServer* rtspServer;
+#endif
+
+UsageEnvironment* env;
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+  FramedSource* source;       // MP3 file source (possibly wrapped in ADU filters)
+  RTPSink* sink;              // RTP sink that transmits the audio
+  RTCPInstance* rtcpInstance; // RTCP instance paired with "sink"
+  Groupsock* rtpGroupsock;    // socket used for outgoing RTP packets
+  Groupsock* rtcpGroupsock;   // socket used for outgoing RTCP packets
+} sessionState;
+
+char const* inputFileName = "test.mp3";
+
+void play(); // forward
+
+// Program entry point: creates the RTP/RTCP sockets and the MP3 RTP sink
+// (optionally an internal RTSP server), then calls "play()" to begin
+// streaming the input file, looping forever in the event loop.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Create 'groupsocks' for RTP and RTCP:
+  char const* destinationAddressStr
+#ifdef USE_SSM
+    = "232.255.42.42";
+#else
+    = "239.255.42.42";
+  // Note: This is a multicast address. If you wish to stream using
+  // unicast instead, then replace this string with the unicast address
+  // of the (single) destination. (You may also need to make a similar
+  // change to the receiver program.)
+#endif
+  const unsigned short rtpPortNum = 6666;
+  const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP conventionally uses RTP port + 1
+  const unsigned char ttl = 1; // low, in case routers don't admin scope
+
+  struct in_addr destinationAddress;
+  destinationAddress.s_addr = our_inet_addr(destinationAddressStr);
+  const Port rtpPort(rtpPortNum);
+  const Port rtcpPort(rtcpPortNum);
+
+  sessionState.rtpGroupsock
+    = new Groupsock(*env, destinationAddress, rtpPort, ttl);
+  sessionState.rtcpGroupsock
+    = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
+#ifdef USE_SSM
+  sessionState.rtpGroupsock->multicastSendOnly();
+  sessionState.rtcpGroupsock->multicastSendOnly();
+#endif
+
+  // Create a 'MP3 RTP' sink from the RTP 'groupsock':
+#ifdef STREAM_USING_ADUS
+  unsigned char rtpPayloadFormat = 96; // A dynamic payload format code
+  sessionState.sink
+    = MP3ADURTPSink::createNew(*env, sessionState.rtpGroupsock,
+			       rtpPayloadFormat);
+#else
+  sessionState.sink
+    = MPEG1or2AudioRTPSink::createNew(*env, sessionState.rtpGroupsock);
+#endif
+
+  // Create (and start) a 'RTCP instance' for this RTP sink:
+  const unsigned estimatedSessionBandwidth = 160; // in kbps; for RTCP b/w share
+  const unsigned maxCNAMElen = 100;
+  unsigned char CNAME[maxCNAMElen+1];
+  gethostname((char*)CNAME, maxCNAMElen);
+  CNAME[maxCNAMElen] = '\0'; // just in case
+  sessionState.rtcpInstance
+    = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
+			      estimatedSessionBandwidth, CNAME,
+			      sessionState.sink, NULL /* we're a server */,
+			      isSSM);
+  // Note: This starts RTCP running automatically
+
+#ifdef IMPLEMENT_RTSP_SERVER
+  rtspServer = RTSPServer::createNew(*env);
+  // Note that this (attempts to) start a server on the default RTSP server
+  // port: 554. To use a different port number, add it as an extra
+  // (optional) parameter to the "RTSPServer::createNew()" call above.
+  if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+    exit(1);
+  }
+  ServerMediaSession* sms
+    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+	   "Session streamed by \"testMP3Streamer\"", isSSM);
+  sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
+  rtspServer->addServerMediaSession(sms);
+
+  // Announce the stream's access URL:
+  char* url = rtspServer->rtspURL(sms);
+  *env << "Play this stream using the URL \"" << url << "\"\n";
+  delete[] url;
+#endif
+
+  play();
+
+  env->taskScheduler().doEventLoop(); // does not return
+  return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* clientData); // forward
+
+// (Re)opens the input MP3 file, optionally wraps it in ADU conversion and
+// interleaving filters, and starts the RTP sink playing from it.
+// Called initially from "main()" and again from "afterPlaying()" to loop.
+void play() {
+  // Open the file as a 'MP3 file source':
+  sessionState.source = MP3FileSource::createNew(*env, inputFileName);
+  if (sessionState.source == NULL) {
+    *env << "Unable to open file \"" << inputFileName
+	 << "\" as a MP3 file source\n";
+    exit(1);
+  }
+
+#ifdef STREAM_USING_ADUS
+  // Add a filter that converts the source MP3s to ADUs:
+  sessionState.source
+    = ADUFromMP3Source::createNew(*env, sessionState.source);
+  if (sessionState.source == NULL) {
+    *env << "Unable to create a MP3->ADU filter for the source\n";
+    exit(1);
+  }
+
+#ifdef INTERLEAVE_ADUS
+  // Add another filter that interleaves the ADUs before packetizing them:
+  unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own order...
+  unsigned const interleaveCycleSize
+    = (sizeof interleaveCycle)/(sizeof (unsigned char));
+  Interleaving interleaving(interleaveCycleSize, interleaveCycle);
+  sessionState.source
+    = MP3ADUinterleaver::createNew(*env, interleaving, sessionState.source);
+  if (sessionState.source == NULL) {
+    *env << "Unable to create an ADU interleaving filter for the source\n";
+    exit(1);
+  }
+#endif
+#endif
+
+  // Finally, start the streaming:
+  *env << "Beginning streaming...\n";
+  sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
+}
+
+
+// Invoked when the sink has consumed the whole file. Closes the current
+// source and immediately restarts "play()", so the file streams in a loop.
+void afterPlaying(void* /*clientData*/) {
+  *env << "...done streaming\n";
+
+  // Halt the sink before tearing down its source:
+  sessionState.sink->stopPlaying();
+
+  // End this loop by closing the current source:
+  Medium::close(sessionState.source);
+
+  // And start another loop:
+  play();
+}
diff --git a/testProgs/testMPEG1or2AudioVideo.sdp b/testProgs/testMPEG1or2AudioVideo.sdp
new file mode 100644
index 0000000..3cb7775
--- /dev/null
+++ b/testProgs/testMPEG1or2AudioVideo.sdp
@@ -0,0 +1,11 @@
+v=0
+o=- 49451 3 IN IP4 127.0.0.1
+s=Test MPEG Audio+Video session
+i=Parameters for the session streamed by "testMPEG1or2AudioVideoStreamer"
+t=0 0
+a=tool:testMPEG1or2AudioVideoStreamer
+a=type:broadcast
+m=audio 6666 RTP/AVP 14
+c=IN IP4 239.255.42.42/127
+m=video 8888 RTP/AVP 32
+c=IN IP4 239.255.42.42/127
diff --git a/testProgs/testMPEG1or2AudioVideoStreamer.cpp b/testProgs/testMPEG1or2AudioVideoStreamer.cpp
new file mode 100644
index 0000000..ff1a839
--- /dev/null
+++ b/testProgs/testMPEG1or2AudioVideoStreamer.cpp
@@ -0,0 +1,202 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a MPEG-1 or 2 Program Stream file,
+// splits it into Audio and Video Elementary Streams,
+// and streams both using RTP
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+// Global session state, shared by "main()", "play()", and "afterPlaying()":
+UsageEnvironment* env;
+char const* inputFileName = "test.mpg";
+MPEG1or2Demux* mpegDemux;  // demultiplexes the Program Stream input
+FramedSource* audioSource; // framed audio Elementary Stream
+FramedSource* videoSource; // framed video Elementary Stream
+RTPSink* audioSink;
+RTPSink* videoSink;
+
+void play(); // forward
+
+// To stream using "source-specific multicast" (SSM), uncomment the following:
+//#define USE_SSM 1
+#ifdef USE_SSM
+Boolean const isSSM = True;
+#else
+Boolean const isSSM = False;
+#endif
+
+// To set up an internal RTSP server, uncomment the following:
+//#define IMPLEMENT_RTSP_SERVER 1
+// (Note that this RTSP server works for multicast only)
+
+// To stream *only* MPEG "I" frames (e.g., to reduce network bandwidth),
+// change the following "False" to "True":
+Boolean iFramesOnly = False;
+
+// Program entry point: creates separate RTP/RTCP socket pairs and sinks for
+// the audio and video Elementary Streams (optionally an internal RTSP
+// server), then calls "play()" to begin streaming the Program Stream file.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Create 'groupsocks' for RTP and RTCP:
+  char const* destinationAddressStr
+#ifdef USE_SSM
+    = "232.255.42.42";
+#else
+    = "239.255.42.42";
+  // Note: This is a multicast address. If you wish to stream using
+  // unicast instead, then replace this string with the unicast address
+  // of the (single) destination. (You may also need to make a similar
+  // change to the receiver program.)
+#endif
+  const unsigned short rtpPortNumAudio = 6666;
+  const unsigned short rtcpPortNumAudio = rtpPortNumAudio+1; // RTCP = RTP port + 1
+  const unsigned short rtpPortNumVideo = 8888;
+  const unsigned short rtcpPortNumVideo = rtpPortNumVideo+1;
+  const unsigned char ttl = 7; // low, in case routers don't admin scope
+
+  struct in_addr destinationAddress;
+  destinationAddress.s_addr = our_inet_addr(destinationAddressStr);
+  const Port rtpPortAudio(rtpPortNumAudio);
+  const Port rtcpPortAudio(rtcpPortNumAudio);
+  const Port rtpPortVideo(rtpPortNumVideo);
+  const Port rtcpPortVideo(rtcpPortNumVideo);
+
+  Groupsock rtpGroupsockAudio(*env, destinationAddress, rtpPortAudio, ttl);
+  Groupsock rtcpGroupsockAudio(*env, destinationAddress, rtcpPortAudio, ttl);
+  Groupsock rtpGroupsockVideo(*env, destinationAddress, rtpPortVideo, ttl);
+  Groupsock rtcpGroupsockVideo(*env, destinationAddress, rtcpPortVideo, ttl);
+#ifdef USE_SSM
+  rtpGroupsockAudio.multicastSendOnly();
+  rtcpGroupsockAudio.multicastSendOnly();
+  rtpGroupsockVideo.multicastSendOnly();
+  rtcpGroupsockVideo.multicastSendOnly();
+#endif
+
+  // Create a 'MPEG Audio RTP' sink from the RTP 'groupsock':
+  audioSink = MPEG1or2AudioRTPSink::createNew(*env, &rtpGroupsockAudio);
+
+  // Create (and start) a 'RTCP instance' for this RTP sink:
+  const unsigned estimatedSessionBandwidthAudio = 160; // in kbps; for RTCP b/w share
+  const unsigned maxCNAMElen = 100;
+  unsigned char CNAME[maxCNAMElen+1];
+  gethostname((char*)CNAME, maxCNAMElen);
+  CNAME[maxCNAMElen] = '\0'; // just in case
+#ifdef IMPLEMENT_RTSP_SERVER
+  RTCPInstance* audioRTCP = // the RTCP instance is only needed by name for the RTSP server
+#endif
+  RTCPInstance::createNew(*env, &rtcpGroupsockAudio,
+			  estimatedSessionBandwidthAudio, CNAME,
+			  audioSink, NULL /* we're a server */, isSSM);
+  // Note: This starts RTCP running automatically
+
+  // Create a 'MPEG Video RTP' sink from the RTP 'groupsock':
+  videoSink = MPEG1or2VideoRTPSink::createNew(*env, &rtpGroupsockVideo);
+
+  // Create (and start) a 'RTCP instance' for this RTP sink:
+  const unsigned estimatedSessionBandwidthVideo = 4500; // in kbps; for RTCP b/w share
+#ifdef IMPLEMENT_RTSP_SERVER
+  RTCPInstance* videoRTCP =
+#endif
+  RTCPInstance::createNew(*env, &rtcpGroupsockVideo,
+			  estimatedSessionBandwidthVideo, CNAME,
+			  videoSink, NULL /* we're a server */, isSSM);
+  // Note: This starts RTCP running automatically
+
+#ifdef IMPLEMENT_RTSP_SERVER
+  RTSPServer* rtspServer = RTSPServer::createNew(*env);
+  // Note that this (attempts to) start a server on the default RTSP server
+  // port: 554. To use a different port number, add it as an extra
+  // (optional) parameter to the "RTSPServer::createNew()" call above.
+  if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+    exit(1);
+  }
+  ServerMediaSession* sms
+    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+	   "Session streamed by \"testMPEG1or2AudioVideoStreamer\"",
+					   isSSM);
+  sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, audioRTCP));
+  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, videoRTCP));
+  rtspServer->addServerMediaSession(sms);
+
+  // Announce the stream's access URL:
+  char* url = rtspServer->rtspURL(sms);
+  *env << "Play this stream using the URL \"" << url << "\"\n";
+  delete[] url;
+#endif
+
+  // Finally, start the streaming:
+  *env << "Beginning streaming...\n";
+  play();
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+// Invoked once per sink as each finishes ("clientData" is the sink that
+// finished - see the "startPlaying()" calls in "play()"). Acts as a
+// rendezvous: it returns early until BOTH sinks are done, then tears down
+// the demux/framer chain and restarts "play()" to loop the file.
+void afterPlaying(void* clientData) {
+  // One of the sinks has ended playing.
+  // Check whether any of the sources have a pending read. If so,
+  // wait until its sink ends playing also:
+  if (audioSource->isCurrentlyAwaitingData()
+      || videoSource->isCurrentlyAwaitingData()) return;
+
+  // Now that both sinks have ended, close both input sources,
+  // and start playing again:
+  *env << "...done reading from file\n";
+
+  audioSink->stopPlaying();
+  videoSink->stopPlaying();
+  // ensures that both are shut down
+  Medium::close(audioSource);
+  Medium::close(videoSource);
+  Medium::close(mpegDemux);
+  // Note: This also closes the input file that this source read from.
+
+  // Start playing once again:
+  play();
+}
+
+// (Re)opens the Program Stream file, builds the demux + framer chain for
+// the audio and video Elementary Streams, and starts both RTP sinks.
+// Each sink's completion callback receives the sink itself as clientData.
+void play() {
+  // Open the input file as a 'byte-stream file source':
+  ByteStreamFileSource* fileSource
+    = ByteStreamFileSource::createNew(*env, inputFileName);
+  if (fileSource == NULL) {
+    *env << "Unable to open file \"" << inputFileName
+	 << "\" as a byte-stream file source\n";
+    exit(1);
+  }
+
+  // We must demultiplex Audio and Video Elementary Streams
+  // from the input source:
+  mpegDemux = MPEG1or2Demux::createNew(*env, fileSource);
+  FramedSource* audioES = mpegDemux->newAudioStream();
+  FramedSource* videoES = mpegDemux->newVideoStream();
+
+  // Create a framer for each Elementary Stream:
+  audioSource
+    = MPEG1or2AudioStreamFramer::createNew(*env, audioES);
+  videoSource
+    = MPEG1or2VideoStreamFramer::createNew(*env, videoES, iFramesOnly);
+
+  // Finally, start playing each sink.
+  *env << "Beginning to read from file...\n";
+  videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+  audioSink->startPlaying(*audioSource, afterPlaying, audioSink);
+}
diff --git a/testProgs/testMPEG1or2ProgramToTransportStream.cpp b/testProgs/testMPEG1or2ProgramToTransportStream.cpp
new file mode 100644
index 0000000..a171ae2
--- /dev/null
+++ b/testProgs/testMPEG1or2ProgramToTransportStream.cpp
@@ -0,0 +1,74 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that converts a MPEG-1 or 2 Program Stream file into
+// a Transport Stream file.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+char const* inputFileName = "in.mpg";
+char const* outputFileName = "out.ts";
+
+void afterPlaying(void* clientData); // forward
+
+UsageEnvironment* env;
+
+// Program entry point: builds the conversion pipeline
+//   file -> MPEG-1/2 demux -> raw PES packets -> Transport Stream -> file
+// and runs it to completion inside the event loop.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Open the input file as a 'byte-stream file source':
+  FramedSource* inputSource = ByteStreamFileSource::createNew(*env, inputFileName);
+  if (inputSource == NULL) {
+    *env << "Unable to open file \"" << inputFileName
+	 << "\" as a byte-stream file source\n";
+    exit(1);
+  }
+
+  // Create a MPEG demultiplexor that reads from that source.
+  MPEG1or2Demux* baseDemultiplexor = MPEG1or2Demux::createNew(*env, inputSource);
+
+  // Create, from this, a source that returns raw PES packets:
+  MPEG1or2DemuxedElementaryStream* pesSource = baseDemultiplexor->newRawPESStream();
+
+  // And, from this, a filter that converts to MPEG-2 Transport Stream frames:
+  FramedSource* tsFrames
+    = MPEG2TransportStreamFromPESSource::createNew(*env, pesSource);
+
+  // Open the output file as a 'file sink':
+  MediaSink* outputSink = FileSink::createNew(*env, outputFileName);
+  if (outputSink == NULL) {
+    *env << "Unable to open file \"" << outputFileName << "\" as a file sink\n";
+    exit(1);
+  }
+
+  // Finally, start playing:
+  *env << "Beginning to read...\n";
+  outputSink->startPlaying(*tsFrames, afterPlaying, NULL);
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+// Invoked when the sink has consumed the entire input: report and quit.
+void afterPlaying(void* /*clientData*/) {
+  *env << "Done reading.\n";
+  *env << "Wrote output file: \"" << outputFileName << "\"\n";
+  exit(0);
+}
diff --git a/testProgs/testMPEG1or2Splitter.cpp b/testProgs/testMPEG1or2Splitter.cpp
new file mode 100644
index 0000000..f09e588
--- /dev/null
+++ b/testProgs/testMPEG1or2Splitter.cpp
@@ -0,0 +1,102 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that splits a MPEG-1 or 2 Program Stream file into
+// video and audio output files.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include <stdlib.h>
+
+char const* inputFileName = "in.mpg";
+char const* outputFileName_video = "out_video.mpg";
+char const* outputFileName_audio = "out_audio.mpg";
+
+void afterPlaying(void* clientData); // forward
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+ MPEG1or2Demux* baseDemultiplexor; // reads the Program Stream input
+ MediaSource* videoSource; // demuxed video elementary stream
+ MediaSource* audioSource; // demuxed audio elementary stream
+ FileSink* videoSink; // set to NULL in "afterPlaying()" once video finishes
+ FileSink* audioSink; // set to NULL in "afterPlaying()" once audio finishes
+} sessionState;
+
+UsageEnvironment* env;
+
+int main(int argc, char** argv) { // args unused; file names are the fixed globals above
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* inputSource
+ = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (inputSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ // Create an MPEG demultiplexor that reads from that source:
+ sessionState.baseDemultiplexor = MPEG1or2Demux::createNew(*env, inputSource);
+
+ // Create, from this, our own sources (video and audio):
+ sessionState.videoSource = sessionState.baseDemultiplexor->newVideoStream();
+ sessionState.audioSource = sessionState.baseDemultiplexor->newAudioStream();
+
+ // Create the data sinks (output files): // NOTE(review): createNew() results are not NULL-checked here — verify output paths are writable
+ sessionState.videoSink = FileSink::createNew(*env, outputFileName_video);
+ sessionState.audioSink = FileSink::createNew(*env, outputFileName_audio);
+
+ // Finally, start playing each sink. Both run concurrently in the event loop;
+ *env << "Beginning to read...\n";
+ sessionState.videoSink->startPlaying(*sessionState.videoSource,
+ afterPlaying, sessionState.videoSink); // the sink itself is the clientData
+ sessionState.audioSink->startPlaying(*sessionState.audioSource,
+ afterPlaying, sessionState.audioSink);
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* clientData) { // called once per sink, when that stream ends
+ Medium* finishedSink = (Medium*)clientData; // clientData identifies which sink finished
+
+ if (finishedSink == sessionState.videoSink) {
+ *env << "No more video\n";
+ Medium::close(sessionState.videoSink);
+ Medium::close(sessionState.videoSource);
+ sessionState.videoSink = NULL; // mark video as done
+ } else if (finishedSink == sessionState.audioSink) {
+ *env << "No more audio\n";
+ Medium::close(sessionState.audioSink);
+ Medium::close(sessionState.audioSource);
+ sessionState.audioSink = NULL; // mark audio as done
+ }
+
+ if (sessionState.videoSink == NULL && sessionState.audioSink == NULL) { // both streams done
+ *env << "...finished reading\n";
+
+ Medium::close(sessionState.baseDemultiplexor);
+
+ exit(0);
+ }
+}
diff --git a/testProgs/testMPEG1or2Video.sdp b/testProgs/testMPEG1or2Video.sdp
new file mode 100644
index 0000000..c3b9c73
--- /dev/null
+++ b/testProgs/testMPEG1or2Video.sdp
@@ -0,0 +1,9 @@
+v=0
+o=- 49451 3 IN IP4 127.0.0.1
+s=Test MPEG Video session
+i=Parameters for the session streamed by "testMPEG1or2VideoStreamer"
+t=0 0
+a=tool:testMPEG1or2VideoStreamer
+a=type:broadcast
+m=video 8888 RTP/AVP 32
+c=IN IP4 239.255.42.42/127
diff --git a/testProgs/testMPEG1or2VideoReceiver.cpp b/testProgs/testMPEG1or2VideoReceiver.cpp
new file mode 100644
index 0000000..57c248f
--- /dev/null
+++ b/testProgs/testMPEG1or2VideoReceiver.cpp
@@ -0,0 +1,118 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that receives a RTP/RTCP multicast MPEG video stream,
+// and outputs the resulting MPEG file stream to 'stdout'
+// main program
+
+#include "liveMedia.hh"
+#include "GroupsockHelper.hh"
+
+#include "BasicUsageEnvironment.hh"
+
+// To receive a "source-specific multicast" (SSM) stream, uncomment this:
+//#define USE_SSM 1
+
+void afterPlaying(void* clientData); // forward
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+ RTPSource* source; // incoming RTP video stream
+ MediaSink* sink; // writes received data to 'stdout'
+ RTCPInstance* rtcpInstance; // RTCP reports for the session
+} sessionState;
+
+UsageEnvironment* env;
+
+int main(int argc, char** argv) { // args unused; session parameters are hard-coded below
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create the data sink for 'stdout':
+ sessionState.sink = FileSink::createNew(*env, "stdout");
+ // Note: The string "stdout" is handled as a special case.
+ // A real file name could have been used instead.
+
+ // Create 'groupsocks' for RTP and RTCP:
+ char const* sessionAddressStr
+#ifdef USE_SSM
+ = "232.255.42.42";
+#else
+ = "239.255.42.42";
+ // Note: If the session is unicast rather than multicast,
+ // then replace this string with "0.0.0.0"
+#endif
+ const unsigned short rtpPortNum = 8888; // must match the streamer's RTP port (and the .sdp file)
+ const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP conventionally uses RTP port + 1
+#ifndef USE_SSM
+ const unsigned char ttl = 1; // low, in case routers don't admin scope
+#endif
+
+ struct in_addr sessionAddress;
+ sessionAddress.s_addr = our_inet_addr(sessionAddressStr); // inet_addr-style dotted-quad parse
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+#ifdef USE_SSM
+ char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // placeholder — must be edited before building with USE_SSM
+ // replace this with the real source address
+ struct in_addr sourceFilterAddress;
+ sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
+
+ Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort);
+ Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort);
+ rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0);
+ // our RTCP "RR"s are sent back using unicast
+#else
+ Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl);
+ Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl);
+#endif
+
+ // Create the data source: a "MPEG Video RTP source"
+ sessionState.source = MPEG1or2VideoRTPSource::createNew(*env, &rtpGroupsock);
+
+ // Create (and start) a 'RTCP instance' for the RTP source:
+ const unsigned estimatedSessionBandwidth = 4500; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen); // use our host name as the RTCP CNAME
+ CNAME[maxCNAMElen] = '\0'; // just in case gethostname() didn't NUL-terminate
+ sessionState.rtcpInstance
+ = RTCPInstance::createNew(*env, &rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ NULL /* we're a client */, sessionState.source);
+ // Note: This starts RTCP running automatically
+
+ // Finally, start receiving the multicast stream:
+ *env << "Beginning receiving multicast stream...\n";
+ sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL); // afterPlaying runs when the source closes
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+
+void afterPlaying(void* /*clientData*/) { // called when the RTP source ends (e.g., RTCP BYE)
+ *env << "...done receiving\n";
+
+ // End by closing the media:
+ Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE
+ Medium::close(sessionState.sink);
+ Medium::close(sessionState.source);
+}
diff --git a/testProgs/testMPEG1or2VideoStreamer.cpp b/testProgs/testMPEG1or2VideoStreamer.cpp
new file mode 100644
index 0000000..ca91541
--- /dev/null
+++ b/testProgs/testMPEG1or2VideoStreamer.cpp
@@ -0,0 +1,174 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a MPEG-1 or 2 Video Elementary Stream file,
+// and streams it using RTP
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+// Uncomment the following if the input file is a MPEG Program Stream
+// rather than a MPEG Video Elementary Stream
+//#define SOURCE_IS_PROGRAM_STREAM 1
+
+// To stream using "source-specific multicast" (SSM), uncomment the following:
+//#define USE_SSM 1
+#ifdef USE_SSM
+Boolean const isSSM = True;
+#else
+Boolean const isSSM = False;
+#endif
+
+// To set up an internal RTSP server, uncomment the following:
+//#define IMPLEMENT_RTSP_SERVER 1
+// (Note that this RTSP server works for multicast only)
+
+// To stream *only* MPEG "I" frames (e.g., to reduce network bandwidth),
+// change the following "False" to "True":
+Boolean iFramesOnly = False;
+
+UsageEnvironment* env;
+char const* inputFileName = "test.mpg";
+#ifdef SOURCE_IS_PROGRAM_STREAM
+MPEG1or2Demux* mpegDemux;
+#endif
+MediaSource* videoSource;
+RTPSink* videoSink;
+
+void play(); // forward
+
+int main(int argc, char** argv) { // args unused; input file and addresses are hard-coded above
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create 'groupsocks' for RTP and RTCP:
+ char const* destinationAddressStr
+#ifdef USE_SSM
+ = "232.255.42.42";
+#else
+ = "239.255.42.42";
+ // Note: This is a multicast address. If you wish to stream using
+ // unicast instead, then replace this string with the unicast address
+ // of the (single) destination. (You may also need to make a similar
+ // change to the receiver program.)
+#endif
+ const unsigned short rtpPortNum = 8888; // matches testMPEG1or2Video.sdp and the receiver program
+ const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP conventionally uses RTP port + 1
+ const unsigned char ttl = 7; // low, in case routers don't admin scope
+
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = our_inet_addr(destinationAddressStr); // inet_addr-style dotted-quad parse
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+ Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
+ Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
+#ifdef USE_SSM
+ rtpGroupsock.multicastSendOnly(); // we're a SSM source; don't join the group to receive
+ rtcpGroupsock.multicastSendOnly();
+#endif
+
+ // Create a 'MPEG Video RTP' sink from the RTP 'groupsock':
+ videoSink = MPEG1or2VideoRTPSink::createNew(*env, &rtpGroupsock);
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ const unsigned estimatedSessionBandwidth = 4500; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen); // use our host name as the RTCP CNAME
+ CNAME[maxCNAMElen] = '\0'; // just in case gethostname() didn't NUL-terminate
+#ifdef IMPLEMENT_RTSP_SERVER
+ RTCPInstance* rtcp =
+#endif
+ RTCPInstance::createNew(*env, &rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ videoSink, NULL /* we're a server */, isSSM);
+ // Note: This starts RTCP running automatically
+
+#ifdef IMPLEMENT_RTSP_SERVER
+ RTSPServer* rtspServer = RTSPServer::createNew(*env);
+ // Note that this (attempts to) start a server on the default RTSP server
+ // port: 554. To use a different port number, add it as an extra
+ // (optional) parameter to the "RTSPServer::createNew()" call above.
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+ "Session streamed by \"testMPEG1or2VideoStreamer\"",
+ isSSM);
+ sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp)); // 'passive': clients join the existing multicast stream
+ rtspServer->addServerMediaSession(sms);
+
+ char* url = rtspServer->rtspURL(sms);
+ *env << "Play this stream using the URL \"" << url << "\"\n";
+ delete[] url;
+#endif
+
+ // Finally, start the streaming:
+ *env << "Beginning streaming...\n";
+ play(); // opens the input file and starts the sink
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) { // called by the sink at end-of-file
+ *env << "...done reading from file\n";
+
+ videoSink->stopPlaying();
+ Medium::close(videoSource);
+#ifdef SOURCE_IS_PROGRAM_STREAM
+ Medium::close(mpegDemux);
+#endif
+ // Note that this also closes the input file that this source read from.
+
+ play(); // loop: re-open the file and stream it again, indefinitely
+}
+
+void play() { // (re)builds the source chain and starts streaming into "videoSink"
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(*env, inputFileName);
+ if (fileSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ FramedSource* videoES;
+#ifdef SOURCE_IS_PROGRAM_STREAM
+ // We must demultiplex a Video Elementary Stream from the input source:
+ mpegDemux = MPEG1or2Demux::createNew(*env, fileSource);
+ videoES = mpegDemux->newVideoStream();
+#else
+ // The input source is assumed to already be a Video Elementary Stream:
+ videoES = fileSource;
+#endif
+
+ // Create a framer for the Video Elementary Stream:
+ videoSource
+ = MPEG1or2VideoStreamFramer::createNew(*env, videoES, iFramesOnly); // iFramesOnly optionally drops non-"I" frames
+
+ // Finally, start playing:
+ *env << "Beginning to read from file...\n";
+ videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+}
diff --git a/testProgs/testMPEG2Transport.sdp b/testProgs/testMPEG2Transport.sdp
new file mode 100644
index 0000000..5d2d69f
--- /dev/null
+++ b/testProgs/testMPEG2Transport.sdp
@@ -0,0 +1,9 @@
+v=0
+o=- 49451 3 IN IP4 127.0.0.1
+s=Test MPEG-2 Transport Stream session
+i=Parameters for the session streamed by "testMPEG2TransportStreamer"
+t=0 0
+a=tool:testMPEG2TransportStreamer
+a=type:broadcast
+m=video 1234 RTP/AVP 33
+c=IN IP4 239.255.42.42/127
diff --git a/testProgs/testMPEG2TransportReceiver.cpp b/testProgs/testMPEG2TransportReceiver.cpp
new file mode 100644
index 0000000..bf7fcae
--- /dev/null
+++ b/testProgs/testMPEG2TransportReceiver.cpp
@@ -0,0 +1,118 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that receives a RTP/RTCP multicast MPEG-2 Transport Stream,
+// and outputs the resulting Transport Stream data to 'stdout'
+// main program
+
+#include "liveMedia.hh"
+#include "GroupsockHelper.hh"
+
+#include "BasicUsageEnvironment.hh"
+
+// To receive a "source-specific multicast" (SSM) stream, uncomment this:
+//#define USE_SSM 1
+
+void afterPlaying(void* clientData); // forward
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+ RTPSource* source; // incoming RTP Transport Stream
+ MediaSink* sink; // writes received data to 'stdout'
+ RTCPInstance* rtcpInstance; // RTCP reports for the session
+} sessionState;
+
+UsageEnvironment* env;
+
+int main(int argc, char** argv) { // args unused; session parameters are hard-coded below
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create the data sink for 'stdout':
+ sessionState.sink = FileSink::createNew(*env, "stdout");
+ // Note: The string "stdout" is handled as a special case.
+ // A real file name could have been used instead.
+
+ // Create 'groupsocks' for RTP and RTCP:
+ char const* sessionAddressStr
+#ifdef USE_SSM
+ = "232.255.42.42";
+#else
+ = "239.255.42.42";
+ // Note: If the session is unicast rather than multicast,
+ // then replace this string with "0.0.0.0"
+#endif
+ const unsigned short rtpPortNum = 1234; // must match the streamer's RTP port (and the .sdp file)
+ const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP conventionally uses RTP port + 1
+#ifndef USE_SSM
+ const unsigned char ttl = 1; // low, in case routers don't admin scope
+#endif
+
+ struct in_addr sessionAddress;
+ sessionAddress.s_addr = our_inet_addr(sessionAddressStr); // inet_addr-style dotted-quad parse
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+#ifdef USE_SSM
+ char* sourceAddressStr = "aaa.bbb.ccc.ddd"; // placeholder — must be edited before building with USE_SSM
+ // replace this with the real source address
+ struct in_addr sourceFilterAddress;
+ sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
+
+ Groupsock rtpGroupsock(*env, sessionAddress, sourceFilterAddress, rtpPort);
+ Groupsock rtcpGroupsock(*env, sessionAddress, sourceFilterAddress, rtcpPort);
+ rtcpGroupsock.changeDestinationParameters(sourceFilterAddress,0,~0);
+ // our RTCP "RR"s are sent back using unicast
+#else
+ Groupsock rtpGroupsock(*env, sessionAddress, rtpPort, ttl);
+ Groupsock rtcpGroupsock(*env, sessionAddress, rtcpPort, ttl);
+#endif
+
+ // Create the data source: a "MPEG-2 TransportStream RTP source" (which uses a 'simple' RTP payload format):
+ sessionState.source = SimpleRTPSource::createNew(*env, &rtpGroupsock, 33, 90000, "video/MP2T", 0, False /*no 'M' bit*/); // payload type 33, 90kHz clock (MP2T)
+
+ // Create (and start) a 'RTCP instance' for the RTP source:
+ const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen); // use our host name as the RTCP CNAME
+ CNAME[maxCNAMElen] = '\0'; // just in case gethostname() didn't NUL-terminate
+ sessionState.rtcpInstance
+ = RTCPInstance::createNew(*env, &rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ NULL /* we're a client */, sessionState.source);
+ // Note: This starts RTCP running automatically
+
+ // Finally, start receiving the multicast stream:
+ *env << "Beginning receiving multicast stream...\n";
+ sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL); // afterPlaying runs when the source closes
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+
+void afterPlaying(void* /*clientData*/) { // called when the RTP source ends (e.g., RTCP BYE)
+ *env << "...done receiving\n";
+
+ // End by closing the media:
+ Medium::close(sessionState.rtcpInstance); // Note: Sends a RTCP BYE
+ Medium::close(sessionState.sink);
+ Medium::close(sessionState.source);
+}
diff --git a/testProgs/testMPEG2TransportStreamSplitter.cpp b/testProgs/testMPEG2TransportStreamSplitter.cpp
new file mode 100644
index 0000000..c5f573c
--- /dev/null
+++ b/testProgs/testMPEG2TransportStreamSplitter.cpp
@@ -0,0 +1,66 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that splits a MPEG Transport Stream input (on 'stdin')
+// into separate video and audio output files.
+// main program
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+
+UsageEnvironment* env;
+char const* programName;
+char const* inputFileName = "stdin";
+MPEG2TransportStreamDemux* baseDemultiplexor = NULL;
+
+void usage() { // print usage to the environment's output and abort
+ *env << "usage: " << programName << " takes no arguments (it reads from \"stdin\")\n";
+ exit(1);
+}
+
+void afterReading(void*); // forward; completion callback for the demultiplexor
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Parse the command line:
+ programName = argv[0];
+ if (argc != 1) usage(); // no arguments are accepted; input comes from "stdin"
+
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* inputSource
+ = ByteStreamFileSource::createNew(*env, inputFileName); // "stdin" is handled as a special-case name
+ if (inputSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ // Create a demultiplexor that reads from that source, creating new 'demultiplexed tracks'
+ // as they appear:
+ baseDemultiplexor = MPEG2TransportStreamDemux::createNew(*env, inputSource, afterReading, NULL); // it writes each track to its own output file
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterReading(void* /*clientData*/) { // called when the input Transport Stream is exhausted
+ *env << "...done\n";
+ exit(0);
+}
diff --git a/testProgs/testMPEG2TransportStreamTrickPlay.cpp b/testProgs/testMPEG2TransportStreamTrickPlay.cpp
new file mode 100644
index 0000000..13fd057
--- /dev/null
+++ b/testProgs/testMPEG2TransportStreamTrickPlay.cpp
@@ -0,0 +1,129 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A program that tests 'trick mode' operations on a MPEG-2 Transport Stream file,
+// by generating a new Transport Stream file that represents the result of the
+// 'trick mode' operation (seeking and/or fast forward/reverse play).
+// For this to work, there must also be an index file present, in the same directory
+// as the Transport Stream file, and with the same name prefix. (The Transport
+// Stream file has name suffix ".ts"; the index file has name suffix ".tsx".)
+// main program
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+
+void afterPlaying(void* clientData); // forward
+
+UsageEnvironment* env;
+char const* programName;
+
+void usage() { // print usage to the environment's output and abort
+ *env << "usage: " << programName << " <input-transport-stream-file-name> <start-time> <scale> <output-transport-stream-file-name>\n";
+ *env << "\twhere\t<transport-stream-file-name> ends with \".ts\"\n";
+ *env << "\t\t<start-time> is the starting play time in seconds (0 for the start)\n";
+ *env << "\t\t<scale> is a non-zero integer, representing the playing speed (use 1 for normal play; use a negative number for reverse play)\n";
+ exit(1);
+}
+
+int main(int argc, char const** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Parse the command line:
+ programName = argv[0];
+ if (argc != 5) usage(); // exactly 4 arguments are required
+
+ char const* inputFileName = argv[1];
+ // Check whether the input file name ends with ".ts":
+ int len = strlen(inputFileName);
+ if (len < 4 || strcmp(&inputFileName[len-3], ".ts") != 0) { // len >= 4 guarantees at least one char before ".ts"
+ *env << "ERROR: input file name \"" << inputFileName
+ << "\" does not end with \".ts\"\n";
+ usage();
+ }
+
+ // Parse the <start-time> and <scale> parameters:
+ float startTime;
+ if (sscanf(argv[2], "%f", &startTime) != 1 || startTime < 0.0f) usage(); // start time must be non-negative
+
+ int scale;
+ if (sscanf(argv[3], "%d", &scale) != 1 || scale == 0) usage(); // scale must be a non-zero integer
+
+ // Open the input file (as a 'byte stream file source'):
+ FramedSource* input
+ = ByteStreamFileSource::createNew(*env, inputFileName, TRANSPORT_PACKET_SIZE); // read in whole TS packets
+ if (input == NULL) {
+ *env << "Failed to open input file \"" << inputFileName << "\" (does it exist?)\n";
+ exit(1);
+ }
+
+ // Check whether the corresponding index file exists.
+ // The index file name is the same as the input file name, except with suffix ".tsx":
+ char* indexFileName = new char[len+2]; // allow for trailing x\0
+ sprintf(indexFileName, "%sx", inputFileName); // "foo.ts" -> "foo.tsx"
+ MPEG2TransportStreamIndexFile* indexFile
+ = MPEG2TransportStreamIndexFile::createNew(*env, indexFileName);
+ if (indexFile == NULL) {
+ *env << "Failed to open index file \"" << indexFileName << "\" (does it exist?)\n";
+ exit(1);
+ }
+
+ // Create a filter that generates trick mode data from the input and index files:
+ MPEG2TransportStreamTrickModeFilter* trickModeFilter
+ = MPEG2TransportStreamTrickModeFilter::createNew(*env, input, indexFile, scale);
+
+ if (startTime > 0.0f) {
+ // Seek the input Transport Stream and Index files to the specified start time:
+ unsigned long tsRecordNumber, indexRecordNumber;
+ indexFile->lookupTSPacketNumFromNPT(startTime, tsRecordNumber, indexRecordNumber); // map NPT -> record numbers
+ if (!trickModeFilter->seekTo(tsRecordNumber, indexRecordNumber)) { // TARFU!
+ *env << "Failed to seek trick mode filter to ts #" << (unsigned)tsRecordNumber
+ << ", ix #" << (unsigned)indexRecordNumber
+ << "(for time " << startTime << ")\n";
+ exit(1);
+ }
+ }
+
+ // Generate a new Transport Stream from the Trick Mode filter:
+ MPEG2TransportStreamFromESSource* newTransportStream
+ = MPEG2TransportStreamFromESSource::createNew(*env);
+ newTransportStream->addNewVideoSource(trickModeFilter, indexFile->mpegVersion()); // the filter emits a video elementary stream
+
+ // Open the output file (for writing), as a 'file sink':
+ char const* outputFileName = argv[4];
+ MediaSink* output = FileSink::createNew(*env, outputFileName);
+ if (output == NULL) {
+ *env << "Failed to open output file \"" << outputFileName << "\"\n";
+ exit(1);
+ }
+
+ // Start playing, to generate the output file:
+ *env << "Writing output file \"" << outputFileName
+ << "\" (start time " << startTime
+ << ", scale " << scale
+ << ")...";
+ output->startPlaying(*newTransportStream, afterPlaying, NULL); // afterPlaying runs at end-of-stream
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) { // completion callback: output file fully written
+ *env << "...done\n";
+ exit(0); // exiting releases all media objects; no explicit cleanup is done here
+}
diff --git a/testProgs/testMPEG2TransportStreamer.cpp b/testProgs/testMPEG2TransportStreamer.cpp
new file mode 100644
index 0000000..fd4f096
--- /dev/null
+++ b/testProgs/testMPEG2TransportStreamer.cpp
@@ -0,0 +1,158 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a MPEG-2 Transport Stream file,
+// and streams it using RTP
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+// To stream using "source-specific multicast" (SSM), uncomment the following:
+//#define USE_SSM 1
+#ifdef USE_SSM
+Boolean const isSSM = True;
+#else
+Boolean const isSSM = False;
+#endif
+
+// To set up an internal RTSP server, uncomment the following:
+//#define IMPLEMENT_RTSP_SERVER 1
+// (Note that this RTSP server works for multicast only)
+
+#define TRANSPORT_PACKET_SIZE 188
+#define TRANSPORT_PACKETS_PER_NETWORK_PACKET 7
+// The product of these two numbers must be enough to fit within a network packet
+
+UsageEnvironment* env;
+char const* inputFileName = "test.ts";
+FramedSource* videoSource;
+RTPSink* videoSink;
+
+void play(); // forward
+
+int main(int argc, char** argv) { // args unused; input file and addresses are hard-coded above
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Create 'groupsocks' for RTP and RTCP:
+ char const* destinationAddressStr
+#ifdef USE_SSM
+ = "232.255.42.42";
+#else
+ = "239.255.42.42";
+ // Note: This is a multicast address. If you wish to stream using
+ // unicast instead, then replace this string with the unicast address
+ // of the (single) destination. (You may also need to make a similar
+ // change to the receiver program.)
+#endif
+ const unsigned short rtpPortNum = 1234; // matches testMPEG2Transport.sdp and the receiver program
+ const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP conventionally uses RTP port + 1
+ const unsigned char ttl = 7; // low, in case routers don't admin scope
+
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = our_inet_addr(destinationAddressStr); // inet_addr-style dotted-quad parse
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+ Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
+ Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
+#ifdef USE_SSM
+ rtpGroupsock.multicastSendOnly(); // we're a SSM source; don't join the group to receive
+ rtcpGroupsock.multicastSendOnly();
+#endif
+
+ // Create an appropriate 'RTP sink' from the RTP 'groupsock':
+ videoSink =
+ SimpleRTPSink::createNew(*env, &rtpGroupsock, 33, 90000, "video", "MP2T",
+ 1, True, False /*no 'M' bit*/); // payload type 33, 90kHz clock (MP2T)
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ const unsigned estimatedSessionBandwidth = 5000; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen); // use our host name as the RTCP CNAME
+ CNAME[maxCNAMElen] = '\0'; // just in case gethostname() didn't NUL-terminate
+#ifdef IMPLEMENT_RTSP_SERVER
+ RTCPInstance* rtcp =
+#endif
+ RTCPInstance::createNew(*env, &rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ videoSink, NULL /* we're a server */, isSSM);
+ // Note: This starts RTCP running automatically
+
+#ifdef IMPLEMENT_RTSP_SERVER
+ RTSPServer* rtspServer = RTSPServer::createNew(*env);
+ // Note that this (attempts to) start a server on the default RTSP server
+ // port: 554. To use a different port number, add it as an extra
+ // (optional) parameter to the "RTSPServer::createNew()" call above.
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+ "Session streamed by \"testMPEG2TransportStreamer\"",
+ isSSM);
+ sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp)); // 'passive': clients join the existing multicast stream
+ rtspServer->addServerMediaSession(sms);
+
+ char* url = rtspServer->rtspURL(sms);
+ *env << "Play this stream using the URL \"" << url << "\"\n";
+ delete[] url;
+#endif
+
+ // Finally, start the streaming:
+ *env << "Beginning streaming...\n";
+ play(); // opens the input file and starts the sink
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* /*clientData*/) { // called by the sink at end-of-file
+ *env << "...done reading from file\n";
+
+ videoSink->stopPlaying();
+ Medium::close(videoSource);
+ // Note that this also closes the input file that this source read from.
+
+ play(); // loop: re-open the file and stream it again, indefinitely
+}
+
+void play() { // (re)builds the source chain and starts streaming into "videoSink"
+ unsigned const inputDataChunkSize
+ = TRANSPORT_PACKETS_PER_NETWORK_PACKET*TRANSPORT_PACKET_SIZE; // 7*188 = 1316 bytes per network packet
+
+ // Open the input file as a 'byte-stream file source':
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(*env, inputFileName, inputDataChunkSize);
+ if (fileSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a byte-stream file source\n";
+ exit(1);
+ }
+
+ // Create a 'framer' for the input source (to give us proper inter-packet gaps):
+ videoSource = MPEG2TransportStreamFramer::createNew(*env, fileSource);
+
+ // Finally, start playing:
+ *env << "Beginning to read from file...\n";
+ videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+}
diff --git a/testProgs/testMPEG4VideoStreamer.cpp b/testProgs/testMPEG4VideoStreamer.cpp
new file mode 100644
index 0000000..d1b5b34
--- /dev/null
+++ b/testProgs/testMPEG4VideoStreamer.cpp
@@ -0,0 +1,126 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a MPEG-4 Video Elementary Stream file,
+// and streams it using RTP
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+// Globals shared between main(), play(), and afterPlaying():
+UsageEnvironment* env;
+char const* inputFileName = "test.m4e"; // MPEG-4 video elementary stream input
+MPEG4VideoStreamFramer* videoSource;    // current framer; recreated on each loop
+RTPSink* videoSink;                     // the MPEG-4 ES RTP sink
+
+void play(); // forward
+
+// Program entry point: sets up the usage environment, the multicast RTP and
+// RTCP groupsocks, the MPEG-4 RTP sink, and an RTSP server through which
+// receivers can access the (source-specific multicast) stream; then starts
+// playing and enters the event loop, never returning.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Create 'groupsocks' for RTP and RTCP:
+  struct in_addr destinationAddress;
+  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+  // Note: This is a multicast address. If you wish instead to stream
+  // using unicast, then you should use the "testOnDemandRTSPServer"
+  // test program - not this test program - as a model.
+
+  const unsigned short rtpPortNum = 18888;
+  const unsigned short rtcpPortNum = rtpPortNum+1; // RTCP on the next (odd) port
+  const unsigned char ttl = 255;
+
+  const Port rtpPort(rtpPortNum);
+  const Port rtcpPort(rtcpPortNum);
+
+  Groupsock rtpGroupsock(*env, destinationAddress, rtpPort, ttl);
+  rtpGroupsock.multicastSendOnly(); // we're a SSM source
+  Groupsock rtcpGroupsock(*env, destinationAddress, rtcpPort, ttl);
+  rtcpGroupsock.multicastSendOnly(); // we're a SSM source
+
+  // Create a 'MPEG-4 Video RTP' sink from the RTP 'groupsock':
+  videoSink = MPEG4ESVideoRTPSink::createNew(*env, &rtpGroupsock, 96); // 96: dynamic payload type
+
+  // Create (and start) a 'RTCP instance' for this RTP sink:
+  const unsigned estimatedSessionBandwidth = 500; // in kbps; for RTCP b/w share
+  const unsigned maxCNAMElen = 100;
+  unsigned char CNAME[maxCNAMElen+1];
+  gethostname((char*)CNAME, maxCNAMElen); // our host name is used as the RTCP CNAME
+  CNAME[maxCNAMElen] = '\0'; // just in case
+  RTCPInstance* rtcp
+    = RTCPInstance::createNew(*env, &rtcpGroupsock,
+                              estimatedSessionBandwidth, CNAME,
+                              videoSink, NULL /* we're a server */,
+                              True /* we're a SSM source */);
+  // Note: This starts RTCP running automatically
+
+  // Create an RTSP server (on port 8554) so receivers can discover the stream:
+  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554);
+  if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+    exit(1);
+  }
+  ServerMediaSession* sms
+    = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+                                    "Session streamed by \"testMPEG4VideoStreamer\"",
+                                    True /*SSM*/);
+  sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, rtcp));
+  rtspServer->addServerMediaSession(sms);
+
+  char* url = rtspServer->rtspURL(sms);
+  *env << "Play this stream using the URL \"" << url << "\"\n";
+  delete[] url;
+
+  // Start the streaming:
+  *env << "Beginning streaming...\n";
+  play();
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+// Called (by the RTP sink) when the end of the input file is reached.
+// This demo loops forever, so we close the old framer/file chain and
+// restart playback from the top of the file.
+void afterPlaying(void* /*clientData*/) {
+  *env << "...done reading from file\n";
+
+  videoSink->stopPlaying();
+  Medium::close(videoSource);
+  // Note that this also closes the input file that this source read from.
+
+  // Start playing once again:
+  play();
+}
+
+// (Re)starts streaming: opens the input file as a byte-stream source,
+// wraps it in a "MPEG4VideoStreamFramer" (which parses the elementary
+// stream into discrete frames), and starts the RTP sink consuming frames.
+// Exits the process if the input file cannot be opened.
+void play() {
+  // Open the input file as a 'byte-stream file source':
+  ByteStreamFileSource* fileSource
+    = ByteStreamFileSource::createNew(*env, inputFileName);
+  if (fileSource == NULL) {
+    *env << "Unable to open file \"" << inputFileName
+         << "\" as a byte-stream file source\n";
+    exit(1);
+  }
+
+  FramedSource* videoES = fileSource;
+
+  // Create a framer for the Video Elementary Stream:
+  videoSource = MPEG4VideoStreamFramer::createNew(*env, videoES);
+
+  // Finally, start playing:
+  *env << "Beginning to read from file...\n";
+  videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+}
diff --git a/testProgs/testOggStreamer.cpp b/testProgs/testOggStreamer.cpp
new file mode 100644
index 0000000..7c92652
--- /dev/null
+++ b/testProgs/testOggStreamer.cpp
@@ -0,0 +1,182 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a ".ogg" (i.e., Ogg) file, demultiplexes each track
+// (audio and/or video), and streams each track using RTP multicast.
+// main program
+
+#include <liveMedia.hh>
+#include <BasicUsageEnvironment.hh>
+#include <GroupsockHelper.hh>
+
+// Globals shared between main(), the file-creation callback, play(), and
+// afterPlaying():
+UsageEnvironment* env;
+char const* inputFileName = "test.ogg";
+struct in_addr destinationAddress; // the (random SSM) multicast destination
+RTSPServer* rtspServer;
+ServerMediaSession* sms;
+OggFile* oggFile;
+OggDemux* oggDemux;  // recreated each time we loop back to the file start
+unsigned numTracks;  // number of entries in "trackState"
+
+// A structure representing the state of a track:
+struct TrackState {
+  u_int32_t trackNumber; // 0 means 'no usable track in this slot'
+  FramedSource* source;  // demuxed (and possibly filtered) track source
+  RTPSink* sink;         // NULL if we don't know how to stream this track
+  RTCPInstance* rtcp;    // NULL if "sink" is NULL
+};
+TrackState* trackState; // heap array of size "numTracks"
+
+void onOggFileCreation(OggFile* newFile, void* clientData); // forward
+
+// Program entry point: sets up the usage environment, picks a random
+// source-specific-multicast destination address, creates the RTSP server
+// and its (initially empty) "ServerMediaSession", then asynchronously
+// opens the Ogg file. Per-track streaming setup continues in
+// "onOggFileCreation()" once the file has been parsed.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Define our destination (multicast) IP address:
+  destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+  // Note: This is a multicast address. If you wish instead to stream
+  // using unicast, then you should use the "testOnDemandRTSPServer"
+  // test program - not this test program - as a model.
+
+  // Create our RTSP server. (Receivers will need to use RTSP to access the stream.)
+  rtspServer = RTSPServer::createNew(*env, 8554);
+  if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+    exit(1);
+  }
+  // (Fixed: the session description used to say "testMKVStreamer" - a
+  // copy-paste from the Matroska test program; it now names this program.)
+  sms = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+                                      "Session streamed by \"testOggStreamer\"",
+                                      True /*SSM*/);
+
+  // Arrange to create an "OggFile" object for the specified file.
+  // (Note that this object is not created immediately, but instead via a callback.)
+  OggFile::createNew(*env, inputFileName, onOggFileCreation, NULL);
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+void play(); // forward
+
+// Callback, invoked once "OggFile::createNew()" has finished parsing the
+// file. For each track: creates a demuxed source, then (if the library
+// knows how to stream that track) an "RTPSink" and "RTCPInstance" on their
+// own port pair, and registers the track with the RTSP session. Exits if
+// no track turned out to be streamable; otherwise announces the RTSP URL
+// and starts streaming.
+void onOggFileCreation(OggFile* newFile, void* clientData) {
+  oggFile = newFile;
+
+  // Create a new demultiplexor for the file:
+  oggDemux = oggFile->newDemux();
+
+  // Create source streams, "RTPSink"s, and "RTCPInstance"s for each preferred track;
+  unsigned short rtpPortNum = 22222;
+  const unsigned char ttl = 255;
+
+  const unsigned maxCNAMElen = 100;
+  unsigned char CNAME[maxCNAMElen+1];
+  gethostname((char*)CNAME, maxCNAMElen); // our host name is used as the RTCP CNAME
+  CNAME[maxCNAMElen] = '\0'; // just in case
+
+  numTracks = oggFile->numTracks();
+  trackState = new TrackState[numTracks];
+  for (unsigned i = 0; i < numTracks; ++i) {
+    u_int32_t trackNumber;
+    FramedSource* baseSource = oggDemux->newDemuxedTrack(trackNumber);
+    trackState[i].trackNumber = trackNumber;
+
+    unsigned estBitrate, numFiltersInFrontOfTrack;
+    trackState[i].source = oggFile
+      ->createSourceForStreaming(baseSource, trackNumber, estBitrate, numFiltersInFrontOfTrack);
+    trackState[i].sink = NULL; // by default; may get changed below
+    trackState[i].rtcp = NULL; // ditto
+
+    if (trackState[i].source != NULL) {
+      // Each track gets its own port pair: an even port for RTP, the next
+      // (odd) port for RTCP:
+      Groupsock* rtpGroupsock = new Groupsock(*env, destinationAddress, rtpPortNum, ttl);
+      Groupsock* rtcpGroupsock = new Groupsock(*env, destinationAddress, rtpPortNum+1, ttl);
+      rtpPortNum += 2;
+
+      // (96+i: a distinct dynamic RTP payload type per track)
+      trackState[i].sink
+        = oggFile->createRTPSinkForTrackNumber(trackNumber, rtpGroupsock, 96+i);
+      if (trackState[i].sink != NULL) {
+        if (trackState[i].sink->estimatedBitrate() > 0) {
+          estBitrate = trackState[i].sink->estimatedBitrate(); // hack
+        }
+        trackState[i].rtcp
+          = RTCPInstance::createNew(*env, rtcpGroupsock, estBitrate, CNAME,
+                                    trackState[i].sink, NULL /* we're a server */,
+                                    True /* we're a SSM source */);
+        // Note: This starts RTCP running automatically
+
+        // Having set up a track for streaming, add it to our RTSP server's "ServerMediaSession":
+        sms->addSubsession(PassiveServerMediaSubsession::createNew(*trackState[i].sink, trackState[i].rtcp));
+      }
+    }
+  }
+
+  if (sms->numSubsessions() == 0) {
+    *env << "Error: The Ogg file \"" << inputFileName << "\" has no streamable tracks\n";
+    *env << "(Perhaps the file does not exist, is not an 'Ogg' file, or has no tracks that we know how to stream.)\n";
+    exit(1);
+  }
+
+  rtspServer->addServerMediaSession(sms);
+
+  char* url = rtspServer->rtspURL(sms);
+  *env << "Play this stream using the URL \"" << url << "\"\n";
+  delete[] url;
+
+  // Start the streaming:
+  play();
+}
+
+// Called when a track's sink finishes playing. Because this demo loops
+// forever, we stop every sink, close all demuxed sources (which also
+// closes the demultiplexor), build a fresh demux over the same "OggFile",
+// and start playing again from the top of the file.
+void afterPlaying(void* /*clientData*/) {
+  *env << "...done reading from file\n";
+
+  // Stop playing all "RTPSink"s, then close the source streams
+  // (which will also close the demultiplexor itself):
+  unsigned i;
+  for (i = 0; i < numTracks; ++i) {
+    if (trackState[i].sink != NULL) trackState[i].sink->stopPlaying();
+    Medium::close(trackState[i].source); trackState[i].source = NULL;
+  }
+
+  // Create a new demultiplexor from our Ogg file, then new data sources for each track:
+  oggDemux = oggFile->newDemux();
+  for (i = 0; i < numTracks; ++i) {
+    if (trackState[i].trackNumber != 0) { // i.e., this slot holds a real track
+      FramedSource* baseSource
+        = oggDemux->newDemuxedTrack(trackState[i].trackNumber);
+
+      unsigned estBitrate, numFiltersInFrontOfTrack;
+      trackState[i].source
+        = oggFile->createSourceForStreaming(baseSource, trackState[i].trackNumber,
+                                            estBitrate, numFiltersInFrontOfTrack);
+    }
+  }
+
+  // Start playing once again:
+  play();
+}
+
+// Starts (or restarts) streaming: kicks off every track whose sink and
+// source both exist. "afterPlaying" is invoked when a track completes.
+void play() {
+  *env << "Beginning to read from file...\n";
+
+  // Start playing each track's RTP sink from its corresponding source:
+  for (unsigned i = 0; i < numTracks; ++i) {
+    if (trackState[i].sink != NULL && trackState[i].source != NULL) {
+      trackState[i].sink->startPlaying(*trackState[i].source, afterPlaying, NULL);
+    }
+  }
+}
diff --git a/testProgs/testOnDemandRTSPServer.cpp b/testProgs/testOnDemandRTSPServer.cpp
new file mode 100644
index 0000000..a6fd023
--- /dev/null
+++ b/testProgs/testOnDemandRTSPServer.cpp
@@ -0,0 +1,455 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that demonstrates how to stream - via unicast RTP
+// - various kinds of file on demand, using a built-in RTSP server.
+// main program
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+UsageEnvironment* env;
+
+// To make the second and subsequent client for each stream reuse the same
+// input stream as the first client (rather than playing the file from the
+// start for each client), change the following "False" to "True":
+Boolean reuseFirstSource = False;
+
+// To stream *only* MPEG-1 or 2 video "I" frames
+// (e.g., to reduce network bandwidth),
+// change the following "False" to "True":
+Boolean iFramesOnly = False;
+
+static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
+                           char const* streamName, char const* inputFileName); // fwd
+
+// Used to wait, synchronously, for an asynchronous demux-creation callback:
+// "main()" runs a nested "doEventLoop(&newDemuxWatchVariable)", which
+// returns once a callback below sets this variable to 1.
+static char newDemuxWatchVariable;
+
+static MatroskaFileServerDemux* matroskaDemux;
+// Callback: records the newly-created Matroska demux, and signals "main()":
+static void onMatroskaDemuxCreation(MatroskaFileServerDemux* newDemux, void* /*clientData*/) {
+  matroskaDemux = newDemux;
+  newDemuxWatchVariable = 1;
+}
+
+static OggFileServerDemux* oggDemux;
+// Callback: records the newly-created Ogg demux, and signals "main()":
+static void onOggDemuxCreation(OggFileServerDemux* newDemux, void* /*clientData*/) {
+  oggDemux = newDemux;
+  newDemuxWatchVariable = 1;
+}
+
+// Program entry point: creates the RTSP server (with optional digest-auth
+// access control), then registers one "ServerMediaSession" - i.e., one RTSP
+// stream name - for each supported media file type, and finally runs the
+// event loop forever. Each session streams on demand, via unicast RTP.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  UserAuthenticationDatabase* authDB = NULL;
+#ifdef ACCESS_CONTROL
+  // To implement client access control to the RTSP server, do the following:
+  authDB = new UserAuthenticationDatabase;
+  authDB->addUserRecord("username1", "password1"); // replace these with real strings
+  // Repeat the above with each <username>, <password> that you wish to allow
+  // access to the server.
+#endif
+
+  // Create the RTSP server:
+  RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
+  if (rtspServer == NULL) {
+    *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+    exit(1);
+  }
+
+  char const* descriptionString
+    = "Session streamed by \"testOnDemandRTSPServer\"";
+
+  // Set up each of the possible streams that can be served by the
+  // RTSP server. Each such stream is implemented using a
+  // "ServerMediaSession" object, plus one or more
+  // "ServerMediaSubsession" objects for each audio/video substream.
+
+  // A MPEG-4 video elementary stream:
+  {
+    char const* streamName = "mpeg4ESVideoTest";
+    char const* inputFileName = "test.m4e";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(MPEG4VideoFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A H.264 video elementary stream:
+  {
+    char const* streamName = "h264ESVideoTest";
+    char const* inputFileName = "test.264";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(H264VideoFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A H.265 video elementary stream:
+  {
+    char const* streamName = "h265ESVideoTest";
+    char const* inputFileName = "test.265";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(H265VideoFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A MPEG-1 or 2 audio+video program stream:
+  {
+    char const* streamName = "mpeg1or2AudioVideoTest";
+    char const* inputFileName = "test.mpg";
+    // NOTE: This *must* be a Program Stream; not an Elementary Stream
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    // A demux shared by the audio and video subsessions below:
+    MPEG1or2FileServerDemux* demux
+      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
+    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
+    sms->addSubsession(demux->newAudioServerMediaSubsession());
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A MPEG-1 or 2 video elementary stream:
+  {
+    char const* streamName = "mpeg1or2ESVideoTest";
+    char const* inputFileName = "testv.mpg";
+    // NOTE: This *must* be a Video Elementary Stream; not a Program Stream
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(MPEG1or2VideoFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource, iFramesOnly));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A MP3 audio stream (actually, any MPEG-1 or 2 audio file will work):
+  // To stream using 'ADUs' rather than raw MP3 frames, uncomment the following:
+//#define STREAM_USING_ADUS 1
+  // To also reorder ADUs before streaming, uncomment the following:
+//#define INTERLEAVE_ADUS 1
+  // (For more information about ADUs and interleaving,
+  // see <http://www.live555.com/rtp-mp3/>)
+  {
+    char const* streamName = "mp3AudioTest";
+    char const* inputFileName = "test.mp3";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    Boolean useADUs = False;
+    Interleaving* interleaving = NULL;
+#ifdef STREAM_USING_ADUS
+    useADUs = True;
+#ifdef INTERLEAVE_ADUS
+    unsigned char interleaveCycle[] = {0,2,1,3}; // or choose your own...
+    unsigned const interleaveCycleSize
+      = (sizeof interleaveCycle)/(sizeof (unsigned char));
+    interleaving = new Interleaving(interleaveCycleSize, interleaveCycle);
+#endif
+#endif
+    sms->addSubsession(MP3AudioFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource,
+                                   useADUs, interleaving));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A WAV audio stream:
+  {
+    char const* streamName = "wavAudioTest";
+    char const* inputFileName = "test.wav";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    // To convert 16-bit PCM data to 8-bit u-law, prior to streaming,
+    // change the following to True:
+    Boolean convertToULaw = False;
+    sms->addSubsession(WAVAudioFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource, convertToULaw));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // An AMR audio stream:
+  {
+    char const* streamName = "amrAudioTest";
+    char const* inputFileName = "test.amr";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(AMRAudioFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A 'VOB' file (e.g., from an unencrypted DVD):
+  {
+    char const* streamName = "vobTest";
+    char const* inputFileName = "test.vob";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    // Note: VOB files are MPEG-2 Program Stream files, but using AC-3 audio
+    MPEG1or2FileServerDemux* demux
+      = MPEG1or2FileServerDemux::createNew(*env, inputFileName, reuseFirstSource);
+    sms->addSubsession(demux->newVideoServerMediaSubsession(iFramesOnly));
+    sms->addSubsession(demux->newAC3AudioServerMediaSubsession());
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A MPEG-2 Transport Stream:
+  {
+    char const* streamName = "mpeg2TransportStreamTest";
+    char const* inputFileName = "test.ts";
+    char const* indexFileName = "test.tsx"; // index file enables seeking ('trick play')
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(MPEG2TransportFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, indexFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // An AAC audio stream (ADTS-format file):
+  {
+    char const* streamName = "aacAudioTest";
+    char const* inputFileName = "test.aac";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(ADTSAudioFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A DV video stream:
+  {
+    // First, make sure that the RTPSinks' buffers will be large enough to handle the huge size of DV frames (as big as 288000).
+    OutPacketBuffer::maxSize = 300000;
+
+    char const* streamName = "dvVideoTest";
+    char const* inputFileName = "test.dv";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(DVVideoFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // An AC-3 audio elementary stream:
+  {
+    char const* streamName = "ac3AudioTest";
+    char const* inputFileName = "test.ac3";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+
+    sms->addSubsession(AC3AudioFileServerMediaSubsession
+                       ::createNew(*env, inputFileName, reuseFirstSource));
+
+    rtspServer->addServerMediaSession(sms);
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A Matroska ('.mkv') file, with video+audio+subtitle streams:
+  {
+    char const* streamName = "matroskaFileTest";
+    char const* inputFileName = "test.mkv";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+
+    // The demux is created asynchronously; run a nested event loop until
+    // the creation callback sets "newDemuxWatchVariable":
+    newDemuxWatchVariable = 0;
+    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
+    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);
+
+    Boolean sessionHasTracks = False;
+    ServerMediaSubsession* smss;
+    while ((smss = matroskaDemux->newServerMediaSubsession()) != NULL) {
+      sms->addSubsession(smss);
+      sessionHasTracks = True;
+    }
+    if (sessionHasTracks) {
+      rtspServer->addServerMediaSession(sms);
+    }
+    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A WebM ('.webm') file, with video(VP8)+audio(Vorbis) streams:
+  // (Note: ".webm' files are special types of Matroska files, so we use the same code as the Matroska ('.mkv') file code above.)
+  {
+    char const* streamName = "webmFileTest";
+    char const* inputFileName = "test.webm";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+
+    newDemuxWatchVariable = 0;
+    MatroskaFileServerDemux::createNew(*env, inputFileName, onMatroskaDemuxCreation, NULL);
+    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);
+
+    Boolean sessionHasTracks = False;
+    ServerMediaSubsession* smss;
+    while ((smss = matroskaDemux->newServerMediaSubsession()) != NULL) {
+      sms->addSubsession(smss);
+      sessionHasTracks = True;
+    }
+    if (sessionHasTracks) {
+      rtspServer->addServerMediaSession(sms);
+    }
+    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // An Ogg ('.ogg') file, with video and/or audio streams:
+  {
+    char const* streamName = "oggFileTest";
+    char const* inputFileName = "test.ogg";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+
+    newDemuxWatchVariable = 0;
+    OggFileServerDemux::createNew(*env, inputFileName, onOggDemuxCreation, NULL);
+    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);
+
+    Boolean sessionHasTracks = False;
+    ServerMediaSubsession* smss;
+    while ((smss = oggDemux->newServerMediaSubsession()) != NULL) {
+      sms->addSubsession(smss);
+      sessionHasTracks = True;
+    }
+    if (sessionHasTracks) {
+      rtspServer->addServerMediaSession(sms);
+    }
+    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // An Opus ('.opus') audio file:
+  // (Note: ".opus' files are special types of Ogg files, so we use the same code as the Ogg ('.ogg') file code above.)
+  {
+    char const* streamName = "opusFileTest";
+    char const* inputFileName = "test.opus";
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+
+    newDemuxWatchVariable = 0;
+    OggFileServerDemux::createNew(*env, inputFileName, onOggDemuxCreation, NULL);
+    env->taskScheduler().doEventLoop(&newDemuxWatchVariable);
+
+    Boolean sessionHasTracks = False;
+    ServerMediaSubsession* smss;
+    while ((smss = oggDemux->newServerMediaSubsession()) != NULL) {
+      sms->addSubsession(smss);
+      sessionHasTracks = True;
+    }
+    if (sessionHasTracks) {
+      rtspServer->addServerMediaSession(sms);
+    }
+    // otherwise, because the stream has no tracks, we don't add a ServerMediaSession to the server.
+
+    announceStream(rtspServer, sms, streamName, inputFileName);
+  }
+
+  // A MPEG-2 Transport Stream, coming from a live UDP (raw-UDP or RTP/UDP) source:
+  {
+    char const* streamName = "mpeg2TransportStreamFromUDPSourceTest";
+    char const* inputAddressStr = "239.255.42.42";
+    // This is the multicast address on which the "testMPEG2TransportStreamer" demo application sends its stream.
+    // (Note: If the input UDP source is unicast rather than multicast, then change this to NULL.)
+    portNumBits const inputPortNum = 1234;
+    // This causes the server to take its input from the stream sent by the "testMPEG2TransportStreamer" demo application.
+    Boolean const inputStreamIsRawUDP = False;
+    ServerMediaSession* sms
+      = ServerMediaSession::createNew(*env, streamName, streamName,
+                                      descriptionString);
+    sms->addSubsession(MPEG2TransportUDPServerMediaSubsession
+                       ::createNew(*env, inputAddressStr, inputPortNum, inputStreamIsRawUDP));
+    rtspServer->addServerMediaSession(sms);
+
+    char* url = rtspServer->rtspURL(sms);
+    *env << "\n\"" << streamName << "\" stream, from a UDP Transport Stream input source \n\t(";
+    if (inputAddressStr != NULL) {
+      *env << "IP multicast address " << inputAddressStr << ",";
+    } else {
+      *env << "unicast;";
+    }
+    *env << " port " << inputPortNum << ")\n";
+    *env << "Play this stream using the URL \"" << url << "\"\n";
+    delete[] url;
+  }
+
+  // Also, attempt to create a HTTP server for RTSP-over-HTTP tunneling.
+  // Try first with the default HTTP port (80), and then with the alternative HTTP
+  // port numbers (8000 and 8080).
+
+  if (rtspServer->setUpTunnelingOverHTTP(80) || rtspServer->setUpTunnelingOverHTTP(8000) || rtspServer->setUpTunnelingOverHTTP(8080)) {
+    *env << "\n(We use port " << rtspServer->httpServerPortNum() << " for optional RTSP-over-HTTP tunneling.)\n";
+  } else {
+    *env << "\n(RTSP-over-HTTP tunneling is not available.)\n";
+  }
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+// Prints, to the server's environment, the stream's name, its source file,
+// and the RTSP URL that a client should use to play it.
+static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms,
+                           char const* streamName, char const* inputFileName) {
+  char* url = rtspServer->rtspURL(sms); // heap-allocated; we must delete[] it
+  UsageEnvironment& env = rtspServer->envir();
+  env << "\n\"" << streamName << "\" stream, from the file \""
+      << inputFileName << "\"\n";
+  env << "Play this stream using the URL \"" << url << "\"\n";
+  delete[] url;
+}
diff --git a/testProgs/testRTSPClient.cpp b/testProgs/testRTSPClient.cpp
new file mode 100644
index 0000000..72e6f81
--- /dev/null
+++ b/testProgs/testRTSPClient.cpp
@@ -0,0 +1,537 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A demo application, showing how to create and run a RTSP client (that can potentially receive multiple streams concurrently).
+//
+// NOTE: This code - although it builds a running application - is intended only to illustrate how to develop your own RTSP
+// client application. For a full-featured RTSP client application - with much more functionality, and many options - see
+// "openRTSP": http://www.live555.com/openRTSP/
+
+#include "liveMedia.hh"
+#include "BasicUsageEnvironment.hh"
+
+// Forward function definitions:
+
+// RTSP 'response handlers':
+void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString);
+void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString);
+void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString);
+
+// Other event handler functions:
+void subsessionAfterPlaying(void* clientData); // called when a stream's subsession (e.g., audio or video substream) ends
+void subsessionByeHandler(void* clientData, char const* reason);
+ // called when a RTCP "BYE" is received for a subsession
+void streamTimerHandler(void* clientData);
+ // called at the end of a stream's expected duration (if the stream has not already signaled its end using a RTCP "BYE")
+
+// The main streaming routine (for each "rtsp://" URL):
+void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL);
+
+// Used to iterate through each stream's 'subsessions', setting up each one:
+void setupNextSubsession(RTSPClient* rtspClient);
+
+// Used to shut down and close a stream (including its "RTSPClient" object):
+void shutdownStream(RTSPClient* rtspClient, int exitCode = 1);
+
+// A function that outputs a string that identifies each stream (for debugging output). Modify this if you wish:
+// Debugging helper: prefixes log output with the client's URL, so that output from
+// concurrently-playing streams can be told apart.
+UsageEnvironment& operator<<(UsageEnvironment& env, const RTSPClient& rtspClient) {
+  env << "[URL:\"" << rtspClient.url() << "\"]: ";
+  return env;
+}
+
+// A function that outputs a string that identifies each subsession (for debugging output). Modify this if you wish:
+// Debugging helper: identifies a subsession by its medium name and codec name
+// (e.g., "video/H264").
+UsageEnvironment& operator<<(UsageEnvironment& env, const MediaSubsession& subsession) {
+  env << subsession.mediumName() << "/" << subsession.codecName();
+  return env;
+}
+
+// Prints command-line usage instructions for this program.
+void usage(UsageEnvironment& env, char const* progName) {
+  env << "Usage: " << progName << " <rtsp-url-1> ... <rtsp-url-N>\n"
+      << "\t(where each <rtsp-url-i> is a \"rtsp://\" URL)\n";
+}
+
+char eventLoopWatchVariable = 0;
+
+int main(int argc, char** argv) {
+  // Set up the LIVE555 usage environment (task scheduler + logging):
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // At least one "rtsp://" URL argument is required:
+  if (argc < 2) {
+    usage(*env, argv[0]);
+    return 1;
+  }
+
+  // Kick off streaming for each URL argument: argv[1] .. argv[argc-1]:
+  for (int i = 1; i < argc; ++i) {
+    openURL(*env, argv[0], argv[i]);
+  }
+
+  // Everything else happens inside the event loop:
+  env->taskScheduler().doEventLoop(&eventLoopWatchVariable);
+  // This call returns only if "eventLoopWatchVariable" is later set to something non-zero.
+
+  return 0;
+
+  // If you choose to continue the application past this point (i.e., if you comment out the "return 0;" statement above),
+  // and if you don't intend to do anything more with the "TaskScheduler" and "UsageEnvironment" objects,
+  // then you can also reclaim the (small) memory used by these objects by uncommenting the following code:
+  /*
+  env->reclaim(); env = NULL;
+  delete scheduler; scheduler = NULL;
+  */
+}
+
+// Define a class to hold per-stream state that we maintain throughout each stream's lifetime:
+
+// Per-stream state, created alongside each "ourRTSPClient" and reclaimed with it
+// (see "ourRTSPClient::scs" below).
+class StreamClientState {
+public:
+  StreamClientState();
+  virtual ~StreamClientState();
+
+public:
+  MediaSubsessionIterator* iter; // iterates over the session's subsessions during setup
+  MediaSession* session; // the whole media session, created from the SDP description
+  MediaSubsession* subsession; // the subsession currently being set up
+  TaskToken streamTimerTask; // timer for the stream's expected duration (NULL if unscheduled)
+  double duration; // expected play duration, in seconds (0 if unknown)
+};
+
+// If you're streaming just a single stream (i.e., just from a single URL, once), then you can define and use just a single
+// "StreamClientState" structure, as a global variable in your application. However, because - in this demo application - we're
+// showing how to play multiple streams, concurrently, we can't do that. Instead, we have to have a separate "StreamClientState"
+// structure for each "RTSPClient". To do this, we subclass "RTSPClient", and add a "StreamClientState" field to the subclass:
+
+// "RTSPClient" subclass that adds a per-stream "StreamClientState", so that each of
+// the (possibly many) concurrent streams keeps its own state.
+class ourRTSPClient: public RTSPClient {
+public:
+  static ourRTSPClient* createNew(UsageEnvironment& env, char const* rtspURL,
+				  int verbosityLevel = 0,
+				  char const* applicationName = NULL,
+				  portNumBits tunnelOverHTTPPortNum = 0);
+
+protected:
+  ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
+		int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum);
+      // called only by createNew();
+  virtual ~ourRTSPClient();
+
+public:
+  StreamClientState scs; // per-stream state (session, current subsession, duration timer)
+};
+
+// Define a data sink (a subclass of "MediaSink") to receive the data for each subsession (i.e., each audio or video 'substream').
+// In practice, this might be a class (or a chain of classes) that decodes and then renders the incoming audio or video.
+// Or it might be a "FileSink", for outputting the received data into a file (as is done by the "openRTSP" application).
+// In this example code, however, we define a simple 'dummy' sink that receives incoming data, but does nothing with it.
+
+// A 'dummy' sink: receives each frame into "fReceiveBuffer" and (optionally) prints
+// per-frame debugging output, but otherwise discards the data.
+class DummySink: public MediaSink {
+public:
+  static DummySink* createNew(UsageEnvironment& env,
+			      MediaSubsession& subsession, // identifies the kind of data that's being received
+			      char const* streamId = NULL); // identifies the stream itself (optional)
+
+private:
+  DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId);
+    // called only by "createNew()"
+  virtual ~DummySink();
+
+  // Static trampoline (registered with "getNextFrame()") plus the member function it forwards to:
+  static void afterGettingFrame(void* clientData, unsigned frameSize,
+                                unsigned numTruncatedBytes,
+				struct timeval presentationTime,
+                                unsigned durationInMicroseconds);
+  void afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
+			 struct timeval presentationTime, unsigned durationInMicroseconds);
+
+private:
+  // redefined virtual functions:
+  virtual Boolean continuePlaying();
+
+private:
+  u_int8_t* fReceiveBuffer; // fixed-size buffer that frames are read into (then discarded)
+  MediaSubsession& fSubsession; // the subsession whose data we're receiving
+  char* fStreamId; // our own copy (via "strDup()") of the stream id, for debugging output
+};
+
+#define RTSP_CLIENT_VERBOSITY_LEVEL 1 // by default, print verbose output from each "RTSPClient"
+
+static unsigned rtspClientCount = 0; // Counts how many streams (i.e., "RTSPClient"s) are currently in use.
+
+// Starts streaming from "rtspURL": creates a "RTSPClient" object for this stream,
+// then asynchronously sends the initial RTSP "DESCRIBE" command.
+// ("progName" is passed on as the client's application name.)
+void openURL(UsageEnvironment& env, char const* progName, char const* rtspURL) {
+  // Begin by creating a "RTSPClient" object. Note that there is a separate "RTSPClient" object for each stream that we wish
+  // to receive (even if more than one stream uses the same "rtsp://" URL).
+  RTSPClient* rtspClient = ourRTSPClient::createNew(env, rtspURL, RTSP_CLIENT_VERBOSITY_LEVEL, progName);
+  if (rtspClient == NULL) {
+    env << "Failed to create a RTSP client for URL \"" << rtspURL << "\": " << env.getResultMsg() << "\n";
+    return;
+  }
+
+  ++rtspClientCount; // one more active stream; decremented (and checked) in "shutdownStream()"
+
+  // Next, send a RTSP "DESCRIBE" command, to get a SDP description for the stream.
+  // Note that this command - like all RTSP commands - is sent asynchronously; we do not block, waiting for a response.
+  // Instead, the following function call returns immediately, and we handle the RTSP response later, from within the event loop:
+  rtspClient->sendDescribeCommand(continueAfterDESCRIBE);
+}
+
+
+// Implementation of the RTSP 'response handlers':
+
+// Response handler for the RTSP "DESCRIBE" command: parses the returned SDP description
+// into a "MediaSession", then begins setting up its subsessions.
+// (The "do { ... } while (0)" idiom lets each failure case "break" to the shared error path.)
+void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) {
+  do {
+    UsageEnvironment& env = rtspClient->envir(); // alias
+    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+
+    if (resultCode != 0) {
+      env << *rtspClient << "Failed to get a SDP description: " << resultString << "\n";
+      delete[] resultString; // we own "resultString" and must free it on every path
+      break;
+    }
+
+    char* const sdpDescription = resultString;
+    env << *rtspClient << "Got a SDP description:\n" << sdpDescription << "\n";
+
+    // Create a media session object from this SDP description:
+    scs.session = MediaSession::createNew(env, sdpDescription);
+    delete[] sdpDescription; // because we don't need it anymore
+    if (scs.session == NULL) {
+      env << *rtspClient << "Failed to create a MediaSession object from the SDP description: " << env.getResultMsg() << "\n";
+      break;
+    } else if (!scs.session->hasSubsessions()) {
+      env << *rtspClient << "This session has no media subsessions (i.e., no \"m=\" lines)\n";
+      break;
+    }
+
+    // Then, create and set up our data source objects for the session. We do this by iterating over the session's 'subsessions',
+    // calling "MediaSubsession::initiate()", and then sending a RTSP "SETUP" command, on each one.
+    // (Each 'subsession' will have its own data source.)
+    scs.iter = new MediaSubsessionIterator(*scs.session);
+    setupNextSubsession(rtspClient);
+    return;
+  } while (0);
+
+  // An unrecoverable error occurred with this stream.
+  shutdownStream(rtspClient);
+}
+
+// By default, we request that the server stream its data using RTP/UDP.
+// If, instead, you want to request that the server stream via RTP-over-TCP, change the following to True:
+#define REQUEST_STREAMING_OVER_TCP False
+
+// Advances "scs.iter" to the next subsession, "initiate()"s it, and sends a RTSP "SETUP"
+// command for it (continuing in "continueAfterSETUP()").  Once no subsessions remain,
+// sends the RTSP "PLAY" command for the whole session instead.
+void setupNextSubsession(RTSPClient* rtspClient) {
+  UsageEnvironment& env = rtspClient->envir(); // alias
+  StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+
+  scs.subsession = scs.iter->next();
+  if (scs.subsession != NULL) {
+    if (!scs.subsession->initiate()) {
+      env << *rtspClient << "Failed to initiate the \"" << *scs.subsession << "\" subsession: " << env.getResultMsg() << "\n";
+      setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
+    } else {
+      env << *rtspClient << "Initiated the \"" << *scs.subsession << "\" subsession (";
+      if (scs.subsession->rtcpIsMuxed()) {
+        env << "client port " << scs.subsession->clientPortNum();
+      } else {
+        env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
+      }
+      env << ")\n";
+
+      // Continue setting up this subsession, by sending a RTSP "SETUP" command:
+      rtspClient->sendSetupCommand(*scs.subsession, continueAfterSETUP, False, REQUEST_STREAMING_OVER_TCP);
+    }
+    return;
+  }
+
+  // We've finished setting up all of the subsessions. Now, send a RTSP "PLAY" command to start the streaming:
+  if (scs.session->absStartTime() != NULL) {
+    // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
+    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY, scs.session->absStartTime(), scs.session->absEndTime());
+  } else {
+    scs.duration = scs.session->playEndTime() - scs.session->playStartTime();
+    rtspClient->sendPlayCommand(*scs.session, continueAfterPLAY);
+  }
+}
+
+// Response handler for each RTSP "SETUP" command: on success, creates a "DummySink" for the
+// subsession and starts it playing; in all cases, proceeds to set up the next subsession.
+void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) {
+  do {
+    UsageEnvironment& env = rtspClient->envir(); // alias
+    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+
+    if (resultCode != 0) {
+      env << *rtspClient << "Failed to set up the \"" << *scs.subsession << "\" subsession: " << resultString << "\n";
+      break;
+    }
+
+    env << *rtspClient << "Set up the \"" << *scs.subsession << "\" subsession (";
+    if (scs.subsession->rtcpIsMuxed()) {
+      env << "client port " << scs.subsession->clientPortNum();
+    } else {
+      env << "client ports " << scs.subsession->clientPortNum() << "-" << scs.subsession->clientPortNum()+1;
+    }
+    env << ")\n";
+
+    // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it.
+    // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
+    // after we've sent a RTSP "PLAY" command.)
+
+    scs.subsession->sink = DummySink::createNew(env, *scs.subsession, rtspClient->url());
+      // perhaps use your own custom "MediaSink" subclass instead
+    if (scs.subsession->sink == NULL) {
+      env << *rtspClient << "Failed to create a data sink for the \"" << *scs.subsession
+	  << "\" subsession: " << env.getResultMsg() << "\n";
+      break;
+    }
+
+    env << *rtspClient << "Created a data sink for the \"" << *scs.subsession << "\" subsession\n";
+    scs.subsession->miscPtr = rtspClient; // a hack to let subsession handler functions get the "RTSPClient" from the subsession
+    scs.subsession->sink->startPlaying(*(scs.subsession->readSource()),
+				       subsessionAfterPlaying, scs.subsession);
+    // Also set a handler to be called if a RTCP "BYE" arrives for this subsession:
+    if (scs.subsession->rtcpInstance() != NULL) {
+      scs.subsession->rtcpInstance()->setByeWithReasonHandler(subsessionByeHandler, scs.subsession);
+    }
+  } while (0);
+  delete[] resultString; // freed on every path (success and failure alike)
+
+  // Set up the next subsession, if any:
+  setupNextSubsession(rtspClient);
+}
+
+// Response handler for the RTSP "PLAY" command: on success, optionally schedules a timer
+// for the stream's expected duration; on failure, shuts the stream down.
+void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString) {
+  Boolean success = False;
+
+  do {
+    UsageEnvironment& env = rtspClient->envir(); // alias
+    StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+
+    if (resultCode != 0) {
+      env << *rtspClient << "Failed to start playing session: " << resultString << "\n";
+      break;
+    }
+
+    // Set a timer to be handled at the end of the stream's expected duration (if the stream does not already signal its end
+    // using a RTCP "BYE").  This is optional.  If, instead, you want to keep the stream active - e.g., so you can later
+    // 'seek' back within it and do another RTSP "PLAY" - then you can omit this code.
+    // (Alternatively, if you don't want to receive the entire stream, you could set this timer for some shorter value.)
+    if (scs.duration > 0) {
+      unsigned const delaySlop = 2; // number of seconds extra to delay, after the stream's expected duration.  (This is optional.)
+      scs.duration += delaySlop;
+      unsigned uSecsToDelay = (unsigned)(scs.duration*1000000);
+      scs.streamTimerTask = env.taskScheduler().scheduleDelayedTask(uSecsToDelay, (TaskFunc*)streamTimerHandler, rtspClient);
+    }
+
+    env << *rtspClient << "Started playing session";
+    if (scs.duration > 0) {
+      env << " (for up to " << scs.duration << " seconds)";
+    }
+    env << "...\n";
+
+    success = True;
+  } while (0);
+  delete[] resultString; // we own "resultString"; free it on every path
+
+  if (!success) {
+    // An unrecoverable error occurred with this stream.
+    shutdownStream(rtspClient);
+  }
+}
+
+
+// Implementation of the other event handlers:
+
+// Called when a subsession's sink stops playing.  Closes that subsession's sink and, if no
+// other subsession in the same session is still active, shuts down the whole stream.
+void subsessionAfterPlaying(void* clientData) {
+  MediaSubsession* subsession = (MediaSubsession*)clientData;
+  RTSPClient* rtspClient = (RTSPClient*)(subsession->miscPtr); // stashed in "continueAfterSETUP()"
+
+  // Begin by closing this subsession's stream:
+  Medium::close(subsession->sink);
+  subsession->sink = NULL;
+
+  // Next, check whether *all* subsessions' streams have now been closed:
+  MediaSession& session = subsession->parentSession();
+  MediaSubsessionIterator iter(session);
+  while ((subsession = iter.next()) != NULL) {
+    if (subsession->sink != NULL) return; // this subsession is still active
+  }
+
+  // All subsessions' streams have now been closed, so shutdown the client:
+  shutdownStream(rtspClient);
+}
+
+// Called when a RTCP "BYE" arrives for a subsession.  ("reason", if non-NULL, is a
+// heap-allocated string whose ownership we take; it is freed here.)
+void subsessionByeHandler(void* clientData, char const* reason) {
+  MediaSubsession* subsession = (MediaSubsession*)clientData;
+  RTSPClient* rtspClient = (RTSPClient*)subsession->miscPtr;
+  UsageEnvironment& env = rtspClient->envir(); // alias
+
+  env << *rtspClient << "Received RTCP \"BYE\"";
+  if (reason != NULL) {
+    env << " (reason:\"" << reason << "\")";
+    delete[] (char*)reason; // we own "reason" (cast away const to free it)
+  }
+  env << " on \"" << *subsession << "\" subsession\n";
+
+  // Now act as if the subsession had closed:
+  subsessionAfterPlaying(subsession);
+}
+
+// Called when the stream's expected duration has elapsed without a RTCP "BYE" arriving.
+// Clears the (now-fired) timer token, then tears the stream down.
+void streamTimerHandler(void* clientData) {
+  ourRTSPClient* client = (ourRTSPClient*)clientData;
+
+  client->scs.streamTimerTask = NULL; // the task has fired; forget its token
+
+  shutdownStream(client);
+}
+
+// Tears down one stream: closes any still-active subsession sinks, sends a RTSP "TEARDOWN"
+// (if anything was still active), closes the "RTSPClient" itself, and - once the last
+// stream has been shut down - exits the application with "exitCode".
+void shutdownStream(RTSPClient* rtspClient, int exitCode) {
+  UsageEnvironment& env = rtspClient->envir(); // alias
+  StreamClientState& scs = ((ourRTSPClient*)rtspClient)->scs; // alias
+
+  // First, check whether any subsessions have still to be closed:
+  if (scs.session != NULL) {
+    Boolean someSubsessionsWereActive = False;
+    MediaSubsessionIterator iter(*scs.session);
+    MediaSubsession* subsession;
+
+    while ((subsession = iter.next()) != NULL) {
+      if (subsession->sink != NULL) {
+	Medium::close(subsession->sink);
+	subsession->sink = NULL;
+
+	if (subsession->rtcpInstance() != NULL) {
+	  subsession->rtcpInstance()->setByeHandler(NULL, NULL); // in case the server sends a RTCP "BYE" while handling "TEARDOWN"
+	}
+
+	someSubsessionsWereActive = True;
+      }
+    }
+
+    if (someSubsessionsWereActive) {
+      // Send a RTSP "TEARDOWN" command, to tell the server to shutdown the stream.
+      // Don't bother handling the response to the "TEARDOWN".
+      rtspClient->sendTeardownCommand(*scs.session, NULL);
+    }
+  }
+
+  env << *rtspClient << "Closing the stream.\n";
+  Medium::close(rtspClient);
+    // Note that this will also cause this stream's "StreamClientState" structure to get reclaimed.
+
+  if (--rtspClientCount == 0) {
+    // The final stream has ended, so exit the application now.
+    // (Of course, if you're embedding this code into your own application, you might want to comment this out,
+    // and replace it with "eventLoopWatchVariable = 1;", so that we leave the LIVE555 event loop, and continue running "main()".)
+    exit(exitCode);
+  }
+}
+
+
+// Implementation of "ourRTSPClient":
+
+// Factory function for "ourRTSPClient" (its constructor is protected; all creation goes through here).
+ourRTSPClient* ourRTSPClient::createNew(UsageEnvironment& env, char const* rtspURL,
+					int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum) {
+  ourRTSPClient* newClient
+    = new ourRTSPClient(env, rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum);
+  return newClient;
+}
+
+// Simply forwards to the "RTSPClient" base class.
+// NOTE(review): the trailing "-1" is the base class's extra constructor parameter
+// (presumably the default socket/connection selector) - confirm against "RTSPClient.hh".
+ourRTSPClient::ourRTSPClient(UsageEnvironment& env, char const* rtspURL,
+			     int verbosityLevel, char const* applicationName, portNumBits tunnelOverHTTPPortNum)
+  : RTSPClient(env,rtspURL, verbosityLevel, applicationName, tunnelOverHTTPPortNum, -1) {
+}
+
+// Nothing extra to clean up here; "scs"'s own destructor reclaims the per-stream state.
+ourRTSPClient::~ourRTSPClient() {
+}
+
+
+// Implementation of "StreamClientState":
+
+// All state starts out empty/NULL; the fields are filled in as the RTSP
+// "DESCRIBE"/"SETUP"/"PLAY" responses arrive.
+StreamClientState::StreamClientState()
+  : iter(NULL), session(NULL), subsession(NULL), streamTimerTask(NULL), duration(0.0) {
+}
+
+// Reclaims everything this stream allocated: the subsession iterator, the duration
+// timer (if still scheduled), and the "MediaSession" itself.
+StreamClientState::~StreamClientState() {
+  delete iter;
+  if (session != NULL) {
+    // We also need to delete "session", and unschedule "streamTimerTask" (if set)
+    UsageEnvironment& env = session->envir(); // alias
+
+    env.taskScheduler().unscheduleDelayedTask(streamTimerTask);
+    Medium::close(session);
+  }
+}
+
+
+// Implementation of "DummySink":
+
+// Even though we're not going to be doing anything with the incoming data, we still need to receive it.
+// Define the size of the buffer that we'll use:
+#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000
+
+// Factory function.  (The constructor is private; all creation goes through here.)
+DummySink* DummySink::createNew(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId) {
+  DummySink* newSink = new DummySink(env, subsession, streamId);
+  return newSink;
+}
+
+// Makes a private copy of the stream id and allocates the fixed-size receive buffer;
+// both are freed in the destructor.
+DummySink::DummySink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
+  : MediaSink(env),
+    fSubsession(subsession) {
+  fStreamId = strDup(streamId); // our own heap copy ("streamId" may be NULL; "strDup" then returns NULL)
+  fReceiveBuffer = new u_int8_t[DUMMY_SINK_RECEIVE_BUFFER_SIZE];
+}
+
+// Frees the receive buffer and our copy of the stream id (both allocated in the constructor).
+DummySink::~DummySink() {
+  delete[] fReceiveBuffer;
+  delete[] fStreamId;
+}
+
+// Static trampoline: recovers the "DummySink" instance from "clientData", then forwards
+// to the member-function version of "afterGettingFrame()".
+void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
+				  struct timeval presentationTime, unsigned durationInMicroseconds) {
+  ((DummySink*)clientData)->afterGettingFrame(frameSize, numTruncatedBytes,
+					      presentationTime, durationInMicroseconds);
+}
+
+// If you don't want to see debugging output for each received frame, then comment out the following line:
+#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1
+
+// Handles one received frame: (optionally) prints debugging info about it - stream id,
+// medium/codec, size, truncation, presentation time - then requests the next frame.
+// (Comment out DEBUG_PRINT_EACH_RECEIVED_FRAME, above, to silence the per-frame output.)
+void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
+				  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
+  // We've just received a frame of data.  (Optionally) print out information about it:
+#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
+  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
+  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
+  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
+  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
+  // Use snprintf() rather than sprintf(): if "tv_usec" were ever unnormalized (>= 1000000),
+  // "%06u" would emit more than 6 digits and sprintf() would overflow this buffer:
+  snprintf(uSecsStr, sizeof uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
+  envir() << ".\tPresentation time: " << (int)presentationTime.tv_sec << "." << uSecsStr;
+  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
+    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
+  }
+#ifdef DEBUG_PRINT_NPT
+  envir() << "\tNPT: " << fSubsession.getNormalPlayTime(presentationTime);
+#endif
+  envir() << "\n";
+#endif
+
+  // Then continue, to request the next frame of data:
+  continuePlaying();
+}
+
+// (Re)starts the flow of data: asks our source for the next frame, to be delivered to
+// "afterGettingFrame()".  Returns False only if we no longer have a source.
+Boolean DummySink::continuePlaying() {
+  if (fSource == NULL) return False; // sanity check (should not happen)
+
+  // Request the next frame of data from our input source.  "afterGettingFrame()" will get called later, when it arrives:
+  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
+                        afterGettingFrame, this,
+                        onSourceClosure, this);
+  return True;
+}
diff --git a/testProgs/testRelay.cpp b/testProgs/testRelay.cpp
new file mode 100644
index 0000000..2c18093
--- /dev/null
+++ b/testProgs/testRelay.cpp
@@ -0,0 +1,87 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that receives a UDP multicast stream
+// and retransmits it to another (multicast or unicast) address & port
+// main program
+
+#include <liveMedia.hh>
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+UsageEnvironment* env;
+
+// To receive a "source-specific multicast" (SSM) stream, uncomment this:
+//#define USE_SSM 1
+
+// Relays a UDP (multicast) Transport Stream: reads packets from the input group/port
+// and retransmits them, unchanged, to the output address/port.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Create a 'groupsock' for the input multicast group,port:
+  char const* inputAddressStr
+#ifdef USE_SSM
+    = "232.255.42.42";
+#else
+    = "239.255.42.42";
+#endif
+  struct in_addr inputAddress;
+  inputAddress.s_addr = our_inet_addr(inputAddressStr);
+
+  Port const inputPort(8888);
+  unsigned char const inputTTL = 0; // we're only reading from this mcast group
+
+#ifdef USE_SSM
+  char* sourceAddressStr = "aaa.bbb.ccc.ddd";
+      // replace this with the real source address
+  struct in_addr sourceFilterAddress;
+  sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
+
+  Groupsock inputGroupsock(*env, inputAddress, sourceFilterAddress, inputPort);
+#else
+  Groupsock inputGroupsock(*env, inputAddress, inputPort, inputTTL);
+#endif
+
+  // Then create a liveMedia 'source' object, encapsulating this groupsock:
+  FramedSource* source = BasicUDPSource::createNew(*env, &inputGroupsock);
+
+
+  // Create a 'groupsock' for the destination address and port:
+  char const* outputAddressStr = "239.255.43.43"; // this could also be unicast
+    // Note: You may change "outputAddressStr" to use a different multicast
+    // (or unicast address), but do *not* change it to use the same multicast
+    // address as "inputAddressStr".
+  struct in_addr outputAddress;
+  outputAddress.s_addr = our_inet_addr(outputAddressStr);
+
+  Port const outputPort(4444);
+  unsigned char const outputTTL = 255; // forward across routers, if need be
+
+  Groupsock outputGroupsock(*env, outputAddress, outputPort, outputTTL);
+
+  // Then create a liveMedia 'sink' object, encapsulating this groupsock:
+  unsigned const maxPacketSize = 65536; // allow for large UDP packets
+  MediaSink* sink = BasicUDPSink::createNew(*env, &outputGroupsock, maxPacketSize);
+
+
+  // Now, start playing, feeding the sink object from the source:
+  sink->startPlaying(*source, NULL, NULL);
+
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
diff --git a/testProgs/testReplicator.cpp b/testProgs/testReplicator.cpp
new file mode 100644
index 0000000..459a734
--- /dev/null
+++ b/testProgs/testReplicator.cpp
@@ -0,0 +1,114 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A demo application that receives a UDP multicast stream, replicates it (using the "StreamReplicator" class),
+// and retransmits one replica stream to another (multicast or unicast) address & port,
+// and writes the other replica stream to a file.
+//
+// main program
+
+#include <liveMedia.hh>
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+UsageEnvironment* env;
+
+// To receive a "source-specific multicast" (SSM) stream, uncomment this:
+//#define USE_SSM 1
+
+void startReplicaUDPSink(StreamReplicator* replicator, char const* outputAddressStr, portNumBits outputPortNum); // forward
+void startReplicaFileSink(StreamReplicator* replicator, char const* outputFileName); // forward
+
+// Receives a UDP (multicast) stream, replicates it via "StreamReplicator", then sends
+// one replica to another network address and writes a second replica to a file.
+int main(int argc, char** argv) {
+  // Begin by setting up our usage environment:
+  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+  env = BasicUsageEnvironment::createNew(*scheduler);
+
+  // Create a 'groupsock' for the input multicast group,port:
+  char const* inputAddressStr
+#ifdef USE_SSM
+    = "232.255.42.42";
+#else
+    = "239.255.42.42";
+#endif
+  struct in_addr inputAddress;
+  inputAddress.s_addr = our_inet_addr(inputAddressStr);
+
+  Port const inputPort(8888);
+  unsigned char const inputTTL = 0; // we're only reading from this mcast group
+
+#ifdef USE_SSM
+  char* sourceAddressStr = "aaa.bbb.ccc.ddd";
+      // replace this with the real source address
+  struct in_addr sourceFilterAddress;
+  sourceFilterAddress.s_addr = our_inet_addr(sourceAddressStr);
+
+  Groupsock inputGroupsock(*env, inputAddress, sourceFilterAddress, inputPort);
+#else
+  Groupsock inputGroupsock(*env, inputAddress, inputPort, inputTTL);
+#endif
+
+  // Then create a liveMedia 'source' object, encapsulating this groupsock:
+  FramedSource* source = BasicUDPSource::createNew(*env, &inputGroupsock);
+
+  // And feed this into a 'stream replicator':
+  StreamReplicator* replicator = StreamReplicator::createNew(*env, source);
+
+  // Then create a network (UDP) 'sink' object to receive a replica of the input stream, and start it.
+  // If you wish, you can duplicate this line - with different network addresses and ports - to create multiple output UDP streams:
+  startReplicaUDPSink(replicator, "239.255.43.43", 4444);
+
+  // Then create a file 'sink' object to receive a replica of the input stream, and start it.
+  // If you wish, you can duplicate this line - with a different file name - to create multiple output files:
+  startReplicaFileSink(replicator, "test.out");
+
+  // Finally, enter the 'event loop' (which is where most of the 'real work' in a LIVE555-based application gets done):
+  env->taskScheduler().doEventLoop(); // does not return
+
+  return 0; // only to prevent compiler warning
+}
+
+// Creates one replica of the replicator's input stream and starts sending it, via UDP,
+// to "outputAddressStr":"outputPortNum".
+void startReplicaUDPSink(StreamReplicator* replicator, char const* outputAddressStr, portNumBits outputPortNum) {
+  // Begin by creating an input stream from our replicator:
+  FramedSource* source = replicator->createStreamReplica();
+
+  // Create a 'groupsock' for the destination address and port:
+  struct in_addr outputAddress;
+  outputAddress.s_addr = our_inet_addr(outputAddressStr);
+
+  Port const outputPort(outputPortNum);
+  unsigned char const outputTTL = 255;
+
+  // Note: this groupsock (and the sink below) are heap-allocated and deliberately never
+  // deleted - they must live for the lifetime of the (never-ending) event loop:
+  Groupsock* outputGroupsock = new Groupsock(*env, outputAddress, outputPort, outputTTL);
+
+  // Then create a liveMedia 'sink' object, encapsulating this groupsock:
+  unsigned const maxPacketSize = 65536; // allow for large UDP packets
+  MediaSink* sink = BasicUDPSink::createNew(*env, outputGroupsock, maxPacketSize);
+
+  // Now, start playing, feeding the sink object from the source:
+  sink->startPlaying(*source, NULL, NULL);
+}
+
+// Creates one replica of the replicator's input stream and starts writing it to the
+// file named "outputFileName".
+void startReplicaFileSink(StreamReplicator* replicator, char const* outputFileName) {
+  // Begin by creating an input stream from our replicator:
+  FramedSource* source = replicator->createStreamReplica();
+
+  // Then create a 'file sink' object to receive the replica stream:
+  MediaSink* sink = FileSink::createNew(*env, outputFileName);
+
+  // Now, start playing, feeding the sink object from the source:
+  sink->startPlaying(*source, NULL, NULL);
+}
diff --git a/testProgs/testWAVAudioStreamer.cpp b/testProgs/testWAVAudioStreamer.cpp
new file mode 100644
index 0000000..199848e
--- /dev/null
+++ b/testProgs/testWAVAudioStreamer.cpp
@@ -0,0 +1,239 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that streams a WAV audio file via RTP/RTCP
+// main program
+
+#include "liveMedia.hh"
+#include "GroupsockHelper.hh"
+
+#include "BasicUsageEnvironment.hh"
+
+// To convert 16-bit samples to 8-bit u-law ("u" is the Greek letter "mu")
+// encoding, before streaming, uncomment the following line:
+//#define CONVERT_TO_ULAW 1
+
+UsageEnvironment* env;
+
+void play(); // forward
+
+int main(int argc, char** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ play();
+
+ env->taskScheduler().doEventLoop(); // does not return
+ return 0; // only to prevent compiler warnings
+}
+
+char const* inputFileName = "test.wav";
+
+void afterPlaying(void* clientData); // forward
+
+// A structure to hold the state of the current session.
+// It is used in the "afterPlaying()" function to clean up the session.
+struct sessionState_t {
+ FramedSource* source;
+ RTPSink* sink;
+ RTCPInstance* rtcpInstance;
+ Groupsock* rtpGroupsock;
+ Groupsock* rtcpGroupsock;
+ RTSPServer* rtspServer;
+} sessionState;
+
+void play() {
+ // Open the file as a 'WAV' file:
+ WAVAudioFileSource* wavSource = WAVAudioFileSource::createNew(*env, inputFileName);
+ if (wavSource == NULL) {
+ *env << "Unable to open file \"" << inputFileName
+ << "\" as a WAV audio file source: "
+ << env->getResultMsg() << "\n";
+ exit(1);
+ }
+
+ // Get attributes of the audio source:
+ unsigned char audioFormat = wavSource->getAudioFormat();
+ unsigned char const bitsPerSample = wavSource->bitsPerSample();
+ // We handle only 4,8,16,20,24 bits-per-sample audio:
+ if (bitsPerSample%4 != 0 || bitsPerSample < 4 || bitsPerSample > 24 || bitsPerSample == 12) {
+ *env << "The input file contains " << bitsPerSample << " bit-per-sample audio, which we don't handle\n";
+ exit(1);
+ }
+ unsigned const samplingFrequency = wavSource->samplingFrequency();
+ unsigned char const numChannels = wavSource->numChannels();
+ unsigned bitsPerSecond = samplingFrequency*bitsPerSample*numChannels;
+ *env << "Audio source parameters:\n\t" << samplingFrequency << " Hz, ";
+ *env << bitsPerSample << " bits-per-sample, ";
+ *env << numChannels << " channels => ";
+ *env << bitsPerSecond << " bits-per-second\n";
+
+ char const* mimeType;
+ unsigned char payloadFormatCode = 96; // by default, unless a static RTP payload type can be used
+
+ // Add in any filter necessary to transform the data prior to streaming.
+ // (This is where any audio compression would get added.)
+ sessionState.source = wavSource; // by default
+ if (audioFormat == WA_PCM) {
+ if (bitsPerSample == 16) {
+ // Note that samples in the WAV audio file are in little-endian order.
+#ifdef CONVERT_TO_ULAW
+ // Add a filter that converts from raw 16-bit PCM audio (in little-endian order) to 8-bit u-law audio:
+ sessionState.source = uLawFromPCMAudioSource::createNew(*env, wavSource, 1/*little-endian*/);
+ if (sessionState.source == NULL) {
+ *env << "Unable to create a u-law filter from the PCM audio source: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ bitsPerSecond /= 2;
+ *env << "Converting to 8-bit u-law audio for streaming => " << bitsPerSecond << " bits-per-second\n";
+ mimeType = "PCMU";
+ if (samplingFrequency == 8000 && numChannels == 1) {
+ payloadFormatCode = 0; // a static RTP payload type
+ }
+#else
+ // Add a filter that converts from little-endian to network (big-endian) order:
+ sessionState.source = EndianSwap16::createNew(*env, wavSource);
+ if (sessionState.source == NULL) {
+ *env << "Unable to create a little->bit-endian order filter from the PCM audio source: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ *env << "Converting to network byte order for streaming\n";
+ mimeType = "L16";
+ if (samplingFrequency == 44100 && numChannels == 2) {
+ payloadFormatCode = 10; // a static RTP payload type
+ } else if (samplingFrequency == 44100 && numChannels == 1) {
+ payloadFormatCode = 11; // a static RTP payload type
+ }
+#endif
+ } else if (bitsPerSample == 20 || bitsPerSample == 24) {
+ // Add a filter that converts from little-endian to network (big-endian) order:
+ sessionState.source = EndianSwap24::createNew(*env, wavSource);
+ if (sessionState.source == NULL) {
+ *env << "Unable to create a little->bit-endian order filter from the PCM audio source: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ *env << "Converting to network byte order for streaming\n";
+ mimeType = bitsPerSample == 20 ? "L20" : "L24";
+ } else { // bitsPerSample == 8 (we assume that bitsPerSample == 4 is only for WA_IMA_ADPCM)
+ // Don't do any transformation; send the 8-bit PCM data 'as is':
+ mimeType = "L8";
+ }
+ } else if (audioFormat == WA_PCMU) {
+ mimeType = "PCMU";
+ if (samplingFrequency == 8000 && numChannels == 1) {
+ payloadFormatCode = 0; // a static RTP payload type
+ }
+ } else if (audioFormat == WA_PCMA) {
+ mimeType = "PCMA";
+ if (samplingFrequency == 8000 && numChannels == 1) {
+ payloadFormatCode = 8; // a static RTP payload type
+ }
+ } else if (audioFormat == WA_IMA_ADPCM) {
+ mimeType = "DVI4";
+ // Use a static payload type, if one is defined:
+ if (numChannels == 1) {
+ if (samplingFrequency == 8000) {
+ payloadFormatCode = 5; // a static RTP payload type
+ } else if (samplingFrequency == 16000) {
+ payloadFormatCode = 6; // a static RTP payload type
+ } else if (samplingFrequency == 11025) {
+ payloadFormatCode = 16; // a static RTP payload type
+ } else if (samplingFrequency == 22050) {
+ payloadFormatCode = 17; // a static RTP payload type
+ }
+ }
+ } else { //unknown format
+ *env << "Unknown audio format code \"" << audioFormat << "\" in WAV file header\n";
+ exit(1);
+ }
+
+ // Create 'groupsocks' for RTP and RTCP:
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+ // Note: This is a multicast address. If you wish instead to stream
+ // using unicast, then you should use the "testOnDemandRTSPServer" demo application,
+ // or the "LIVE555 Media Server" - not this application - as a model.
+
+ const unsigned short rtpPortNum = 2222;
+ const unsigned short rtcpPortNum = rtpPortNum+1;
+ const unsigned char ttl = 255;
+
+ const Port rtpPort(rtpPortNum);
+ const Port rtcpPort(rtcpPortNum);
+
+ sessionState.rtpGroupsock
+ = new Groupsock(*env, destinationAddress, rtpPort, ttl);
+ sessionState.rtpGroupsock->multicastSendOnly(); // we're a SSM source
+ sessionState.rtcpGroupsock
+ = new Groupsock(*env, destinationAddress, rtcpPort, ttl);
+ sessionState.rtcpGroupsock->multicastSendOnly(); // we're a SSM source
+
+ // Create an appropriate audio RTP sink (using "SimpleRTPSink") from the RTP 'groupsock':
+ sessionState.sink
+ = SimpleRTPSink::createNew(*env, sessionState.rtpGroupsock,
+ payloadFormatCode, samplingFrequency,
+ "audio", mimeType, numChannels);
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ const unsigned estimatedSessionBandwidth = (bitsPerSecond + 500)/1000; // in kbps; for RTCP b/w share
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen);
+ CNAME[maxCNAMElen] = '\0'; // just in case
+ sessionState.rtcpInstance
+ = RTCPInstance::createNew(*env, sessionState.rtcpGroupsock,
+ estimatedSessionBandwidth, CNAME,
+ sessionState.sink, NULL /* we're a server */,
+ True /* we're a SSM source*/);
+ // Note: This starts RTCP running automatically
+
+ // Create and start a RTSP server to serve this stream:
+ sessionState.rtspServer = RTSPServer::createNew(*env, 8554);
+ if (sessionState.rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "testStream", inputFileName,
+ "Session streamed by \"testWAVAudiotreamer\"", True/*SSM*/);
+ sms->addSubsession(PassiveServerMediaSubsession::createNew(*sessionState.sink, sessionState.rtcpInstance));
+ sessionState.rtspServer->addServerMediaSession(sms);
+
+ char* url = sessionState.rtspServer->rtspURL(sms);
+ *env << "Play this stream using the URL \"" << url << "\"\n";
+ delete[] url;
+
+ // Finally, start the streaming:
+ *env << "Beginning streaming...\n";
+ sessionState.sink->startPlaying(*sessionState.source, afterPlaying, NULL);
+}
+
+
+void afterPlaying(void* /*clientData*/) {
+ *env << "...done streaming\n";
+
+ // End by closing the media:
+ Medium::close(sessionState.rtspServer);
+ Medium::close(sessionState.rtcpInstance);
+ Medium::close(sessionState.sink);
+ delete sessionState.rtpGroupsock;
+ Medium::close(sessionState.source);
+ delete sessionState.rtcpGroupsock;
+
+ // We're done:
+ exit(0);
+}
diff --git a/testProgs/vobStreamer.cpp b/testProgs/vobStreamer.cpp
new file mode 100644
index 0000000..24975f9
--- /dev/null
+++ b/testProgs/vobStreamer.cpp
@@ -0,0 +1,299 @@
+/**********
+This library is free software; you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the
+Free Software Foundation; either version 3 of the License, or (at your
+option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.)
+
+This library is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+**********/
+// Copyright (c) 1996-2020, Live Networks, Inc. All rights reserved
+// A test program that reads a VOB file
+// splits it into Audio (AC3) and Video (MPEG) Elementary Streams,
+// and streams both using RTP.
+// main program
+
+#include "liveMedia.hh"
+#include "AC3AudioStreamFramer.hh"
+#include "BasicUsageEnvironment.hh"
+#include "GroupsockHelper.hh"
+
+char const* programName;
+// Whether to stream *only* "I" (key) frames
+// (e.g., to reduce network bandwidth):
+Boolean iFramesOnly = False;
+
+unsigned const VOB_AUDIO = 1<<0;
+unsigned const VOB_VIDEO = 1<<1;
+unsigned mediaToStream = VOB_AUDIO|VOB_VIDEO; // by default
+
+char const** inputFileNames;
+char const** curInputFileName;
+Boolean haveReadOneFile = False;
+
+UsageEnvironment* env;
+MPEG1or2Demux* mpegDemux;
+AC3AudioStreamFramer* audioSource = NULL;
+FramedSource* videoSource = NULL;
+RTPSink* audioSink = NULL;
+RTCPInstance* audioRTCP = NULL;
+RTPSink* videoSink = NULL;
+RTCPInstance* videoRTCP = NULL;
+RTSPServer* rtspServer = NULL;
+unsigned short const defaultRTSPServerPortNum = 554;
+unsigned short rtspServerPortNum = defaultRTSPServerPortNum;
+
+Groupsock* rtpGroupsockAudio;
+Groupsock* rtcpGroupsockAudio;
+Groupsock* rtpGroupsockVideo;
+Groupsock* rtcpGroupsockVideo;
+
+void usage() {
+ *env << "usage: " << programName << " [-i] [-a|-v] "
+ "[-p <RTSP-server-port-number>] "
+ "<VOB-file>...<VOB-file>\n";
+ exit(1);
+}
+
+void play(); // forward
+
+int main(int argc, char const** argv) {
+ // Begin by setting up our usage environment:
+ TaskScheduler* scheduler = BasicTaskScheduler::createNew();
+ env = BasicUsageEnvironment::createNew(*scheduler);
+
+ // Parse command-line options:
+ // (Unfortunately we can't use getopt() here; Windoze doesn't have it)
+ programName = argv[0];
+ while (argc > 2) {
+ char const* const opt = argv[1];
+ if (opt[0] != '-') break;
+ switch (opt[1]) {
+
+ case 'i': { // transmit video I-frames only
+ iFramesOnly = True;
+ break;
+ }
+
+ case 'a': { // transmit audio, but not video
+ mediaToStream &=~ VOB_VIDEO;
+ break;
+ }
+
+ case 'v': { // transmit video, but not audio
+ mediaToStream &=~ VOB_AUDIO;
+ break;
+ }
+
+ case 'p': { // specify port number for built-in RTSP server
+ int portArg;
+ if (sscanf(argv[2], "%d", &portArg) != 1) {
+ usage();
+ }
+ if (portArg <= 0 || portArg >= 65536) {
+ *env << "bad port number: " << portArg
+ << " (must be in the range (0,65536))\n";
+ usage();
+ }
+ rtspServerPortNum = (unsigned short)portArg;
+ ++argv; --argc;
+ break;
+ }
+
+ default: {
+ usage();
+ break;
+ }
+ }
+
+ ++argv; --argc;
+ }
+ if (argc < 2) usage();
+ if (mediaToStream == 0) {
+ *env << "The -a and -v flags cannot both be used!\n";
+ usage();
+ }
+ if (iFramesOnly && (mediaToStream&VOB_VIDEO) == 0) {
+ *env << "Warning: Because we're not streaming video, the -i flag has no effect.\n";
+ }
+
+ inputFileNames = &argv[1];
+ curInputFileName = inputFileNames;
+
+ // Create 'groupsocks' for RTP and RTCP:
+ struct in_addr destinationAddress;
+ destinationAddress.s_addr = chooseRandomIPv4SSMAddress(*env);
+
+ const unsigned short rtpPortNumAudio = 4444;
+ const unsigned short rtcpPortNumAudio = rtpPortNumAudio+1;
+ const unsigned short rtpPortNumVideo = 8888;
+ const unsigned short rtcpPortNumVideo = rtpPortNumVideo+1;
+ const unsigned char ttl = 255;
+
+ const Port rtpPortAudio(rtpPortNumAudio);
+ const Port rtcpPortAudio(rtcpPortNumAudio);
+ const Port rtpPortVideo(rtpPortNumVideo);
+ const Port rtcpPortVideo(rtcpPortNumVideo);
+
+ const unsigned maxCNAMElen = 100;
+ unsigned char CNAME[maxCNAMElen+1];
+ gethostname((char*)CNAME, maxCNAMElen);
+ CNAME[maxCNAMElen] = '\0'; // just in case
+
+ if (mediaToStream&VOB_AUDIO) {
+ rtpGroupsockAudio
+ = new Groupsock(*env, destinationAddress, rtpPortAudio, ttl);
+ rtpGroupsockAudio->multicastSendOnly(); // because we're a SSM source
+
+ // Create an 'AC3 Audio RTP' sink from the RTP 'groupsock':
+ audioSink
+ = AC3AudioRTPSink::createNew(*env, rtpGroupsockAudio, 96, 0);
+ // set the RTP timestamp frequency 'for real' later
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ rtcpGroupsockAudio
+ = new Groupsock(*env, destinationAddress, rtcpPortAudio, ttl);
+ rtcpGroupsockAudio->multicastSendOnly(); // because we're a SSM source
+ const unsigned estimatedSessionBandwidthAudio
+ = 160; // in kbps; for RTCP b/w share
+ audioRTCP = RTCPInstance::createNew(*env, rtcpGroupsockAudio,
+ estimatedSessionBandwidthAudio, CNAME,
+ audioSink, NULL /* we're a server */,
+ True /* we're a SSM source */);
+ // Note: This starts RTCP running automatically
+ }
+
+ if (mediaToStream&VOB_VIDEO) {
+ rtpGroupsockVideo
+ = new Groupsock(*env, destinationAddress, rtpPortVideo, ttl);
+ rtpGroupsockVideo->multicastSendOnly(); // because we're a SSM source
+
+ // Create a 'MPEG Video RTP' sink from the RTP 'groupsock':
+ videoSink = MPEG1or2VideoRTPSink::createNew(*env, rtpGroupsockVideo);
+
+ // Create (and start) a 'RTCP instance' for this RTP sink:
+ rtcpGroupsockVideo
+ = new Groupsock(*env, destinationAddress, rtcpPortVideo, ttl);
+ rtcpGroupsockVideo->multicastSendOnly(); // because we're a SSM source
+ const unsigned estimatedSessionBandwidthVideo
+ = 4500; // in kbps; for RTCP b/w share
+ videoRTCP = RTCPInstance::createNew(*env, rtcpGroupsockVideo,
+ estimatedSessionBandwidthVideo, CNAME,
+ videoSink, NULL /* we're a server */,
+ True /* we're a SSM source */);
+ // Note: This starts RTCP running automatically
+ }
+
+ if (rtspServer == NULL) {
+ rtspServer = RTSPServer::createNew(*env, rtspServerPortNum);
+ if (rtspServer == NULL) {
+ *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
+ *env << "To change the RTSP server's port number, use the \"-p <port number>\" option.\n";
+ exit(1);
+ }
+ ServerMediaSession* sms
+ = ServerMediaSession::createNew(*env, "vobStream", *curInputFileName,
+ "Session streamed by \"vobStreamer\"", True /*SSM*/);
+ if (audioSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*audioSink, audioRTCP));
+ if (videoSink != NULL) sms->addSubsession(PassiveServerMediaSubsession::createNew(*videoSink, videoRTCP));
+ rtspServer->addServerMediaSession(sms);
+
+ *env << "Created RTSP server.\n";
+
+ // Display our "rtsp://" URL, for clients to connect to:
+ char* url = rtspServer->rtspURL(sms);
+ *env << "Access this stream using the URL:\n\t" << url << "\n";
+ delete[] url;
+ }
+
+ // Finally, start the streaming:
+ *env << "Beginning streaming...\n";
+ play();
+
+ env->taskScheduler().doEventLoop(); // does not return
+
+ return 0; // only to prevent compiler warning
+}
+
+void afterPlaying(void* clientData) {
+ // One of the sinks has ended playing.
+ // Check whether any of the sources have a pending read. If so,
+ // wait until its sink ends playing also:
+ if ((audioSource != NULL && audioSource->isCurrentlyAwaitingData()) ||
+ (videoSource != NULL && videoSource->isCurrentlyAwaitingData())) {
+ return;
+ }
+
+ // Now that both sinks have ended, close both input sources,
+ // and start playing again:
+ *env << "...done reading from file\n";
+
+ if (audioSink != NULL) audioSink->stopPlaying();
+ if (videoSink != NULL) videoSink->stopPlaying();
+ // ensures that both are shut down
+ Medium::close(audioSource);
+ Medium::close(videoSource);
+ Medium::close(mpegDemux);
+ // Note: This also closes the input file that this source read from.
+
+ // Move to the next file name (if any):
+ ++curInputFileName;
+
+ // Start playing once again:
+ play();
+}
+
+void play() {
+ if (*curInputFileName == NULL) {
+ // We have reached the end of the file name list.
+ // Start again, unless we didn't succeed in reading any files:
+ if (!haveReadOneFile) exit(1);
+ haveReadOneFile = False;
+ curInputFileName = inputFileNames;
+ }
+
+ // Open the current input file as a 'byte-stream file source':
+ ByteStreamFileSource* fileSource
+ = ByteStreamFileSource::createNew(*env, *curInputFileName);
+ if (fileSource == NULL) {
+ *env << "Unable to open file \"" << *curInputFileName
+ << "\" as a byte-stream file source\n";
+ // Try the next file instead:
+ ++curInputFileName;
+ play();
+ return;
+ }
+ haveReadOneFile = True;
+
+ // We must demultiplex Audio and Video Elementary Streams
+ // from the input source:
+ mpegDemux = MPEG1or2Demux::createNew(*env, fileSource);
+ if (mediaToStream&VOB_AUDIO) {
+ FramedSource* audioES = mpegDemux->newElementaryStream(0xBD);
+ // Because, in a VOB file, the AC3 audio has stream id 0xBD
+ audioSource
+ = AC3AudioStreamFramer::createNew(*env, audioES, 0x80);
+ }
+ if (mediaToStream&VOB_VIDEO) {
+ FramedSource* videoES = mpegDemux->newVideoStream();
+
+ videoSource
+ = MPEG1or2VideoStreamFramer::createNew(*env, videoES, iFramesOnly);
+ }
+
+ // Finally, start playing each sink.
+ *env << "Beginning to read from \"" << *curInputFileName << "\"...\n";
+ if (videoSink != NULL) {
+ videoSink->startPlaying(*videoSource, afterPlaying, videoSink);
+ }
+ if (audioSink != NULL) {
+ audioSink->setRTPTimestampFrequency(audioSource->samplingRate());
+ audioSink->startPlaying(*audioSource, afterPlaying, audioSink);
+ }
+}
diff --git a/win32config b/win32config
new file mode 100644
index 0000000..e74cb22
--- /dev/null
+++ b/win32config
@@ -0,0 +1,47 @@
+# Comment out the following line to produce Makefiles that generate debuggable code:
+NODEBUG=1
+
+# The following definition ensures that we are properly matching
+# the WinSock2 library file with the correct header files.
+# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
+TARGETOS = WINNT
+
+# If for some reason you wish to use WinSock1 instead, uncomment the
+# following two definitions.
+# (will link with "wsock32.lib" and include "winsock.h")
+#TARGETOS = WIN95
+#APPVER = 4.0
+
+!include <ntwin32.mak>
+
+UI_OPTS = $(guilflags) $(guilibsdll)
+# Use the following to get a console (e.g., for debugging):
+CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
+CPU=i386
+
+TOOLS32 = c:\Program Files\DevStudio\Vc
+COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I"$(TOOLS32)\include" -DNO_OPENSSL=1
+C = c
+C_COMPILER = "$(TOOLS32)\bin\cl"
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(C_COMPILER)
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
+OBJ = obj
+LINK = $(link) -out:
+LIBRARY_LINK = lib -out:
+LINK_OPTS_0 = $(linkdebug) msvcirt.lib
+LIBRARY_LINK_OPTS =
+LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
+CONSOLE_LINK_OPTS = $(LINK_OPTS_0) $(CONSOLE_UI_OPTS)
+SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
+LIB_SUFFIX = lib
+LIBS_FOR_CONSOLE_APPLICATION =
+LIBS_FOR_GUI_APPLICATION =
+MULTIMEDIA_LIBS = winmm.lib
+EXE = .exe
+PLATFORM = Windows
+
+rc32 = "$(TOOLS32)\bin\rc"
+.rc.res:
+ $(rc32) $<
diff --git a/win32config.Borland b/win32config.Borland
new file mode 100644
index 0000000..edc1219
--- /dev/null
+++ b/win32config.Borland
@@ -0,0 +1,46 @@
+# Comment out the following line to produce Makefiles that generate debuggable code:
+NODEBUG=1
+
+# The following definition ensures that we are properly matching
+# the WinSock2 library file with the correct header files.
+# (will link with "ws2_32.lib" and include "winsock2.h" & "Ws2tcpip.h")
+TARGETOS = WINNT
+
+# If for some reason you wish to use WinSock1 instead, uncomment the
+# following two definitions.
+# (will link with "wsock32.lib" and include "winsock.h")
+#TARGETOS = WIN95
+#APPVER = 4.0
+
+#!include <ntwin32.mak>
+
+UI_OPTS = $(guilflags) $(guilibsdll)
+# Use the following to get a console (e.g., for debugging):
+CONSOLE_UI_OPTS = $(conlflags) $(conlibsdll)
+CPU=i386
+
+TOOLS32 = C:\Progra~1\Borland\CBuilder5
+COMPILE_OPTS = $(INCLUDES) $(cdebug) $(cflags) $(cvarsdll) -I. -I$(TOOLS32)\include -DNO_OPENSSL=1
+C = c
+C_COMPILER = $(TOOLS32)\bin\bcc32
+C_FLAGS = $(COMPILE_OPTS)
+CPP = cpp
+CPLUSPLUS_COMPILER = $(C_COMPILER)
+CPLUSPLUS_FLAGS = $(COMPILE_OPTS)
+OBJ = obj
+LINK = $(TOOLS32)\bin\ilink32
+LIBRARY_LINK = $(TOOLS32)\bin\tlib
+LINK_OPTS_0 = $(linkdebug) msvcirt.lib
+LIBRARY_LINK_OPTS = /u
+LINK_OPTS = $(LINK_OPTS_0) $(UI_OPTS)
+CONSOLE_LINK_OPTS = c0x32
+
+SERVICE_LINK_OPTS = kernel32.lib advapi32.lib shell32.lib -subsystem:console,$(APPVER)
+LIB_SUFFIX = lib
+LIBS_FOR_CONSOLE_APPLICATION = cw32.lib import32.lib
+LIBS_FOR_GUI_APPLICATION = ,,cw32
+EXE =
+
+rc32 = "$(TOOLS32)\bin\brc32"
+.rc.res:
+ $(rc32) $<