Update upstream source from tag 'upstream/2020.07.09'
Update to upstream version '2020.07.09'
with Debian dir 6398df5cc037751af1034011949ad4d31a8a9e81
Sebastian Ramacher
3 years ago
3 | 3 | #ifndef _BASICUSAGEENVIRONMENT_VERSION_HH |
4 | 4 | #define _BASICUSAGEENVIRONMENT_VERSION_HH |
5 | 5 | |
6 | #define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_STRING "2020.04.12" | |
7 | #define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_INT 1586649600 | |
6 | #define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_STRING "2020.07.09" | |
7 | #define BASICUSAGEENVIRONMENT_LIBRARY_VERSION_INT 1594252800 | |
8 | 8 | |
9 | 9 | #endif |
3 | 3 | #ifndef _USAGEENVIRONMENT_VERSION_HH |
4 | 4 | #define _USAGEENVIRONMENT_VERSION_HH |
5 | 5 | |
6 | #define USAGEENVIRONMENT_LIBRARY_VERSION_STRING "2020.04.12" | |
7 | #define USAGEENVIRONMENT_LIBRARY_VERSION_INT 1586649600 | |
6 | #define USAGEENVIRONMENT_LIBRARY_VERSION_STRING "2020.07.09" | |
7 | #define USAGEENVIRONMENT_LIBRARY_VERSION_INT 1594252800 | |
8 | 8 | |
9 | 9 | #endif |
2 | 2 | # At least one interface changes, or is removed => CURRENT += 1; REVISION = 0; AGE = 0 |
3 | 3 | # One or more interfaces were added, but no existing interfaces were changed or removed => CURRENT += 1; REVISION = 0; AGE += 1 |
4 | 4 | |
5 | libliveMedia_VERSION_CURRENT=79 | |
6 | libliveMedia_VERSION_REVISION=0 | |
7 | libliveMedia_VERSION_AGE=0 | |
5 | libliveMedia_VERSION_CURRENT=80 | |
6 | libliveMedia_VERSION_REVISION=3 | |
7 | libliveMedia_VERSION_AGE=1 | |
8 | 8 | libliveMedia_LIB_SUFFIX=so.$(shell expr $(libliveMedia_VERSION_CURRENT) - $(libliveMedia_VERSION_AGE)).$(libliveMedia_VERSION_AGE).$(libliveMedia_VERSION_REVISION) |
9 | 9 | |
10 | 10 | libBasicUsageEnvironment_VERSION_CURRENT=1 |
13 | 13 | LIB_SUFFIX = a |
14 | 14 | LIBS_FOR_CONSOLE_APPLICATION = -lws2_32 -lssl -lcrypto |
15 | 15 | LIBS_FOR_GUI_APPLICATION = -lws2_32 |
16 | EXE = | |
16 | EXE = .exe |
3 | 3 | #ifndef _GROUPSOCK_VERSION_HH |
4 | 4 | #define _GROUPSOCK_VERSION_HH |
5 | 5 | |
6 | #define GROUPSOCK_LIBRARY_VERSION_STRING "2020.04.12" | |
7 | #define GROUPSOCK_LIBRARY_VERSION_INT 1586649600 | |
6 | #define GROUPSOCK_LIBRARY_VERSION_STRING "2020.07.09" | |
7 | #define GROUPSOCK_LIBRARY_VERSION_INT 1594252800 | |
8 | 8 | |
9 | 9 | #endif |
0 | PREFIX = /usr/local | |
0 | 1 | INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include |
1 | 2 | # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. |
2 | 3 | libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) |
1 | 1 | |
2 | 2 | HLS_PROXY = live555HLSProxy$(EXE) |
3 | 3 | |
4 | PREFIX = /usr/local | |
5 | 4 | ALL = $(HLS_PROXY) |
6 | 5 | all: $(ALL) |
7 | 6 |
33 | 33 | portNumBits tunnelOverHTTPPortNum = 0; |
34 | 34 | char const* hlsPrefix; |
35 | 35 | MediaSession* session; |
36 | MPEG2TransportStreamFromESSource* transportStream; | |
37 | MediaSubsessionIterator* iter; | |
36 | 38 | MediaSubsession* subsession; |
39 | unsigned numUsableSubsessions = 0; | |
37 | 40 | double duration = 0.0; |
38 | 41 | Boolean createHandlerServerForREGISTERCommand = False; |
39 | 42 | portNumBits handlerServerForREGISTERCommandPortNum = 0; |
197 | 200 | return env << subsession.mediumName() << "/" << subsession.codecName(); |
198 | 201 | } |
199 | 202 | |
200 | void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString); // forward | |
203 | void setupNextSubsession(RTSPClient* rtspClient); // forward | |
201 | 204 | |
202 | 205 | void continueAfterDESCRIBE(RTSPClient* rtspClient, int resultCode, char* resultString) { |
203 | 206 | do { |
206 | 209 | delete[] resultString; |
207 | 210 | break; |
208 | 211 | } |
212 | ||
213 | // Create a Transport Stream to multiplex the Elementary Stream data from each subsession: | |
214 | transportStream = MPEG2TransportStreamFromESSource::createNew(*env); | |
209 | 215 | |
210 | 216 | // Create a media session object from the SDP description. |
211 | 217 | // Then iterate over it, to look for subsession(s) that we can handle: |
219 | 225 | break; |
220 | 226 | } |
221 | 227 | |
222 | MediaSubsessionIterator* iter = new MediaSubsessionIterator(*session); | |
223 | while ((subsession = iter->next()) != NULL) { | |
224 | if (strcmp(subsession->mediumName(), "video") == 0 && | |
225 | strcmp(subsession->codecName(), "H264") == 0) break; // use this subsession | |
226 | } | |
227 | delete iter; | |
228 | ||
229 | if (subsession == NULL) { | |
230 | *env << *rtspClient << "This stream has no usable subsessions\n"; | |
231 | break; | |
232 | } | |
233 | ||
234 | if (!subsession->initiate()) { | |
235 | *env << *rtspClient << "Failed to initiate the \"" << *subsession << "\" subsession: " << env->getResultMsg() << "\n"; | |
236 | break; | |
237 | } else { | |
238 | *env << *rtspClient << "Initiated the \"" << *subsession << "\" subsession\n"; | |
239 | } | |
240 | ||
241 | // Continue setting up this subsession, by sending a RTSP "SETUP" command: | |
242 | rtspClient->sendSetupCommand(*subsession, continueAfterSETUP, False, streamUsingTCP, | |
243 | False, ourAuthenticator); | |
228 | iter = new MediaSubsessionIterator(*session); | |
229 | setupNextSubsession(rtspClient); | |
244 | 230 | return; |
245 | 231 | } while (0); |
246 | 232 | |
248 | 234 | exit(1); |
249 | 235 | } |
250 | 236 | |
251 | void segmentationCallback(void* clientData, char const* segmentFileName, double segmentDuration); // forward | |
252 | void afterPlaying(void* clientData); // forward | |
253 | void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString); // forward | |
237 | void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString); // forward | |
238 | void startPlayingSession(RTSPClient* rtspClient); // forward | |
239 | ||
// Advances the global subsession iterator "iter", SETting UP each supported
// subsession in turn.  Uses file-scope globals: "iter", "subsession",
// "numUsableSubsessions", "env", "streamUsingTCP", "ourAuthenticator".
// When the iterator is exhausted: exit(0) if nothing was usable, otherwise
// start playback of the whole session.
void setupNextSubsession(RTSPClient* rtspClient) {
  subsession = iter->next();
  if (subsession != NULL) {
    // Check whether this subsession is a codec that we support.
    // We support H.264 or H.265 video, and AAC audio.
    if ((strcmp(subsession->mediumName(), "video") == 0 &&
	 (strcmp(subsession->codecName(), "H264") == 0 ||
	  strcmp(subsession->codecName(), "H265") == 0)) ||
	(strcmp(subsession->mediumName(), "audio") == 0 &&
	 strcmp(subsession->codecName(), "MPEG4-GENERIC"/*aka. AAC*/) == 0)) {
      // Use this subsession.
      ++numUsableSubsessions;
      if (!subsession->initiate()) {
	*env << *rtspClient << "Failed to initiate the \"" << *subsession << "\" subsession: " << env->getResultMsg() << "\n";
      } else {
	*env << *rtspClient << "Initiated the \"" << *subsession << "\" subsession\n";

	// Continue setting up this subsession, by sending a RTSP "SETUP" command:
	rtspClient->sendSetupCommand(*subsession, continueAfterSETUP, False, streamUsingTCP,
				     False, ourAuthenticator);
	return; // "continueAfterSETUP" re-enters this function for the next subsession
      }
    }
    setupNextSubsession(rtspClient); // give up on this subsession; go to the next one
    return;
  }

  // We've gone through all of the subsessions.
  if (numUsableSubsessions == 0) {
    *env << *rtspClient << "This stream has no usable subsessions\n";
    exit(0);
  }

  startPlayingSession(rtspClient);
}
254 | 275 | |
255 | 276 | void continueAfterSETUP(RTSPClient* rtspClient, int resultCode, char* resultString) { |
256 | 277 | do { |
262 | 283 | |
263 | 284 | *env << *rtspClient << "Set up the \"" << *subsession << "\" subsession\n"; |
264 | 285 | |
265 | // Having successfully setup the subsession, create a data sink for it, and call "startPlaying()" on it. | |
266 | // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later, | |
267 | // after we've sent a RTSP "PLAY" command.) | |
268 | ||
269 | subsession->sink | |
270 | = HLSSegmenter::createNew(*env, OUR_HLS_SEGMENTATION_DURATION, hlsPrefix, segmentationCallback); | |
271 | ||
272 | // Create a 'framer' filter for the input source, to put the stream of NAL units into a | |
273 | // form that's usable in output Transport Streams. | |
274 | // (Note that we use a *DiscreteFramer*, because the input source is a stream of discrete | |
275 | // NAL units - i.e., one at a time.) | |
276 | H264VideoStreamDiscreteFramer* framer | |
277 | = H264VideoStreamDiscreteFramer::createNew(*env, subsession->readSource(), | |
278 | True/*includeStartCodeInOutput*/, | |
279 | True/*insertAccessUnitDelimiters*/); | |
280 | ||
281 | // Then create a filter that packs the H.264 video data into a Transport Stream: | |
282 | MPEG2TransportStreamFromESSource* tsFrames = MPEG2TransportStreamFromESSource::createNew(*env); | |
283 | tsFrames->addNewVideoSource(framer, 5/*mpegVersion: H.264*/); | |
284 | ||
285 | // Start playing the sink object: | |
286 | *env << "Beginning to read...\n"; | |
287 | subsession->sink->startPlaying(*tsFrames, afterPlaying, NULL); | |
286 | // Feed this subsession's input source into the Transport Stream: | |
287 | if (strcmp(subsession->mediumName(), "video") == 0) { | |
288 | // Create a 'framer' filter for the input source, to put the stream of NAL units into a | |
289 | // form that's usable to output to the Transport Stream. | |
290 | // (Note that we use a *DiscreteFramer*, because the input source is a stream of discrete | |
291 | // NAL units - i.e., one at a time.) | |
292 | H264or5VideoStreamDiscreteFramer* framer; | |
293 | int mpegVersion; | |
294 | ||
295 | if (strcmp(subsession->codecName(), "H264") == 0) { | |
296 | mpegVersion = 5; // for H.264 | |
297 | framer = H264VideoStreamDiscreteFramer::createNew(*env, subsession->readSource(), | |
298 | True/*includeStartCodeInOutput*/, | |
299 | True/*insertAccessUnitDelimiters*/); | |
300 | ||
301 | // Add any known SPS and PPS NAL units to the framer, so they'll get output ASAP: | |
302 | u_int8_t* sps = NULL; unsigned spsSize = 0; | |
303 | u_int8_t* pps = NULL; unsigned ppsSize = 0; | |
304 | unsigned numSPropRecords; | |
305 | SPropRecord* sPropRecords | |
306 | = parseSPropParameterSets(subsession->fmtp_spropparametersets(), numSPropRecords); | |
307 | if (numSPropRecords > 0) { | |
308 | sps = sPropRecords[0].sPropBytes; | |
309 | spsSize = sPropRecords[0].sPropLength; | |
310 | } | |
311 | if (numSPropRecords > 1) { | |
312 | pps = sPropRecords[1].sPropBytes; | |
313 | ppsSize = sPropRecords[1].sPropLength; | |
314 | } | |
315 | framer->setVPSandSPSandPPS(NULL, 0, sps, spsSize, pps, ppsSize); | |
316 | delete[] sPropRecords; | |
317 | } else { // H.265 | |
318 | mpegVersion = 6; // for H.265 | |
319 | framer = H265VideoStreamDiscreteFramer::createNew(*env, subsession->readSource(), | |
320 | True/*includeStartCodeInOutput*/, | |
321 | True/*insertAccessUnitDelimiters*/); | |
322 | ||
323 | // Add any known VPS, SPS and PPS NAL units to the framer, so they'll get output ASAP: | |
324 | u_int8_t* vps = NULL; unsigned vpsSize = 0; | |
325 | u_int8_t* sps = NULL; unsigned spsSize = 0; | |
326 | u_int8_t* pps = NULL; unsigned ppsSize = 0; | |
327 | unsigned numSPropRecords; | |
328 | SPropRecord *sPropRecordsVPS, *sPropRecordsSPS, *sPropRecordsPPS; | |
329 | ||
330 | sPropRecordsVPS = parseSPropParameterSets(subsession->fmtp_spropvps(), numSPropRecords); | |
331 | if (numSPropRecords > 0) { | |
332 | vps = sPropRecordsVPS[0].sPropBytes; | |
333 | vpsSize = sPropRecordsVPS[0].sPropLength; | |
334 | } | |
335 | ||
336 | sPropRecordsSPS = parseSPropParameterSets(subsession->fmtp_spropsps(), numSPropRecords); | |
337 | if (numSPropRecords > 0) { | |
338 | sps = sPropRecordsSPS[0].sPropBytes; | |
339 | spsSize = sPropRecordsSPS[0].sPropLength; | |
340 | } | |
341 | ||
342 | sPropRecordsPPS = parseSPropParameterSets(subsession->fmtp_sproppps(), numSPropRecords); | |
343 | if (numSPropRecords > 0) { | |
344 | pps = sPropRecordsPPS[0].sPropBytes; | |
345 | ppsSize = sPropRecordsPPS[0].sPropLength; | |
346 | } | |
347 | ||
348 | framer->setVPSandSPSandPPS(vps, vpsSize, sps, spsSize, pps, ppsSize); | |
349 | delete[] sPropRecordsVPS; delete[] sPropRecordsSPS; delete[] sPropRecordsPPS; | |
350 | } | |
351 | ||
352 | transportStream->addNewVideoSource(framer, mpegVersion); | |
353 | } else { // audio (AAC) | |
354 | // Create a 'framer' filter for the input source, to add a ADTS header to each AAC frame, | |
355 | // to make the audio playable. | |
356 | ADTSAudioStreamDiscreteFramer* framer | |
357 | = ADTSAudioStreamDiscreteFramer::createNew(*env, subsession->readSource(), | |
358 | subsession->fmtp_config()); | |
359 | transportStream->addNewAudioSource(framer, 4/*mpegVersion: AAC*/); | |
360 | } | |
288 | 361 | |
289 | 362 | // Also set up BYE handler//#####@@@@@ |
290 | ||
291 | // Finally, send a RTSP "PLAY" command to tell the server to start streaming: | |
292 | if (session->absStartTime() != NULL) { | |
293 | // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command: | |
294 | rtspClient->sendPlayCommand(*session, continueAfterPLAY, session->absStartTime(), session->absEndTime(), 1.0f, ourAuthenticator); | |
295 | } else { | |
296 | duration = session->playEndTime() - session->playStartTime(); | |
297 | rtspClient->sendPlayCommand(*session, continueAfterPLAY, 0.0f, -1.0f, 1.0f, ourAuthenticator); | |
298 | } | |
299 | ||
300 | return; | |
301 | 363 | } while (0); |
302 | 364 | |
303 | // An error occurred: | |
304 | exit(1); | |
365 | // Set up the next subsession, if any: | |
366 | setupNextSubsession(rtspClient); | |
367 | } | |
368 | ||
369 | void continueAfterPLAY(RTSPClient* rtspClient, int resultCode, char* resultString); // forward | |
370 | void segmentationCallback(void* clientData, char const* segmentFileName, double segmentDuration); // forward | |
371 | void afterPlaying(void* clientData); // forward | |
372 | ||
// Called once all usable subsessions have been SET UP: creates the HLS
// segmenter sink, starts it reading from the muxed Transport Stream
// (global "transportStream"), then sends the RTSP "PLAY" command to start
// the media flowing.
void startPlayingSession(RTSPClient* rtspClient) {
  // Having successfully setup the session, create a data sink for it, and call "startPlaying()" on it.
  // (This will prepare the data sink to receive data; the actual flow of data from the client won't start happening until later,
  // after we've sent a RTSP "PLAY" command.)

  MediaSink* sink
    = HLSSegmenter::createNew(*env, OUR_HLS_SEGMENTATION_DURATION, hlsPrefix, segmentationCallback);

  // Start playing the sink object:
  *env << "Beginning to read...\n";
  sink->startPlaying(*transportStream, afterPlaying, NULL);

  // Now, send a RTSP "PLAY" command to start the streaming:
  if (session->absStartTime() != NULL) {
    // Special case: The stream is indexed by 'absolute' time, so send an appropriate "PLAY" command:
    rtspClient->sendPlayCommand(*session, continueAfterPLAY, session->absStartTime(), session->absEndTime(), 1.0f, ourAuthenticator);
  } else {
    duration = session->playEndTime() - session->playStartTime();
    rtspClient->sendPlayCommand(*session, continueAfterPLAY, 0.0f, -1.0f, 1.0f, ourAuthenticator);
  }
}
306 | 394 | |
307 | 395 | // A record that defines a segment that has been written. These records are kept in a list: |
0 | ********** | |
1 | This library is free software; you can redistribute it and/or modify it under | |
2 | the terms of the GNU Lesser General Public License as published by the | |
3 | Free Software Foundation; either version 3 of the License, or (at your | |
4 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) | |
5 | ||
6 | This library is distributed in the hope that it will be useful, but WITHOUT | |
7 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
8 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for | |
9 | more details. | |
10 | ||
11 | You should have received a copy of the GNU Lesser General Public License | |
12 | along with this library; if not, write to the Free Software Foundation, Inc., | |
13 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
14 | **********/ | |
15 | // "liveMedia" | |
16 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. | |
17 | // A generic media server class, used to implement a RTSP server, and any other server that uses | |
18 | // "ServerMediaSession" objects to describe media to be served. | |
19 | // Implementation | |
20 | ||
21 | #include "GenericMediaServer.hh" | |
22 | #include <GroupsockHelper.hh> | |
23 | #if defined(__WIN32__) || defined(_WIN32) || defined(_QNX4) | |
24 | #define snprintf _snprintf | |
25 | #endif | |
26 | ||
27 | ////////// GenericMediaServer implementation ////////// | |
28 | ||
29 | void GenericMediaServer::addServerMediaSession(ServerMediaSession* serverMediaSession) { | |
30 | if (serverMediaSession == NULL) return; | |
31 | ||
32 | char const* sessionName = serverMediaSession->streamName(); | |
33 | if (sessionName == NULL) sessionName = ""; | |
34 | removeServerMediaSession(sessionName); // in case an existing "ServerMediaSession" with this name already exists | |
35 | ||
36 | fServerMediaSessions->Add(sessionName, (void*)serverMediaSession); | |
37 | } | |
38 | ||
39 | ServerMediaSession* GenericMediaServer | |
40 | ::lookupServerMediaSession(char const* streamName, Boolean /*isFirstLookupInSession*/) { | |
41 | // Default implementation: | |
42 | return (ServerMediaSession*)(fServerMediaSessions->Lookup(streamName)); | |
43 | } | |
44 | ||
// Unregisters "serverMediaSession" from the table.  If no client session still
// references it, it is closed immediately; otherwise it is flagged to delete
// itself once its reference count drops to zero.
void GenericMediaServer::removeServerMediaSession(ServerMediaSession* serverMediaSession) {
  if (serverMediaSession == NULL) return;

  fServerMediaSessions->Remove(serverMediaSession->streamName());
  if (serverMediaSession->referenceCount() == 0) {
    Medium::close(serverMediaSession);
  } else {
    serverMediaSession->deleteWhenUnreferenced() = True;
  }
}

// Convenience overload: look the session up by stream name first.
void GenericMediaServer::removeServerMediaSession(char const* streamName) {
  removeServerMediaSession(GenericMediaServer::lookupServerMediaSession(streamName));
}
59 | ||
// Deletes every "ClientSession" that is currently streaming from
// "serverMediaSession".
// NOTE(review): each "delete clientSession" removes that session from
// "fClientSessions" (see ~ClientSession), i.e. the table is modified while the
// iterator is live — this relies on "HashTable::Iterator" tolerating removal
// of the current entry; confirm against the HashTable implementation.
void GenericMediaServer::closeAllClientSessionsForServerMediaSession(ServerMediaSession* serverMediaSession) {
  if (serverMediaSession == NULL) return;

  HashTable::Iterator* iter = HashTable::Iterator::create(*fClientSessions);
  GenericMediaServer::ClientSession* clientSession;
  char const* key; // dummy
  while ((clientSession = (GenericMediaServer::ClientSession*)(iter->next(key))) != NULL) {
    if (clientSession->fOurServerMediaSession == serverMediaSession) {
      delete clientSession;
    }
  }
  delete iter;
}

// Convenience overload: look the session up by stream name first.
void GenericMediaServer::closeAllClientSessionsForServerMediaSession(char const* streamName) {
  closeAllClientSessionsForServerMediaSession(lookupServerMediaSession(streamName));
}
77 | ||
78 | void GenericMediaServer::deleteServerMediaSession(ServerMediaSession* serverMediaSession) { | |
79 | if (serverMediaSession == NULL) return; | |
80 | ||
81 | closeAllClientSessionsForServerMediaSession(serverMediaSession); | |
82 | removeServerMediaSession(serverMediaSession); | |
83 | } | |
84 | ||
85 | void GenericMediaServer::deleteServerMediaSession(char const* streamName) { | |
86 | deleteServerMediaSession(lookupServerMediaSession(streamName)); | |
87 | } | |
88 | ||
// Constructor.  "ourSocket" must already be a bound, listening socket
// (typically created via setUpOurSocket()); this object takes ownership of it.
// "reclamationSeconds" == 0 disables inactivity reclamation of client sessions
// (see ClientSession::noteLiveness()).
GenericMediaServer
::GenericMediaServer(UsageEnvironment& env, int ourSocket, Port ourPort,
		     unsigned reclamationSeconds)
  : Medium(env),
    fServerSocket(ourSocket), fServerPort(ourPort), fReclamationSeconds(reclamationSeconds),
    fServerMediaSessions(HashTable::create(STRING_HASH_KEYS)),
    fClientConnections(HashTable::create(ONE_WORD_HASH_KEYS)),
    fClientSessions(HashTable::create(STRING_HASH_KEYS)),
    fPreviousClientSessionId(0)
{
  ignoreSigPipeOnSocket(fServerSocket); // so that clients on the same host that are killed don't also kill us

  // Arrange to handle connections from others:
  env.taskScheduler().turnOnBackgroundReadHandling(fServerSocket, incomingConnectionHandler, this);
}

// Destructor.  Note that subclasses are expected to call cleanup() in *their*
// destructors; see the comment inside cleanup() for why it isn't called here.
GenericMediaServer::~GenericMediaServer() {
  // Turn off background read handling:
  envir().taskScheduler().turnOffBackgroundReadHandling(fServerSocket);
  ::closeSocket(fServerSocket);
}
110 | ||
// Tears down all client sessions, then client connections, then server media
// sessions — in that order, so nothing still references an object when it is
// deleted.
void GenericMediaServer::cleanup() {
  // This member function must be called in the destructor of any subclass of
  // "GenericMediaServer".  (We don't call this in the destructor of "GenericMediaServer" itself,
  // because by that time, the subclass destructor will already have been called, and this may
  // affect (break) the destruction of the "ClientSession" and "ClientConnection" objects, which
  // themselves will have been subclassed.)

  // Close all client session objects:
  GenericMediaServer::ClientSession* clientSession;
  while ((clientSession = (GenericMediaServer::ClientSession*)fClientSessions->getFirst()) != NULL) {
    delete clientSession; // the destructor also removes the entry from "fClientSessions"
  }
  delete fClientSessions;

  // Close all client connection objects:
  GenericMediaServer::ClientConnection* connection;
  while ((connection = (GenericMediaServer::ClientConnection*)fClientConnections->getFirst()) != NULL) {
    delete connection; // the destructor also removes the entry from "fClientConnections"
  }
  delete fClientConnections;

  // Delete all server media sessions
  ServerMediaSession* serverMediaSession;
  while ((serverMediaSession = (ServerMediaSession*)fServerMediaSessions->getFirst()) != NULL) {
    removeServerMediaSession(serverMediaSession); // will delete it, because it no longer has any 'client session' objects using it
  }
  delete fServerMediaSessions;
}
139 | ||
#define LISTEN_BACKLOG_SIZE 20

// Creates, binds, and starts listening on a TCP server socket for "ourPort".
// If "ourPort" is 0, the OS chooses a port, which is returned via "ourPort".
// Returns the socket number, or -1 on failure (leaving an error message in
// the environment's result message).
int GenericMediaServer::setUpOurSocket(UsageEnvironment& env, Port& ourPort) {
  int ourSocket = -1;

  do {
    // The following statement is enabled by default.
    // Don't disable it (by defining ALLOW_SERVER_PORT_REUSE) unless you know what you're doing.
#if !defined(ALLOW_SERVER_PORT_REUSE) && !defined(ALLOW_RTSP_SERVER_PORT_REUSE)
    // ALLOW_RTSP_SERVER_PORT_REUSE is for backwards-compatibility #####
    NoReuse dummy(env); // Don't use this socket if there's already a local server using it
#endif

    ourSocket = setupStreamSocket(env, ourPort, True, True);
    if (ourSocket < 0) break;

    // Make sure we have a big send buffer:
    if (!increaseSendBufferTo(env, ourSocket, 50*1024)) break;

    // Allow multiple simultaneous connections:
    if (listen(ourSocket, LISTEN_BACKLOG_SIZE) < 0) {
      env.setResultErrMsg("listen() failed: ");
      break;
    }

    if (ourPort.num() == 0) {
      // bind() will have chosen a port for us; return it also:
      if (!getSourcePort(env, ourSocket, ourPort)) break;
    }

    return ourSocket;
  } while (0);

  // An error occurred; close any partially-set-up socket before failing:
  if (ourSocket != -1) ::closeSocket(ourSocket);
  return -1;
}
176 | ||
177 | void GenericMediaServer::incomingConnectionHandler(void* instance, int /*mask*/) { | |
178 | GenericMediaServer* server = (GenericMediaServer*)instance; | |
179 | server->incomingConnectionHandler(); | |
180 | } | |
181 | void GenericMediaServer::incomingConnectionHandler() { | |
182 | incomingConnectionHandlerOnSocket(fServerSocket); | |
183 | } | |
184 | ||
// Accepts one pending connection on "serverSocket", configures the new client
// socket (SIGPIPE-ignored, non-blocking, enlarged send buffer), and hands it
// to the subclass via createNewClientConnection().
void GenericMediaServer::incomingConnectionHandlerOnSocket(int serverSocket) {
  struct sockaddr_in clientAddr;
  SOCKLEN_T clientAddrLen = sizeof clientAddr;
  int clientSocket = accept(serverSocket, (struct sockaddr*)&clientAddr, &clientAddrLen);
  if (clientSocket < 0) {
    int err = envir().getErrno();
    if (err != EWOULDBLOCK) { // EWOULDBLOCK is benign here (no connection pending); report other errors
      envir().setResultErrMsg("accept() failed: ");
    }
    return;
  }
  ignoreSigPipeOnSocket(clientSocket); // so that clients on the same host that are killed don't also kill us
  makeSocketNonBlocking(clientSocket);
  increaseSendBufferTo(envir(), clientSocket, 50*1024);

#ifdef DEBUG
  envir() << "accept()ed connection from " << AddressString(clientAddr).val() << "\n";
#endif

  // Create a new object for handling this connection:
  (void)createNewClientConnection(clientSocket, clientAddr);
}
207 | ||
208 | ||
209 | ////////// GenericMediaServer::ClientConnection implementation ////////// | |
210 | ||
// Constructor: registers this connection with the server, and arranges for
// incomingRequestHandler() to run whenever the client socket becomes readable
// (or has an exception condition).
GenericMediaServer::ClientConnection
::ClientConnection(GenericMediaServer& ourServer, int clientSocket, struct sockaddr_in clientAddr)
  : fOurServer(ourServer), fOurSocket(clientSocket), fClientAddr(clientAddr) {
  // Add ourself to our 'client connections' table:
  fOurServer.fClientConnections->Add((char const*)this, this);

  // Arrange to handle incoming requests:
  resetRequestBuffer();
  envir().taskScheduler()
    .setBackgroundHandling(fOurSocket, SOCKET_READABLE|SOCKET_EXCEPTION, incomingRequestHandler, this);
}

GenericMediaServer::ClientConnection::~ClientConnection() {
  // Remove ourself from the server's 'client connections' hash table before we go:
  fOurServer.fClientConnections->Remove((char const*)this);

  closeSockets();
}

// Stops background handling on — and closes — our client socket.  Idempotent:
// "fOurSocket" is set to -1 afterwards, and a negative socket is not re-closed.
void GenericMediaServer::ClientConnection::closeSockets() {
  // Turn off background handling on our socket:
  envir().taskScheduler().disableBackgroundHandling(fOurSocket);
  if (fOurSocket>= 0) ::closeSocket(fOurSocket);

  fOurSocket = -1;
}
237 | ||
// Static trampoline for the task scheduler.
void GenericMediaServer::ClientConnection::incomingRequestHandler(void* instance, int /*mask*/) {
  ClientConnection* connection = (ClientConnection*)instance;
  connection->incomingRequestHandler();
}

// Reads whatever request bytes are available into "fRequestBuffer" (appended
// after any previously-seen bytes), then hands the count to the
// subclass-defined handleRequestBytes() for parsing.
void GenericMediaServer::ClientConnection::incomingRequestHandler() {
  struct sockaddr_in dummy; // 'from' address, meaningless in this case

  int bytesRead = readSocket(envir(), fOurSocket, &fRequestBuffer[fRequestBytesAlreadySeen], fRequestBufferBytesLeft, dummy);
  handleRequestBytes(bytesRead);
}

// Marks the request buffer as empty again.
void GenericMediaServer::ClientConnection::resetRequestBuffer() {
  fRequestBytesAlreadySeen = 0;
  fRequestBufferBytesLeft = sizeof fRequestBuffer;
}
254 | ||
255 | ||
256 | ////////// GenericMediaServer::ClientSession implementation ////////// | |
257 | ||
258 | GenericMediaServer::ClientSession | |
259 | ::ClientSession(GenericMediaServer& ourServer, u_int32_t sessionId) | |
260 | : fOurServer(ourServer), fOurSessionId(sessionId), fOurServerMediaSession(NULL), | |
261 | fLivenessCheckTask(NULL) { | |
262 | noteLiveness(); | |
263 | } | |
264 | ||
265 | GenericMediaServer::ClientSession::~ClientSession() { | |
266 | // Turn off any liveness checking: | |
267 | envir().taskScheduler().unscheduleDelayedTask(fLivenessCheckTask); | |
268 | ||
269 | // Remove ourself from the server's 'client sessions' hash table before we go: | |
270 | char sessionIdStr[8+1]; | |
271 | sprintf(sessionIdStr, "%08X", fOurSessionId); | |
272 | fOurServer.fClientSessions->Remove(sessionIdStr); | |
273 | ||
274 | if (fOurServerMediaSession != NULL) { | |
275 | fOurServerMediaSession->decrementReferenceCount(); | |
276 | if (fOurServerMediaSession->referenceCount() == 0 | |
277 | && fOurServerMediaSession->deleteWhenUnreferenced()) { | |
278 | fOurServer.removeServerMediaSession(fOurServerMediaSession); | |
279 | fOurServerMediaSession = NULL; | |
280 | } | |
281 | } | |
282 | } | |
283 | ||
// Records client activity: propagates the liveness indication to the media
// session being streamed, and (re)starts the inactivity timeout if
// reclamation is enabled (fReclamationSeconds > 0).
void GenericMediaServer::ClientSession::noteLiveness() {
#ifdef DEBUG
  char const* streamName
    = (fOurServerMediaSession == NULL) ? "???" : fOurServerMediaSession->streamName();
  fprintf(stderr, "Client session (id \"%08X\", stream name \"%s\"): Liveness indication\n",
	  fOurSessionId, streamName);
#endif
  if (fOurServerMediaSession != NULL) fOurServerMediaSession->noteLiveness();

  if (fOurServer.fReclamationSeconds > 0) {
    // Push the reclamation deadline out by another "fReclamationSeconds":
    envir().taskScheduler().rescheduleDelayedTask(fLivenessCheckTask,
						  fOurServer.fReclamationSeconds*1000000,
						  (TaskFunc*)livenessTimeoutTask, this);
  }
}

// Static convenience wrapper around noteLiveness().
void GenericMediaServer::ClientSession::noteClientLiveness(ClientSession* clientSession) {
  clientSession->noteLiveness();
}

// Scheduled-task callback, run when the reclamation period elapses without a
// liveness indication: reclaims the session by deleting it.
void GenericMediaServer::ClientSession::livenessTimeoutTask(ClientSession* clientSession) {
  // If this gets called, the client session is assumed to have timed out, so delete it:
#ifdef DEBUG
  char const* streamName
    = (clientSession->fOurServerMediaSession == NULL) ? "???" : clientSession->fOurServerMediaSession->streamName();
  fprintf(stderr, "Client session (id \"%08X\", stream name \"%s\") has timed out (due to inactivity)\n",
	  clientSession->fOurSessionId, streamName);
#endif
  clientSession->fLivenessCheckTask = NULL; // the task that invoked us is no longer pending
  delete clientSession;
}
315 | ||
// Creates a new "ClientSession" (via the subclass-defined factory
// createNewClientSession()), assigning it a randomly-chosen, unused, nonzero
// 32-bit id, and registers it keyed by the 8-hex-digit form of that id.
// Returns NULL if the factory does.
GenericMediaServer::ClientSession* GenericMediaServer::createNewClientSessionWithId() {
  u_int32_t sessionId;
  char sessionIdStr[8+1];

  // Choose a random (unused) 32-bit integer for the session id
  // (it will be encoded as a 8-digit hex number).  (We avoid choosing session id 0,
  // because that has a special use by some servers.  Similarly, we avoid choosing the same
  // session id twice in a row.)
  do {
    sessionId = (u_int32_t)our_random32();
    snprintf(sessionIdStr, sizeof sessionIdStr, "%08X", sessionId);
  } while (sessionId == 0 || sessionId == fPreviousClientSessionId
	   || lookupClientSession(sessionIdStr) != NULL);
  fPreviousClientSessionId = sessionId;

  ClientSession* clientSession = createNewClientSession(sessionId);
  if (clientSession != NULL) fClientSessions->Add(sessionIdStr, clientSession);

  return clientSession;
}

// Looks up a client session by numeric id (formatted as 8 hex digits).
GenericMediaServer::ClientSession*
GenericMediaServer::lookupClientSession(u_int32_t sessionId) {
  char sessionIdStr[8+1];
  snprintf(sessionIdStr, sizeof sessionIdStr, "%08X", sessionId);
  return lookupClientSession(sessionIdStr);
}

// Looks up a client session by its hash-table key (the 8-hex-digit id string).
GenericMediaServer::ClientSession*
GenericMediaServer::lookupClientSession(char const* sessionIdStr) {
  return (GenericMediaServer::ClientSession*)fClientSessions->Lookup(sessionIdStr);
}
348 | ||
349 | ||
350 | ////////// ServerMediaSessionIterator implementation ////////// | |
351 | ||
352 | GenericMediaServer::ServerMediaSessionIterator | |
353 | ::ServerMediaSessionIterator(GenericMediaServer& server) | |
354 | : fOurIterator((server.fServerMediaSessions == NULL) | |
355 | ? NULL : HashTable::Iterator::create(*server.fServerMediaSessions)) { | |
356 | } | |
357 | ||
GenericMediaServer::ServerMediaSessionIterator::~ServerMediaSessionIterator() {
  delete fOurIterator; // may be NULL (no session table existed); deleting NULL is safe
}
361 | ||
362 | ServerMediaSession* GenericMediaServer::ServerMediaSessionIterator::next() { | |
363 | if (fOurIterator == NULL) return NULL; | |
364 | ||
365 | char const* key; // dummy | |
366 | return (ServerMediaSession*)(fOurIterator->next(key)); | |
367 | } | |
368 | ||
369 | ||
370 | ////////// UserAuthenticationDatabase implementation ////////// | |
371 | ||
// Constructs an (initially empty) username -> password database.
// "realm" defaults to "LIVE555 Streaming Media" when NULL.
// NOTE(review): "passwordsAreMD5" presumably means the stored strings are
// digest-authentication hashes rather than plaintext — confirm against callers.
UserAuthenticationDatabase::UserAuthenticationDatabase(char const* realm,
						       Boolean passwordsAreMD5)
  : fTable(HashTable::create(STRING_HASH_KEYS)),
    fRealm(strDup(realm == NULL ? "LIVE555 Streaming Media" : realm)),
    fPasswordsAreMD5(passwordsAreMD5) {
}
378 | ||
379 | UserAuthenticationDatabase::~UserAuthenticationDatabase() { | |
380 | delete[] fRealm; | |
381 | ||
382 | // Delete the allocated 'password' strings that we stored in the table, and then the table itself: | |
383 | char* password; | |
384 | while ((password = (char*)fTable->RemoveNext()) != NULL) { | |
385 | delete[] password; | |
386 | } | |
387 | delete fTable; | |
388 | } | |
389 | ||
390 | void UserAuthenticationDatabase::addUserRecord(char const* username, | |
391 | char const* password) { | |
392 | char* oldPassword = (char*)fTable->Add(username, (void*)(strDup(password))); | |
393 | delete[] oldPassword; // if any | |
394 | } | |
395 | ||
396 | void UserAuthenticationDatabase::removeUserRecord(char const* username) { | |
397 | char* password = (char*)(fTable->Lookup(username)); | |
398 | fTable->Remove(username); | |
399 | delete[] password; | |
400 | } | |
401 | ||
402 | char const* UserAuthenticationDatabase::lookupPassword(char const* username) { | |
403 | return (char const*)(fTable->Lookup(username)); | |
404 | } |
0 | /********** | |
1 | This library is free software; you can redistribute it and/or modify it under | |
2 | the terms of the GNU Lesser General Public License as published by the | |
3 | Free Software Foundation; either version 3 of the License, or (at your | |
4 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) | |
5 | ||
6 | This library is distributed in the hope that it will be useful, but WITHOUT | |
7 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
8 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for | |
9 | more details. | |
10 | ||
11 | You should have received a copy of the GNU Lesser General Public License | |
12 | along with this library; if not, write to the Free Software Foundation, Inc., | |
13 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
14 | **********/ | |
15 | // "liveMedia" | |
16 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. | |
17 | // An abstraction of a network interface used for RTP (or RTCP). | |
18 | // (This allows the RTP-over-TCP hack (RFC 2326, section 10.12) to | |
19 | // be implemented transparently.) | |
20 | // Implementation | |
21 | ||
22 | #include "RTPInterface.hh" | |
23 | #include <GroupsockHelper.hh> | |
24 | #include <stdio.h> | |
25 | ||
26 | ////////// Helper Functions - Definition ////////// | |
27 | ||
28 | // Helper routines and data structures, used to implement | |
29 | // sending/receiving RTP/RTCP over a TCP socket: | |
30 | ||
// A record in the singly-linked list of (TCP socket, RTSP stream channel id)
// pairs over which an "RTPInterface" sends its packets.
class tcpStreamRecord {
public:
  tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId,
		  tcpStreamRecord* next);
  virtual ~tcpStreamRecord();

public:
  tcpStreamRecord* fNext; // next record in the list (the destructor deletes it, reclaiming the whole tail)
  int fStreamSocketNum; // the TCP socket used for RTP/RTCP-over-TCP
  unsigned char fStreamChannelId; // interleaved channel id (RFC 2326, section 10.12)
};
42 | ||
43 | // Reading RTP-over-TCP is implemented using two levels of hash tables. | |
44 | // The top-level hash table maps TCP socket numbers to a | |
45 | // "SocketDescriptor" that contains a hash table for each of the | |
46 | // sub-channels that are reading from this socket. | |
47 | ||
48 | static HashTable* socketHashTable(UsageEnvironment& env, Boolean createIfNotPresent = True) { | |
49 | _Tables* ourTables = _Tables::getOurTables(env, createIfNotPresent); | |
50 | if (ourTables == NULL) return NULL; | |
51 | ||
52 | if (ourTables->socketTable == NULL) { | |
53 | // Create a new socket number -> SocketDescriptor mapping table: | |
54 | ourTables->socketTable = HashTable::create(ONE_WORD_HASH_KEYS); | |
55 | } | |
56 | return (HashTable*)(ourTables->socketTable); | |
57 | } | |
58 | ||
// Per-TCP-socket state, used to demultiplex interleaved RTP/RTCP packets
// ("$<channelId><size><data>", RFC 2326 section 10.12) from ordinary RTSP
// bytes arriving on the same connection.
class SocketDescriptor {
public:
  SocketDescriptor(UsageEnvironment& env, int socketNum);
  virtual ~SocketDescriptor();

  // Map "streamChannelId" -> "rtpInterface" in our sub-channel table:
  void registerRTPInterface(unsigned char streamChannelId,
			    RTPInterface* rtpInterface);
  RTPInterface* lookupRTPInterface(unsigned char streamChannelId);
  // Note: may delete this object, if the last sub-channel is removed:
  void deregisterRTPInterface(unsigned char streamChannelId);

  // Install the handler that receives non-'$'-framed bytes (i.e., RTSP request/response data):
  void setServerRequestAlternativeByteHandler(ServerRequestAlternativeByteHandler* handler, void* clientData) {
    fServerRequestAlternativeByteHandler = handler;
    fServerRequestAlternativeByteHandlerClientData = clientData;
  }

private:
  static void tcpReadHandler(SocketDescriptor*, int mask); // background-read entry point
  Boolean tcpReadHandler1(int mask); // one step of the parse; returns True to be called again

private:
  UsageEnvironment& fEnv;
  int fOurSocketNum; // the TCP socket that we read from
  HashTable* fSubChannelHashTable; // streamChannelId -> RTPInterface*
  ServerRequestAlternativeByteHandler* fServerRequestAlternativeByteHandler;
  void* fServerRequestAlternativeByteHandlerClientData;
  u_int8_t fStreamChannelId, fSizeByte1; // parse state: current channel id, and the high byte of the packet size
  Boolean fReadErrorOccurred, fDeleteMyselfNext, fAreInReadHandlerLoop;
  // State machine for parsing the interleaved framing:
  enum { AWAITING_DOLLAR, AWAITING_STREAM_CHANNEL_ID, AWAITING_SIZE1, AWAITING_SIZE2, AWAITING_PACKET_DATA } fTCPReadingState;
};
88 | ||
89 | static SocketDescriptor* lookupSocketDescriptor(UsageEnvironment& env, int sockNum, Boolean createIfNotFound = True) { | |
90 | HashTable* table = socketHashTable(env, createIfNotFound); | |
91 | if (table == NULL) return NULL; | |
92 | ||
93 | char const* key = (char const*)(long)sockNum; | |
94 | SocketDescriptor* socketDescriptor = (SocketDescriptor*)(table->Lookup(key)); | |
95 | if (socketDescriptor == NULL) { | |
96 | if (createIfNotFound) { | |
97 | socketDescriptor = new SocketDescriptor(env, sockNum); | |
98 | table->Add((char const*)(long)(sockNum), socketDescriptor); | |
99 | } else if (table->IsEmpty()) { | |
100 | // We can also delete the table (to reclaim space): | |
101 | _Tables* ourTables = _Tables::getOurTables(env); | |
102 | delete table; | |
103 | ourTables->socketTable = NULL; | |
104 | ourTables->reclaimIfPossible(); | |
105 | } | |
106 | } | |
107 | ||
108 | return socketDescriptor; | |
109 | } | |
110 | ||
111 | static void removeSocketDescription(UsageEnvironment& env, int sockNum) { | |
112 | char const* key = (char const*)(long)sockNum; | |
113 | HashTable* table = socketHashTable(env); | |
114 | table->Remove(key); | |
115 | ||
116 | if (table->IsEmpty()) { | |
117 | // We can also delete the table (to reclaim space): | |
118 | _Tables* ourTables = _Tables::getOurTables(env); | |
119 | delete table; | |
120 | ourTables->socketTable = NULL; | |
121 | ourTables->reclaimIfPossible(); | |
122 | } | |
123 | } | |
124 | ||
125 | ||
126 | ////////// RTPInterface - Implementation ////////// | |
127 | ||
// Constructs an RTP/RTCP network interface over the datagram 'groupsock' "gs",
// owned by "owner". TCP ('interleaved') destinations may be added later,
// via "setStreamSocket()"/"addStreamSocket()".
RTPInterface::RTPInterface(Medium* owner, Groupsock* gs)
  : fOwner(owner), fGS(gs),
    fTCPStreams(NULL),
    fNextTCPReadSize(0), fNextTCPReadStreamSocketNum(-1),
    fNextTCPReadStreamChannelId(0xFF), fReadHandlerProc(NULL),
    fAuxReadHandlerFunc(NULL), fAuxReadHandlerClientData(NULL) {
  // Make the socket non-blocking, even though it will be read from only asynchronously, when packets arrive.
  // The reason for this is that, in some OSs, reads on a blocking socket can (allegedly) sometimes block,
  // even if the socket was previously reported (e.g., by "select()") as having data available.
  // (This can supposedly happen if the UDP checksum fails, for example.)
  makeSocketNonBlocking(fGS->socketNum());
  increaseSendBufferTo(envir(), fGS->socketNum(), 50*1024);
}
141 | ||
RTPInterface::~RTPInterface() {
  stopNetworkReading();
  delete fTCPStreams; // "tcpStreamRecord"'s destructor deletes "fNext", so this reclaims the whole list
}
146 | ||
// Switches this interface over to TCP 'interleaved' streaming exclusively:
// drops all UDP destinations, closes the datagram socket, and makes
// (sockNum, streamChannelId) the transport.
void RTPInterface::setStreamSocket(int sockNum,
				   unsigned char streamChannelId) {
  fGS->removeAllDestinations();
  envir().taskScheduler().disableBackgroundHandling(fGS->socketNum()); // turn off any reading on our datagram socket
  fGS->reset(); // and close our datagram socket, because we won't be using it anymore

  addStreamSocket(sockNum, streamChannelId);
}
155 | ||
156 | void RTPInterface::addStreamSocket(int sockNum, | |
157 | unsigned char streamChannelId) { | |
158 | if (sockNum < 0) return; | |
159 | ||
160 | for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; | |
161 | streams = streams->fNext) { | |
162 | if (streams->fStreamSocketNum == sockNum | |
163 | && streams->fStreamChannelId == streamChannelId) { | |
164 | return; // we already have it | |
165 | } | |
166 | } | |
167 | ||
168 | fTCPStreams = new tcpStreamRecord(sockNum, streamChannelId, fTCPStreams); | |
169 | ||
170 | // Also, make sure this new socket is set up for receiving RTP/RTCP-over-TCP: | |
171 | SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), sockNum); | |
172 | socketDescriptor->registerRTPInterface(streamChannelId, this); | |
173 | } | |
174 | ||
175 | static void deregisterSocket(UsageEnvironment& env, int sockNum, unsigned char streamChannelId) { | |
176 | SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, sockNum, False); | |
177 | if (socketDescriptor != NULL) { | |
178 | socketDescriptor->deregisterRTPInterface(streamChannelId); | |
179 | // Note: This may delete "socketDescriptor", | |
180 | // if no more interfaces are using this socket | |
181 | } | |
182 | } | |
183 | ||
void RTPInterface::removeStreamSocket(int sockNum,
				      unsigned char streamChannelId) {
  // Remove - from our list of 'TCP streams' - the record of the (sockNum,streamChannelId) pair.
  // (However "streamChannelId" == 0xFF is a special case, meaning remove all
  // (sockNum,*) pairs.)
  // We use a pointer-to-link ("streamsPtr") so that unlinking works uniformly
  // for the head and for interior nodes.

  while (1) {
    tcpStreamRecord** streamsPtr = &fTCPStreams;

    while (*streamsPtr != NULL) {
      if ((*streamsPtr)->fStreamSocketNum == sockNum
	  && (streamChannelId == 0xFF || streamChannelId == (*streamsPtr)->fStreamChannelId)) {
	// Delete the record pointed to by *streamsPtr :
	unsigned char streamChannelIdToRemove = (*streamsPtr)->fStreamChannelId;
	tcpStreamRecord* next = (*streamsPtr)->fNext;
	(*streamsPtr)->fNext = NULL; // unlink first, so the destructor doesn't delete the rest of the list
	delete (*streamsPtr);
	*streamsPtr = next;

	// And 'deregister' this socket,channelId pair:
	deregisterSocket(envir(), sockNum, streamChannelIdToRemove);

	if (streamChannelId != 0xFF) return; // we're done
	break; // start again from the beginning of the list, in case the list has changed
      } else {
	streamsPtr = &((*streamsPtr)->fNext);
      }
    }
    if (*streamsPtr == NULL) break; // reached the end without a match => done
  }
}
215 | ||
216 | void RTPInterface::setServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum, | |
217 | ServerRequestAlternativeByteHandler* handler, void* clientData) { | |
218 | SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, socketNum, False); | |
219 | ||
220 | if (socketDescriptor != NULL) socketDescriptor->setServerRequestAlternativeByteHandler(handler, clientData); | |
221 | } | |
222 | ||
223 | void RTPInterface::clearServerRequestAlternativeByteHandler(UsageEnvironment& env, int socketNum) { | |
224 | SocketDescriptor* socketDescriptor = lookupSocketDescriptor(env, socketNum, False); | |
225 | ||
226 | if (socketDescriptor != NULL) { | |
227 | //%%%%% | |
228 | socketDescriptor->setServerRequestAlternativeByteHandler(NULL, NULL); | |
229 | } | |
230 | } | |
231 | ||
232 | Boolean RTPInterface::sendPacket(unsigned char* packet, unsigned packetSize) { | |
233 | Boolean success = True; // we'll return False instead if any of the sends fail | |
234 | ||
235 | // Normal case: Send as a UDP packet: | |
236 | if (!fGS->output(envir(), packet, packetSize)) success = False; | |
237 | ||
238 | // Also, send over each of our TCP sockets: | |
239 | tcpStreamRecord* nextStream; | |
240 | for (tcpStreamRecord* stream = fTCPStreams; stream != NULL; stream = nextStream) { | |
241 | nextStream = stream->fNext; // Set this now, in case the following deletes "stream": | |
242 | if (!sendRTPorRTCPPacketOverTCP(packet, packetSize, | |
243 | stream->fStreamSocketNum, stream->fStreamChannelId)) { | |
244 | success = False; | |
245 | } | |
246 | } | |
247 | ||
248 | return success; | |
249 | } | |
250 | ||
251 | void RTPInterface | |
252 | ::startNetworkReading(TaskScheduler::BackgroundHandlerProc* handlerProc) { | |
253 | // Normal case: Arrange to read UDP packets: | |
254 | envir().taskScheduler(). | |
255 | turnOnBackgroundReadHandling(fGS->socketNum(), handlerProc, fOwner); | |
256 | ||
257 | // Also, receive RTP over TCP, on each of our TCP connections: | |
258 | fReadHandlerProc = handlerProc; | |
259 | for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; | |
260 | streams = streams->fNext) { | |
261 | // Get a socket descriptor for "streams->fStreamSocketNum": | |
262 | SocketDescriptor* socketDescriptor = lookupSocketDescriptor(envir(), streams->fStreamSocketNum); | |
263 | ||
264 | // Tell it about our subChannel: | |
265 | socketDescriptor->registerRTPInterface(streams->fStreamChannelId, this); | |
266 | } | |
267 | } | |
268 | ||
// Reads one packet into "buffer" (of size "bufferMaxSize"), from whichever
// transport is pending:
// - UDP ('groupsock'), when no TCP read is queued; "tcpSocketNum" is set to -1.
// - a TCP stream, identified on return via "tcpSocketNum"/"tcpStreamChannelId".
// If only part of a TCP packet's data has arrived, "packetReadWasIncomplete"
// is set True and True is returned; the caller should call again later for the rest.
// Returns True iff the read succeeded.
Boolean RTPInterface::handleRead(unsigned char* buffer, unsigned bufferMaxSize,
				 unsigned& bytesRead, struct sockaddr_in& fromAddress,
				 int& tcpSocketNum, unsigned char& tcpStreamChannelId,
				 Boolean& packetReadWasIncomplete) {
  packetReadWasIncomplete = False; // by default
  Boolean readSuccess;
  if (fNextTCPReadStreamSocketNum < 0) {
    // Normal case: read from the (datagram) 'groupsock':
    tcpSocketNum = -1;
    readSuccess = fGS->handleRead(buffer, bufferMaxSize, bytesRead, fromAddress);
  } else {
    // Read from the TCP connection:
    tcpSocketNum = fNextTCPReadStreamSocketNum;
    tcpStreamChannelId = fNextTCPReadStreamChannelId;

    bytesRead = 0;
    // "fNextTCPReadSize" (set by the framing parser) is how much packet data remains:
    unsigned totBytesToRead = fNextTCPReadSize;
    if (totBytesToRead > bufferMaxSize) totBytesToRead = bufferMaxSize;
    unsigned curBytesToRead = totBytesToRead;
    int curBytesRead;
    while ((curBytesRead = readSocket(envir(), fNextTCPReadStreamSocketNum,
				      &buffer[bytesRead], curBytesToRead,
				      fromAddress)) > 0) {
      bytesRead += curBytesRead;
      if (bytesRead >= totBytesToRead) break;
      curBytesToRead -= curBytesRead;
    }
    fNextTCPReadSize -= bytesRead;
    if (fNextTCPReadSize == 0) {
      // We've read all of the data that we asked for
      readSuccess = True;
    } else if (curBytesRead < 0) {
      // There was an error reading the socket
      bytesRead = 0;
      readSuccess = False;
    } else {
      // We need to read more bytes, and there was not an error reading the socket
      // (note: "fNextTCPReadStreamSocketNum" is deliberately left set, so the
      // next call resumes this same partial read)
      packetReadWasIncomplete = True;
      return True;
    }
    fNextTCPReadStreamSocketNum = -1; // default, for next time
  }

  if (readSuccess && fAuxReadHandlerFunc != NULL) {
    // Also pass the newly-read packet data to our auxilliary handler:
    (*fAuxReadHandlerFunc)(fAuxReadHandlerClientData, buffer, bytesRead);
  }
  return readSuccess;
}
318 | ||
319 | void RTPInterface::stopNetworkReading() { | |
320 | // Normal case | |
321 | if (fGS != NULL) envir().taskScheduler().turnOffBackgroundReadHandling(fGS->socketNum()); | |
322 | ||
323 | // Also turn off read handling on each of our TCP connections: | |
324 | for (tcpStreamRecord* streams = fTCPStreams; streams != NULL; streams = streams->fNext) { | |
325 | deregisterSocket(envir(), streams->fStreamSocketNum, streams->fStreamChannelId); | |
326 | } | |
327 | } | |
328 | ||
329 | ||
330 | ////////// Helper Functions - Implementation ///////// | |
331 | ||
// Sends one RTP/RTCP packet over a TCP connection, using the interleaved
// encoding defined in RFC 2326, section 10.12:
//   $<streamChannelId><2-byte packetSize><packet>
// Returns True on success, False if either send failed.
Boolean RTPInterface::sendRTPorRTCPPacketOverTCP(u_int8_t* packet, unsigned packetSize,
						 int socketNum, unsigned char streamChannelId) {
#ifdef DEBUG_SEND
  fprintf(stderr, "sendRTPorRTCPPacketOverTCP: %d bytes over channel %d (socket %d)\n",
	  packetSize, streamChannelId, socketNum); fflush(stderr);
#endif
  // (If the initial "send()" of '$<streamChannelId><packetSize>' succeeds, then we force
  // the subsequent "send()" for the <packet> data to succeed, even if we have to do so with
  // a blocking "send()".)
  do { // one-shot 'do { ... } while (0)', so that "break" jumps to the failure path below
    u_int8_t framingHeader[4];
    framingHeader[0] = '$';
    framingHeader[1] = streamChannelId;
    framingHeader[2] = (u_int8_t) ((packetSize&0xFF00)>>8); // size, high byte (network order)
    framingHeader[3] = (u_int8_t) (packetSize&0xFF); // size, low byte
    if (!sendDataOverTCP(socketNum, framingHeader, 4, False)) break;

    if (!sendDataOverTCP(socketNum, packet, packetSize, True)) break;
#ifdef DEBUG_SEND
    fprintf(stderr, "sendRTPorRTCPPacketOverTCP: completed\n"); fflush(stderr);
#endif

    return True;
  } while (0);

#ifdef DEBUG_SEND
  fprintf(stderr, "sendRTPorRTCPPacketOverTCP: failed! (errno %d)\n", envir().getErrno()); fflush(stderr);
#endif
  return False;
}
364 | ||
#ifndef RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS
#define RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS 500
#endif

// Sends "dataSize" bytes over "socketNum". If "forceSendToSucceed" is True and
// the (non-blocking) send can't complete, retries the remainder with a blocking
// send (bounded by RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS).
// On unrecoverable failure the socket is removed from our TCP stream list.
// Returns True iff all of the data was sent.
Boolean RTPInterface::sendDataOverTCP(int socketNum, u_int8_t const* data, unsigned dataSize, Boolean forceSendToSucceed) {
  int sendResult = send(socketNum, (char const*)data, dataSize, 0/*flags*/);
  if (sendResult < (int)dataSize) {
    // The TCP send() failed - at least partially.

    unsigned numBytesSentSoFar = sendResult < 0 ? 0 : (unsigned)sendResult;
    if (numBytesSentSoFar > 0 || (forceSendToSucceed && envir().getErrno() == EAGAIN)) {
      // The OS's TCP send buffer has filled up (because the stream's bitrate has exceeded
      // the capacity of the TCP connection!).
      // Force this data write to succeed, by blocking if necessary until it does:
      unsigned numBytesRemainingToSend = dataSize - numBytesSentSoFar;
#ifdef DEBUG_SEND
      fprintf(stderr, "sendDataOverTCP: resending %d-byte send (blocking)\n", numBytesRemainingToSend); fflush(stderr);
#endif
      makeSocketBlocking(socketNum, RTPINTERFACE_BLOCKING_WRITE_TIMEOUT_MS);
      sendResult = send(socketNum, (char const*)(&data[numBytesSentSoFar]), numBytesRemainingToSend, 0/*flags*/);
      if ((unsigned)sendResult != numBytesRemainingToSend) {
	// The blocking "send()" failed, or timed out. In either case, we assume that the
	// TCP connection has failed (or is 'hanging' indefinitely), and we stop using it
	// (for both RTP and RTCP).
	// (If we kept using the socket here, the RTP or RTCP packet write would be in an
	// incomplete, inconsistent state.)
#ifdef DEBUG_SEND
	fprintf(stderr, "sendDataOverTCP: blocking send() failed (delivering %d bytes out of %d); closing socket %d\n", sendResult, numBytesRemainingToSend, socketNum); fflush(stderr);
#endif
	removeStreamSocket(socketNum, 0xFF); // 0xFF => remove *all* channels on this socket
	return False;
      }
      makeSocketNonBlocking(socketNum); // restore the socket's normal (non-blocking) mode

      return True;
    } else if (sendResult < 0 && envir().getErrno() != EAGAIN) {
      // Because the "send()" call failed, assume that the socket is now unusable, so stop
      // using it (for both RTP and RTCP):
      removeStreamSocket(socketNum, 0xFF);
    }

    return False;
  }

  return True;
}
411 | ||
// Constructs the per-socket demultiplexing state for "socketNum".
// The parse state machine starts out waiting for a '$' framing character.
SocketDescriptor::SocketDescriptor(UsageEnvironment& env, int socketNum)
  :fEnv(env), fOurSocketNum(socketNum),
    fSubChannelHashTable(HashTable::create(ONE_WORD_HASH_KEYS)),
   fServerRequestAlternativeByteHandler(NULL), fServerRequestAlternativeByteHandlerClientData(NULL),
   fReadErrorOccurred(False), fDeleteMyselfNext(False), fAreInReadHandlerLoop(False), fTCPReadingState(AWAITING_DOLLAR) {
}
418 | ||
419 | SocketDescriptor::~SocketDescriptor() { | |
420 | fprintf(stderr, "#####@@@@@ SocketDescriptor[%p]::~SocketDescriptor() 1\n", this); | |
421 | fEnv.taskScheduler().turnOffBackgroundReadHandling(fOurSocketNum); | |
422 | removeSocketDescription(fEnv, fOurSocketNum); | |
423 | ||
424 | if (fSubChannelHashTable != NULL) { | |
425 | // Remove knowledge of this socket from any "RTPInterface"s that are using it: | |
426 | HashTable::Iterator* iter = HashTable::Iterator::create(*fSubChannelHashTable); | |
427 | RTPInterface* rtpInterface; | |
428 | char const* key; | |
429 | ||
430 | while ((rtpInterface = (RTPInterface*)(iter->next(key))) != NULL) { | |
431 | u_int64_t streamChannelIdLong = (u_int64_t)key; | |
432 | unsigned char streamChannelId = (unsigned char)streamChannelIdLong; | |
433 | ||
434 | rtpInterface->removeStreamSocket(fOurSocketNum, streamChannelId); | |
435 | } | |
436 | delete iter; | |
437 | ||
438 | // Then remove the hash table entries themselves, and then remove the hash table: | |
439 | while (fSubChannelHashTable->RemoveNext() != NULL) {} | |
440 | delete fSubChannelHashTable; | |
441 | } | |
442 | ||
443 | fprintf(stderr, "#####@@@@@ SocketDescriptor[%p]::~SocketDescriptor() 8 fServerRequestAlternativeByteHandler %p, fReadErrorOccurred %d\n", this, fServerRequestAlternativeByteHandler, fReadErrorOccurred); | |
444 | // Finally: | |
445 | if (fServerRequestAlternativeByteHandler != NULL) { | |
446 | // Hack: Pass a special character to our alternative byte handler, to tell it that either | |
447 | // - an error occurred when reading the TCP socket, or | |
448 | // - no error occurred, but it needs to take over control of the TCP socket once again. | |
449 | u_int8_t specialChar = fReadErrorOccurred ? 0xFF : 0xFE; | |
450 | (*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, specialChar); | |
451 | } | |
452 | fprintf(stderr, "#####@@@@@ SocketDescriptor[%p]::~SocketDescriptor() 9\n", this); | |
453 | } | |
454 | ||
// Adds the mapping streamChannelId -> "rtpInterface" to our sub-channel table.
// The very first registration also turns on background read handling for the socket.
void SocketDescriptor::registerRTPInterface(unsigned char streamChannelId,
					    RTPInterface* rtpInterface) {
  Boolean isFirstRegistration = fSubChannelHashTable->IsEmpty();
#if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE)
  fprintf(stderr, "SocketDescriptor(socket %d)::registerRTPInterface(channel %d): isFirstRegistration %d\n", fOurSocketNum, streamChannelId, isFirstRegistration);
#endif
  fSubChannelHashTable->Add((char const*)(long)streamChannelId,
			    rtpInterface); // the key is the channel id itself (ONE_WORD_HASH_KEYS)

  if (isFirstRegistration) {
    // Arrange to handle reads on this TCP socket:
    TaskScheduler::BackgroundHandlerProc* handler
      = (TaskScheduler::BackgroundHandlerProc*)&tcpReadHandler;
    fEnv.taskScheduler().
      setBackgroundHandling(fOurSocketNum, SOCKET_READABLE|SOCKET_EXCEPTION, handler, this);
  }
}
472 | ||
473 | RTPInterface* SocketDescriptor | |
474 | ::lookupRTPInterface(unsigned char streamChannelId) { | |
475 | char const* lookupArg = (char const*)(long)streamChannelId; | |
476 | return (RTPInterface*)(fSubChannelHashTable->Lookup(lookupArg)); | |
477 | } | |
478 | ||
// Removes "streamChannelId" from our sub-channel table.
// CAUTION: when the last sub-channel is removed, this object deletes itself
// (possibly deferred until "tcpReadHandler()" finishes its loop).
void SocketDescriptor
::deregisterRTPInterface(unsigned char streamChannelId) {
#if defined(DEBUG_SEND)||defined(DEBUG_RECEIVE)
  fprintf(stderr, "SocketDescriptor(socket %d)::deregisterRTPInterface(channel %d)\n", fOurSocketNum, streamChannelId);
#endif
  fSubChannelHashTable->Remove((char const*)(long)streamChannelId);

  if (fSubChannelHashTable->IsEmpty()) {
    // No more interfaces are using us, so it's curtains for us now:
    if (fAreInReadHandlerLoop) {
      fDeleteMyselfNext = True; // we can't delete ourself yet, but we'll do so from "tcpReadHandler()" below
    } else {
      delete this;
    }
  }
}
495 | ||
496 | void SocketDescriptor::tcpReadHandler(SocketDescriptor* socketDescriptor, int mask) { | |
497 | // Call the read handler until it returns false, with a limit to avoid starving other sockets | |
498 | fprintf(stderr, "#####@@@@@ SocketDescriptor::tcpReadHandler() 1\n"); | |
499 | unsigned count = 2000; | |
500 | socketDescriptor->fAreInReadHandlerLoop = True; | |
501 | while (!socketDescriptor->fDeleteMyselfNext && socketDescriptor->tcpReadHandler1(mask) && --count > 0) {} | |
502 | socketDescriptor->fAreInReadHandlerLoop = False; | |
503 | fprintf(stderr, "#####@@@@@ SocketDescriptor::tcpReadHandler() 8, socketDescriptor->fDeleteMyselfNext %d\n", socketDescriptor->fDeleteMyselfNext); | |
504 | if (socketDescriptor->fDeleteMyselfNext) delete socketDescriptor; | |
505 | } | |
506 | ||
// One step of the RTP/RTCP-over-TCP parse state machine.
// Returns True iff we should be called again immediately (more data may be pending).
Boolean SocketDescriptor::tcpReadHandler1(int mask) {
  // We expect the following data over the TCP channel:
  //   optional RTSP command or response bytes (before the first '$' character)
  //   a '$' character
  //   a 1-byte channel id
  //   a 2-byte packet size (in network byte order)
  //   the packet data.
  // However, because the socket is being read asynchronously, this data might arrive in pieces.

  u_int8_t c;
  struct sockaddr_in fromAddress;
  if (fTCPReadingState != AWAITING_PACKET_DATA) {
    // In every state except AWAITING_PACKET_DATA we consume exactly one byte here:
    int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress);
    if (result == 0) { // There was no more data to read
      return False;
    } else if (result != 1) { // error reading TCP socket, so we will no longer handle it
#ifdef DEBUG_RECEIVE
      fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result);
#endif
      fReadErrorOccurred = True;
      fDeleteMyselfNext = True; // our deletion is deferred to "tcpReadHandler()"
      return False;
    }
  }

  Boolean callAgain = True;
  switch (fTCPReadingState) {
    case AWAITING_DOLLAR: {
      if (c == '$') {
#ifdef DEBUG_RECEIVE
	fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw '$'\n", fOurSocketNum);
#endif
	fTCPReadingState = AWAITING_STREAM_CHANNEL_ID;
      } else {
	// This character is part of a RTSP request or command, which is handled separately:
	if (fServerRequestAlternativeByteHandler != NULL && c != 0xFF && c != 0xFE) {
	  // Hack: 0xFF and 0xFE are used as special signaling characters, so don't send them
	  (*fServerRequestAlternativeByteHandler)(fServerRequestAlternativeByteHandlerClientData, c);
	}
      }
      break;
    }
    case AWAITING_STREAM_CHANNEL_ID: {
      // The byte that we read is the stream channel id.
      if (lookupRTPInterface(c) != NULL) { // sanity check
	fStreamChannelId = c;
	fTCPReadingState = AWAITING_SIZE1;
      } else {
	// This wasn't a stream channel id that we expected. We're (somehow) in a strange state. Try to recover:
#ifdef DEBUG_RECEIVE
	fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): Saw nonexistent stream channel id: 0x%02x\n", fOurSocketNum, c);
#endif
	fTCPReadingState = AWAITING_DOLLAR;
      }
      break;
    }
    case AWAITING_SIZE1: {
      // The byte that we read is the first (high) byte of the 16-bit RTP or RTCP packet 'size'.
      fSizeByte1 = c;
      fTCPReadingState = AWAITING_SIZE2;
      break;
    }
    case AWAITING_SIZE2: {
      // The byte that we read is the second (low) byte of the 16-bit RTP or RTCP packet 'size'.
      unsigned short size = (fSizeByte1<<8)|c;

      // Record the information about the packet data that will be read next:
      RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId);
      if (rtpInterface != NULL) {
	rtpInterface->fNextTCPReadSize = size;
	rtpInterface->fNextTCPReadStreamSocketNum = fOurSocketNum;
	rtpInterface->fNextTCPReadStreamChannelId = fStreamChannelId;
      }
      fTCPReadingState = AWAITING_PACKET_DATA;
      break;
    }
    case AWAITING_PACKET_DATA: {
      callAgain = False;
      fTCPReadingState = AWAITING_DOLLAR; // the next state, unless we end up having to read more data in the current state
      // Call the appropriate read handler to get the packet data from the TCP stream:
      RTPInterface* rtpInterface = lookupRTPInterface(fStreamChannelId);
      if (rtpInterface != NULL) {
	if (rtpInterface->fNextTCPReadSize == 0) {
	  // We've already read all the data for this packet.
	  break;
	}
	if (rtpInterface->fReadHandlerProc != NULL) {
#ifdef DEBUG_RECEIVE
	  fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): reading %d bytes on channel %d\n", fOurSocketNum, rtpInterface->fNextTCPReadSize, rtpInterface->fNextTCPReadStreamChannelId);
#endif
	  fTCPReadingState = AWAITING_PACKET_DATA; // stay in this state; the handler reads (via "RTPInterface::handleRead()")
	  rtpInterface->fReadHandlerProc(rtpInterface->fOwner, mask);
	} else {
	  // No read handler is installed, so discard the packet data one byte at a time:
#ifdef DEBUG_RECEIVE
	  fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No handler proc for \"rtpInterface\" for channel %d; need to skip %d remaining bytes\n", fOurSocketNum, fStreamChannelId, rtpInterface->fNextTCPReadSize);
#endif
	  int result = readSocket(fEnv, fOurSocketNum, &c, 1, fromAddress);
	  if (result < 0) { // error reading TCP socket, so we will no longer handle it
#ifdef DEBUG_RECEIVE
	    fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): readSocket(1 byte) returned %d (error)\n", fOurSocketNum, result);
#endif
	    fReadErrorOccurred = True;
	    fDeleteMyselfNext = True;
	    return False;
	  } else {
	    fTCPReadingState = AWAITING_PACKET_DATA;
	    if (result == 1) {
	      --rtpInterface->fNextTCPReadSize;
	      callAgain = True;
	    }
	  }
	}
      }
#ifdef DEBUG_RECEIVE
      else fprintf(stderr, "SocketDescriptor(socket %d)::tcpReadHandler(): No \"rtpInterface\" for channel %d\n", fOurSocketNum, fStreamChannelId);
#endif
    }
  }

  return callAgain;
}
628 | ||
629 | ||
630 | ////////// tcpStreamRecord implementation ////////// | |
631 | ||
// Constructs a list node for the (streamSocketNum, streamChannelId) pair,
// linked in front of "next".
tcpStreamRecord
::tcpStreamRecord(int streamSocketNum, unsigned char streamChannelId,
		  tcpStreamRecord* next)
  : fNext(next),
    fStreamSocketNum(streamSocketNum), fStreamChannelId(streamChannelId) {
}
638 | ||
639 | tcpStreamRecord::~tcpStreamRecord() { | |
640 | delete fNext; | |
641 | } |
104 | 104 | u_int8_t const audioObjectType = profile + 1; |
105 | 105 | audioSpecificConfig[0] = (audioObjectType<<3) | (samplingFrequencyIndex>>1); |
106 | 106 | audioSpecificConfig[1] = (samplingFrequencyIndex<<7) | (channelConfiguration<<3); |
107 | sprintf(fConfigStr, "%02X%02x", audioSpecificConfig[0], audioSpecificConfig[1]); | |
107 | sprintf(fConfigStr, "%02X%02X", audioSpecificConfig[0], audioSpecificConfig[1]); | |
108 | 108 | } |
109 | 109 | |
110 | 110 | ADTSAudioFileSource::~ADTSAudioFileSource() { |
0 | /********** | |
1 | This library is free software; you can redistribute it and/or modify it under | |
2 | the terms of the GNU Lesser General Public License as published by the | |
3 | Free Software Foundation; either version 3 of the License, or (at your | |
4 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) | |
5 | ||
6 | This library is distributed in the hope that it will be useful, but WITHOUT | |
7 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
8 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for | |
9 | more details. | |
10 | ||
11 | You should have received a copy of the GNU Lesser General Public License | |
12 | along with this library; if not, write to the Free Software Foundation, Inc., | |
13 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
14 | **********/ | |
15 | // "liveMedia" | |
16 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. | |
17 | // A filter that reads (discrete) AAC audio frames, and outputs each frame with | |
18 | // a preceding ADTS header. | |
19 | // Implementation | |
20 | ||
21 | #include "ADTSAudioStreamDiscreteFramer.hh" | |
22 | ||
// Map a single hexadecimal digit ('0'-'9', 'a'-'f', 'A'-'F') to its 4-bit
// numeric value.  Any other character maps to 0.
static u_int8_t hexToBinary(char c) {
  if ('0' <= c && c <= '9') return u_int8_t(c - '0');
  if ('a' <= c && c <= 'f') return u_int8_t(c - 'a' + 10);
  if ('A' <= c && c <= 'F') return u_int8_t(c - 'A' + 10);

  return 0; // default if 'c' is not hex
}
30 | ||
31 | ADTSAudioStreamDiscreteFramer* ADTSAudioStreamDiscreteFramer | |
32 | ::createNew(UsageEnvironment& env, FramedSource* inputSource, char const* configStr) { | |
33 | u_int16_t configValue = 0; | |
34 | ||
35 | if (configStr != NULL && strlen(configStr) >= 4) { | |
36 | configValue = | |
37 | (hexToBinary(configStr[0])<<12)| | |
38 | (hexToBinary(configStr[1])<<8)| | |
39 | (hexToBinary(configStr[2])<<4)| | |
40 | hexToBinary(configStr[3]); | |
41 | } | |
42 | ||
43 | // Unpack the 2-byte 'config' value to get "profile", "samplingFrequencyIndex", | |
44 | // and "channelConfiguration": | |
45 | u_int8_t audioObjectType = configValue>>11; | |
46 | u_int8_t profile = audioObjectType == 0 ? 0 : audioObjectType-1; | |
47 | u_int8_t samplingFrequencyIndex = (configValue&0x0780)>>7; | |
48 | u_int8_t channelConfiguration = (configValue&0x0078)>>3; | |
49 | ||
50 | return new ADTSAudioStreamDiscreteFramer(env, inputSource, | |
51 | profile, samplingFrequencyIndex, channelConfiguration); | |
52 | } | |
53 | ||
54 | ADTSAudioStreamDiscreteFramer | |
55 | ::ADTSAudioStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource, | |
56 | u_int8_t profile, u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration) | |
57 | : FramedFilter(env, inputSource) { | |
58 | // Set up the ADTS header that we'll be prepending to each audio frame. | |
59 | // This will be fixed, except for the frame size. | |
60 | profile &= 0x03; // use 2 bits only | |
61 | samplingFrequencyIndex &= 0x0F; // use 4 bits only | |
62 | channelConfiguration &= 0x07; // use 3 bits only | |
63 | ||
64 | fADTSHeader[0] = 0xFF; // first 8 bits of syncword | |
65 | fADTSHeader[1] = 0xF1; // last 4 bits of syncword; version 0; layer 0; protection absent | |
66 | fADTSHeader[2] = (profile<<6)|(samplingFrequencyIndex<<2)|(channelConfiguration>>2); | |
67 | fADTSHeader[3] = channelConfiguration<<6; | |
68 | fADTSHeader[4] = 0; | |
69 | fADTSHeader[5] = 0x1F; // set 'buffer fullness' to all-1s | |
70 | fADTSHeader[6] = 0xFC; // set 'buffer fullness' to all-1s | |
71 | } | |
72 | ||
73 | ADTSAudioStreamDiscreteFramer::~ADTSAudioStreamDiscreteFramer() { | |
74 | } | |
75 | ||
76 | void ADTSAudioStreamDiscreteFramer::doGetNextFrame() { | |
77 | // Arrange to read data (an AAC audio frame) from our data source, directly into the | |
78 | // downstream object's buffer, allowing for the ATDS header in front. | |
79 | // Make sure there's enoughn space: | |
80 | if (fMaxSize <= ADTS_HEADER_SIZE) { | |
81 | fNumTruncatedBytes = ADTS_HEADER_SIZE - fMaxSize; | |
82 | handleClosure(); | |
83 | return; | |
84 | } | |
85 | ||
86 | fInputSource->getNextFrame(fTo + ADTS_HEADER_SIZE, fMaxSize - ADTS_HEADER_SIZE, | |
87 | afterGettingFrame, this, | |
88 | FramedSource::handleClosure, this); | |
89 | } | |
90 | ||
91 | void ADTSAudioStreamDiscreteFramer | |
92 | ::afterGettingFrame(void* clientData, unsigned frameSize, | |
93 | unsigned numTruncatedBytes, | |
94 | struct timeval presentationTime, | |
95 | unsigned durationInMicroseconds) { | |
96 | ADTSAudioStreamDiscreteFramer* source = (ADTSAudioStreamDiscreteFramer*)clientData; | |
97 | source->afterGettingFrame1(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds); | |
98 | } | |
99 | ||
100 | void ADTSAudioStreamDiscreteFramer | |
101 | ::afterGettingFrame1(unsigned frameSize, unsigned numTruncatedBytes, | |
102 | struct timeval presentationTime, | |
103 | unsigned durationInMicroseconds) { | |
104 | frameSize += ADTS_HEADER_SIZE; | |
105 | ||
106 | // Fill in the ADTS header with the (updated) frame size: | |
107 | fFrameSize = frameSize; | |
108 | frameSize &= 0x1FFF; // use only 13 bits in the ADTS header | |
109 | fADTSHeader[3] = (fADTSHeader[3]&0xFC)|(frameSize>>11); | |
110 | fADTSHeader[4] = frameSize>>3; | |
111 | fADTSHeader[5] = (fADTSHeader[5]&0x1F)|(frameSize<<5); | |
112 | ||
113 | // And copy the ADTS header to the destination: | |
114 | fTo[0] = fADTSHeader[0]; | |
115 | fTo[1] = fADTSHeader[1]; | |
116 | fTo[2] = fADTSHeader[2]; | |
117 | fTo[3] = fADTSHeader[3]; | |
118 | fTo[4] = fADTSHeader[4]; | |
119 | fTo[5] = fADTSHeader[5]; | |
120 | fTo[6] = fADTSHeader[6]; | |
121 | ||
122 | // Complete delivery to the downstream object: | |
123 | fNumTruncatedBytes = numTruncatedBytes; | |
124 | fPresentationTime = presentationTime; | |
125 | fDurationInMicroseconds = durationInMicroseconds; | |
126 | afterGetting(this); | |
127 | } |
49 | 49 | // Deliver an "access_unit_delimiter" NAL unit instead: |
50 | 50 | unsigned const audNALSize = fHNumber == 264 ? 2 : 3; |
51 | 51 | |
52 | if (audNALSize > fMaxSize) { // there's no space | |
53 | fNumTruncatedBytes = audNALSize - fMaxSize; | |
52 | // If we have VPS,SPS,PPS NAL units, then append those as well: | |
53 | unsigned nalDataSize | |
54 | = audNALSize + fLastSeenVPSSize + fLastSeenSPSSize + fLastSeenPPSSize; | |
55 | if (fIncludeStartCodeInOutput) { | |
56 | // Add the size of the 4-byte start codes: | |
57 | if (fLastSeenVPSSize > 0) nalDataSize += 4; | |
58 | if (fLastSeenSPSSize > 0) nalDataSize += 4; | |
59 | if (fLastSeenPPSSize > 0) nalDataSize += 4; | |
60 | } | |
61 | ||
62 | if (nalDataSize > fMaxSize) { // there's no space | |
63 | fNumTruncatedBytes = nalDataSize - fMaxSize; | |
54 | 64 | handleClosure(); |
55 | 65 | return; |
56 | 66 | } |
64 | 74 | *fTo++ = 0x50; // "pic_type" (2); "rbsp_trailing_bits()" (Is this correct??) |
65 | 75 | } |
66 | 76 | |
67 | fFrameSize = (fIncludeStartCodeInOutput ? 4: 0) + audNALSize; | |
77 | if (fLastSeenVPSSize > 0) { | |
78 | if (fIncludeStartCodeInOutput) { *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x01; } | |
79 | memmove(fTo, fLastSeenVPS, fLastSeenVPSSize); fTo += fLastSeenVPSSize; | |
80 | } | |
81 | if (fLastSeenSPSSize > 0) { | |
82 | if (fIncludeStartCodeInOutput) { *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x01; } | |
83 | memmove(fTo, fLastSeenSPS, fLastSeenSPSSize); fTo += fLastSeenSPSSize; | |
84 | } | |
85 | if (fLastSeenPPSSize > 0) { | |
86 | if (fIncludeStartCodeInOutput) { *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x00; *fTo++ = 0x01; } | |
87 | memmove(fTo, fLastSeenPPS, fLastSeenPPSSize); fTo += fLastSeenPPSSize; | |
88 | } | |
89 | ||
90 | fFrameSize = (fIncludeStartCodeInOutput ? 4: 0) + nalDataSize; | |
91 | ||
68 | 92 | pictureEndMarker() = False; // for next time |
69 | 93 | afterGetting(this); // complete delivery to the downstream object |
70 | 94 | } else { |
24 | 24 | EVP_DigestUpdate(ctx, data2, data2Length); |
25 | 25 | } |
26 | 26 | EVP_DigestFinal(ctx, resultDigest, NULL); |
27 | EVP_MD_CTX_destroy(ctx); | |
27 | 28 | } |
28 | 29 | |
29 | 30 | void HMAC_SHA1(u_int8_t const* key, unsigned keyLength, u_int8_t const* text, unsigned textLength, |
219 | 219 | |
220 | 220 | // Do the delivery: |
221 | 221 | fParent.handleNewBuffer(fInputBuffer, fInputBufferBytesAvailable, |
222 | fMPEGVersion, fSCR, fPID); | |
222 | fMPEGVersion, fSCR, fPID); | |
223 | 223 | |
224 | 224 | return True; |
225 | 225 | } |
249 | 249 | pts += fPCR.remainingBits/90000.0; |
250 | 250 | pts += fPCR.extension/27000000.0; |
251 | 251 | |
252 | double lastSubSegmentDuration = fPreviousPTS == 0.0 ? 0.0 : pts - fPreviousPTS; | |
252 | double lastSubSegmentDuration | |
253 | = (fPreviousPTS == 0.0) || (pts < fPreviousPTS) ? 0.0 : pts - fPreviousPTS; | |
253 | 254 | fCurrentSegmentDuration += lastSubSegmentDuration; |
254 | 255 | |
255 | 256 | // Check whether we need to segment the stream now: |
10 | 10 | $(CPLUSPLUS_COMPILER) -c $(CPLUSPLUS_FLAGS) $< |
11 | 11 | |
12 | 12 | MP3_SOURCE_OBJS = MP3FileSource.$(OBJ) MP3Transcoder.$(OBJ) MP3ADU.$(OBJ) MP3ADUdescriptor.$(OBJ) MP3ADUinterleaving.$(OBJ) MP3ADUTranscoder.$(OBJ) MP3StreamState.$(OBJ) MP3Internals.$(OBJ) MP3InternalsHuffman.$(OBJ) MP3InternalsHuffmanTable.$(OBJ) MP3ADURTPSource.$(OBJ) |
13 | MPEG_SOURCE_OBJS = MPEG1or2Demux.$(OBJ) MPEG1or2DemuxedElementaryStream.$(OBJ) MPEGVideoStreamFramer.$(OBJ) MPEG1or2VideoStreamFramer.$(OBJ) MPEG1or2VideoStreamDiscreteFramer.$(OBJ) MPEG4VideoStreamFramer.$(OBJ) MPEG4VideoStreamDiscreteFramer.$(OBJ) H264or5VideoStreamFramer.$(OBJ) H264or5VideoStreamDiscreteFramer.$(OBJ) H264VideoStreamFramer.$(OBJ) H264VideoStreamDiscreteFramer.$(OBJ) H265VideoStreamFramer.$(OBJ) H265VideoStreamDiscreteFramer.$(OBJ) MPEGVideoStreamParser.$(OBJ) MPEG1or2AudioStreamFramer.$(OBJ) MPEG1or2AudioRTPSource.$(OBJ) MPEG4LATMAudioRTPSource.$(OBJ) MPEG4ESVideoRTPSource.$(OBJ) MPEG4GenericRTPSource.$(OBJ) $(MP3_SOURCE_OBJS) MPEG1or2VideoRTPSource.$(OBJ) MPEG2TransportStreamMultiplexor.$(OBJ) MPEG2TransportStreamFromPESSource.$(OBJ) MPEG2TransportStreamFromESSource.$(OBJ) MPEG2TransportStreamFramer.$(OBJ) MPEG2TransportStreamAccumulator.$(OBJ) ADTSAudioFileSource.$(OBJ) | |
13 | MPEG_SOURCE_OBJS = MPEG1or2Demux.$(OBJ) MPEG1or2DemuxedElementaryStream.$(OBJ) MPEGVideoStreamFramer.$(OBJ) MPEG1or2VideoStreamFramer.$(OBJ) MPEG1or2VideoStreamDiscreteFramer.$(OBJ) MPEG4VideoStreamFramer.$(OBJ) MPEG4VideoStreamDiscreteFramer.$(OBJ) H264or5VideoStreamFramer.$(OBJ) H264or5VideoStreamDiscreteFramer.$(OBJ) H264VideoStreamFramer.$(OBJ) H264VideoStreamDiscreteFramer.$(OBJ) H265VideoStreamFramer.$(OBJ) H265VideoStreamDiscreteFramer.$(OBJ) MPEGVideoStreamParser.$(OBJ) MPEG1or2AudioStreamFramer.$(OBJ) MPEG1or2AudioRTPSource.$(OBJ) MPEG4LATMAudioRTPSource.$(OBJ) MPEG4ESVideoRTPSource.$(OBJ) MPEG4GenericRTPSource.$(OBJ) $(MP3_SOURCE_OBJS) MPEG1or2VideoRTPSource.$(OBJ) MPEG2TransportStreamMultiplexor.$(OBJ) MPEG2TransportStreamFromPESSource.$(OBJ) MPEG2TransportStreamFromESSource.$(OBJ) MPEG2TransportStreamFramer.$(OBJ) MPEG2TransportStreamAccumulator.$(OBJ) ADTSAudioFileSource.$(OBJ) ADTSAudioStreamDiscreteFramer.$(OBJ) | |
14 | 14 | #JPEG_SOURCE_OBJS = JPEGVideoSource.$(OBJ) JPEGVideoRTPSource.$(OBJ) JPEG2000VideoStreamFramer.$(OBJ) JPEG2000VideoStreamParser.$(OBJ) JPEG2000VideoRTPSource.$(OBJ) |
15 | 15 | JPEG_SOURCE_OBJS = JPEGVideoSource.$(OBJ) JPEGVideoRTPSource.$(OBJ) JPEG2000VideoRTPSource.$(OBJ) |
16 | 16 | H263_SOURCE_OBJS = H263plusVideoRTPSource.$(OBJ) H263plusVideoStreamFramer.$(OBJ) H263plusVideoStreamParser.$(OBJ) |
192 | 192 | MPEG2TransportStreamAccumulator.$(CPP): include/MPEG2TransportStreamAccumulator.hh |
193 | 193 | include/MPEG2TransportStreamAccumulator.hh: include/FramedFilter.hh |
194 | 194 | ADTSAudioFileSource.$(CPP): include/ADTSAudioFileSource.hh include/InputFile.hh |
195 | ADTSAudioStreamDiscreteFramer.$(CPP): include/ADTSAudioStreamDiscreteFramer.hh | |
196 | include/ADTSAudioStreamDiscreteFramer.hh: include/FramedFilter.hh | |
195 | 197 | JPEGVideoSource.$(CPP): include/JPEGVideoSource.hh |
196 | 198 | include/JPEGVideoSource.hh: include/FramedSource.hh |
197 | 199 | JPEGVideoRTPSource.$(CPP): include/JPEGVideoRTPSource.hh |
423 | 425 | |
424 | 426 | include/liveMedia.hh:: include/MPEG1or2AudioRTPSink.hh include/MP3ADURTPSink.hh include/MPEG1or2VideoRTPSink.hh include/MPEG4ESVideoRTPSink.hh include/BasicUDPSink.hh include/AMRAudioFileSink.hh include/H264VideoFileSink.hh include/H265VideoFileSink.hh include/OggFileSink.hh include/GSMAudioRTPSink.hh include/H263plusVideoRTPSink.hh include/H264VideoRTPSink.hh include/H265VideoRTPSink.hh include/DVVideoRTPSource.hh include/DVVideoRTPSink.hh include/DVVideoStreamFramer.hh include/H264VideoStreamFramer.hh include/H265VideoStreamFramer.hh include/H264VideoStreamDiscreteFramer.hh include/H265VideoStreamDiscreteFramer.hh include/JPEGVideoRTPSink.hh include/SimpleRTPSink.hh include/uLawAudioFilter.hh include/MPEG2IndexFromTransportStream.hh include/MPEG2TransportStreamTrickModeFilter.hh include/ByteStreamMultiFileSource.hh include/ByteStreamMemoryBufferSource.hh include/BasicUDPSource.hh include/SimpleRTPSource.hh include/MPEG1or2AudioRTPSource.hh include/MPEG4LATMAudioRTPSource.hh include/MPEG4LATMAudioRTPSink.hh include/MPEG4ESVideoRTPSource.hh include/MPEG4GenericRTPSource.hh include/MP3ADURTPSource.hh include/QCELPAudioRTPSource.hh include/AMRAudioRTPSource.hh include/JPEGVideoRTPSource.hh include/JPEGVideoSource.hh include/MPEG1or2VideoRTPSource.hh include/VorbisAudioRTPSource.hh include/TheoraVideoRTPSource.hh include/VP8VideoRTPSource.hh include/VP9VideoRTPSource.hh include/RawVideoRTPSource.hh |
425 | 427 | |
426 | include/liveMedia.hh:: include/MPEG2TransportStreamFromPESSource.hh include/MPEG2TransportStreamFromESSource.hh include/MPEG2TransportStreamFramer.hh include/ADTSAudioFileSource.hh include/H261VideoRTPSource.hh include/H263plusVideoRTPSource.hh include/H264VideoRTPSource.hh include/H265VideoRTPSource.hh include/MP3FileSource.hh include/MP3ADU.hh include/MP3ADUinterleaving.hh include/MP3Transcoder.hh include/MPEG1or2DemuxedElementaryStream.hh include/MPEG1or2AudioStreamFramer.hh include/MPEG1or2VideoStreamDiscreteFramer.hh include/MPEG4VideoStreamDiscreteFramer.hh include/H263plusVideoStreamFramer.hh include/AC3AudioStreamFramer.hh include/AC3AudioRTPSource.hh include/AC3AudioRTPSink.hh include/VorbisAudioRTPSink.hh include/TheoraVideoRTPSink.hh include/VP8VideoRTPSink.hh include/VP9VideoRTPSink.hh include/MPEG4GenericRTPSink.hh include/DeviceSource.hh include/AudioInputDevice.hh include/WAVAudioFileSource.hh include/StreamReplicator.hh include/RTSPRegisterSender.hh | |
428 | include/liveMedia.hh:: include/MPEG2TransportStreamFromPESSource.hh include/MPEG2TransportStreamFromESSource.hh include/MPEG2TransportStreamFramer.hh include/ADTSAudioFileSource.hh include/ADTSAudioStreamDiscreteFramer.hh include/H261VideoRTPSource.hh include/H263plusVideoRTPSource.hh include/H264VideoRTPSource.hh include/H265VideoRTPSource.hh include/MP3FileSource.hh include/MP3ADU.hh include/MP3ADUinterleaving.hh include/MP3Transcoder.hh include/MPEG1or2DemuxedElementaryStream.hh include/MPEG1or2AudioStreamFramer.hh include/MPEG1or2VideoStreamDiscreteFramer.hh include/MPEG4VideoStreamDiscreteFramer.hh include/H263plusVideoStreamFramer.hh include/AC3AudioStreamFramer.hh include/AC3AudioRTPSource.hh include/AC3AudioRTPSink.hh include/VorbisAudioRTPSink.hh include/TheoraVideoRTPSink.hh include/VP8VideoRTPSink.hh include/VP9VideoRTPSink.hh include/MPEG4GenericRTPSink.hh include/DeviceSource.hh include/AudioInputDevice.hh include/WAVAudioFileSource.hh include/StreamReplicator.hh include/RTSPRegisterSender.hh | |
427 | 429 | |
428 | 430 | include/liveMedia.hh:: include/RTSPClient.hh include/SIPClient.hh include/QuickTimeFileSink.hh include/QuickTimeGenericRTPSource.hh include/AVIFileSink.hh include/PassiveServerMediaSubsession.hh include/MPEG4VideoFileServerMediaSubsession.hh include/H264VideoFileServerMediaSubsession.hh include/H265VideoFileServerMediaSubsession.hh include/WAVAudioFileServerMediaSubsession.hh include/AMRAudioFileServerMediaSubsession.hh include/AMRAudioFileSource.hh include/AMRAudioRTPSink.hh include/T140TextRTPSink.hh include/MP3AudioFileServerMediaSubsession.hh include/MPEG1or2VideoFileServerMediaSubsession.hh include/MPEG1or2FileServerDemux.hh include/MPEG2TransportFileServerMediaSubsession.hh include/H263plusVideoFileServerMediaSubsession.hh include/ADTSAudioFileServerMediaSubsession.hh include/DVVideoFileServerMediaSubsession.hh include/AC3AudioFileServerMediaSubsession.hh include/MPEG2TransportUDPServerMediaSubsession.hh include/MatroskaFileServerDemux.hh include/OggFileServerDemux.hh include/ProxyServerMediaSession.hh include/HLSSegmenter.hh |
429 | 431 |
39 | 39 | } |
40 | 40 | |
unsigned RTSPClient::sendOptionsCommand(responseHandler* responseHandler, Authenticator* authenticator) {
  // Adopt the caller-supplied authenticator before sending the "OPTIONS" request.
  // NOTE(review): the '<' comparison takes a *pointer* right-hand side — this relies
  // on a custom "Authenticator::operator<" that presumably also handles a NULL
  // "authenticator"; confirm against the "Authenticator" class definition.
  if (fCurrentAuthenticator < authenticator) fCurrentAuthenticator = *authenticator;
  return sendRequest(new RequestRecord(++fCSeq, "OPTIONS", responseHandler));
}
45 | 45 |
1733 | 1733 | |
1734 | 1734 | // Create the "Range:" header that we'll send back in our response. |
1735 | 1735 | // (Note that we do this after seeking, in case the seeking operation changed the range start time.) |
1736 | char* rangeHeader; | |
1736 | 1737 | if (absStart != NULL) { |
1737 | 1738 | // We're seeking by 'absolute' time: |
1739 | char* rangeHeaderBuf; | |
1740 | ||
1738 | 1741 | if (absEnd == NULL) { |
1739 | sprintf(buf, "Range: clock=%s-\r\n", absStart); | |
1742 | rangeHeaderBuf = new char[100 + strlen(absStart)]; // ample space | |
1743 | sprintf(rangeHeaderBuf, "Range: clock=%s-\r\n", absStart); | |
1740 | 1744 | } else { |
1741 | sprintf(buf, "Range: clock=%s-%s\r\n", absStart, absEnd); | |
1745 | rangeHeaderBuf = new char[100 + strlen(absStart) + strlen(absEnd)]; // ample space | |
1746 | sprintf(rangeHeaderBuf, "Range: clock=%s-%s\r\n", absStart, absEnd); | |
1742 | 1747 | } |
1743 | 1748 | delete[] absStart; delete[] absEnd; |
1749 | rangeHeader = strDup(rangeHeaderBuf); | |
1750 | delete[] rangeHeaderBuf; | |
1744 | 1751 | } else { |
1745 | 1752 | // We're seeking by relative (NPT) time: |
1746 | 1753 | if (!sawRangeHeader || startTimeIsNow) { |
1766 | 1773 | } else { |
1767 | 1774 | sprintf(buf, "Range: npt=%.3f-%.3f\r\n", rangeStart, rangeEnd); |
1768 | 1775 | } |
1769 | } | |
1770 | char* rangeHeader = strDup(buf); | |
1776 | rangeHeader = strDup(buf); | |
1777 | } | |
1771 | 1778 | |
1772 | 1779 | // Now, start streaming: |
1773 | 1780 | for (i = 0; i < fNumStreamStates; ++i) { |
0 | /********** | |
1 | This library is free software; you can redistribute it and/or modify it under | |
2 | the terms of the GNU Lesser General Public License as published by the | |
3 | Free Software Foundation; either version 3 of the License, or (at your | |
4 | option) any later version. (See <http://www.gnu.org/copyleft/lesser.html>.) | |
5 | ||
6 | This library is distributed in the hope that it will be useful, but WITHOUT | |
7 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS | |
8 | FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for | |
9 | more details. | |
10 | ||
11 | You should have received a copy of the GNU Lesser General Public License | |
12 | along with this library; if not, write to the Free Software Foundation, Inc., | |
13 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
14 | **********/ | |
15 | // "liveMedia" | |
16 | // Copyright (c) 1996-2020 Live Networks, Inc. All rights reserved. | |
17 | // A filter that reads (discrete) AAC audio frames, and outputs each frame with | |
18 | // a preceding ADTS header. | |
19 | // C++ header | |
20 | ||
21 | #ifndef _ADTS_AUDIO_STREAM_DISCRETE_FRAMER_HH | |
22 | #define _ADTS_AUDIO_STREAM_DISCRETE_FRAMER_HH | |
23 | ||
24 | #ifndef _FRAMED_FILTER_HH | |
25 | #include "FramedFilter.hh" | |
26 | #endif | |
27 | ||
28 | #define ADTS_HEADER_SIZE 7 // we don't include a checksum | |
29 | ||
// A filter that prepends a 7-byte ADTS header to each (discrete) AAC audio
// frame it reads from its input source, making the output stream playable
// as raw ADTS audio.
class ADTSAudioStreamDiscreteFramer: public FramedFilter {
public:
  static ADTSAudioStreamDiscreteFramer*
  createNew(UsageEnvironment& env, FramedSource* inputSource, char const* configStr);
  // "configStr" should be a 4-character hexadecimal string for a 2-byte value
  // (a MPEG-4 'AudioSpecificConfig' — e.g., as obtained from a subsession's
  // "fmtp_config()"); it determines the profile, sampling frequency, and
  // channel configuration recorded in the ADTS header.

protected:
  ADTSAudioStreamDiscreteFramer(UsageEnvironment& env, FramedSource* inputSource,
				u_int8_t profile, u_int8_t samplingFrequencyIndex, u_int8_t channelConfiguration);
      // called only by createNew()
  virtual ~ADTSAudioStreamDiscreteFramer();

protected:
  // redefined virtual functions:
  virtual void doGetNextFrame();

protected:
  // Callback (and its member-function continuation) invoked after the input
  // source delivers a frame; fills in the header's frame-length field and
  // completes delivery downstream.
  static void afterGettingFrame(void* clientData, unsigned frameSize,
                                unsigned numTruncatedBytes,
                                struct timeval presentationTime,
                                unsigned durationInMicroseconds);
  void afterGettingFrame1(unsigned frameSize,
                          unsigned numTruncatedBytes,
                          struct timeval presentationTime,
                          unsigned durationInMicroseconds);

private:
  // The fixed 7-byte ADTS header; only its frame-length field changes per frame.
  u_int8_t fADTSHeader[ADTS_HEADER_SIZE];
};
59 | ||
60 | #endif |
75 | 75 | #include "MPEG2TransportStreamFromESSource.hh" |
76 | 76 | #include "MPEG2TransportStreamFramer.hh" |
77 | 77 | #include "ADTSAudioFileSource.hh" |
78 | #include "ADTSAudioStreamDiscreteFramer.hh" | |
78 | 79 | #include "H261VideoRTPSource.hh" |
79 | 80 | #include "H263plusVideoRTPSource.hh" |
80 | 81 | #include "H264VideoRTPSource.hh" |
3 | 3 | #ifndef _LIVEMEDIA_VERSION_HH |
4 | 4 | #define _LIVEMEDIA_VERSION_HH |
5 | 5 | |
6 | #define LIVEMEDIA_LIBRARY_VERSION_STRING "2020.04.12" | |
7 | #define LIVEMEDIA_LIBRARY_VERSION_INT 1586649600 | |
6 | #define LIVEMEDIA_LIBRARY_VERSION_STRING "2020.07.09" | |
7 | #define LIVEMEDIA_LIBRARY_VERSION_INT 1594252800 | |
8 | 8 | |
9 | 9 | #endif |
0 | PREFIX = /usr/local | |
0 | 1 | INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include |
1 | 2 | # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. |
2 | 3 | libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) |
1 | 1 | |
2 | 2 | MEDIA_SERVER = live555MediaServer$(EXE) |
3 | 3 | |
4 | PREFIX = /usr/local | |
5 | 4 | ALL = $(MEDIA_SERVER) |
6 | 5 | all: $(ALL) |
7 | 6 |
4 | 4 | #ifndef _MEDIA_SERVER_VERSION_HH |
5 | 5 | #define _MEDIA_SERVER_VERSION_HH |
6 | 6 | |
7 | #define MEDIA_SERVER_VERSION_STRING "1.00" | |
7 | #define MEDIA_SERVER_VERSION_STRING "1.01" | |
8 | 8 | |
9 | 9 | #endif |
0 | PREFIX = /usr/local | |
0 | 1 | INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include |
1 | 2 | # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. |
2 | 3 | libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) |
1 | 1 | |
2 | 2 | PROXY_SERVER = live555ProxyServer$(EXE) |
3 | 3 | |
4 | PREFIX = /usr/local | |
5 | 4 | ALL = $(PROXY_SERVER) |
6 | 5 | all: $(ALL) |
7 | 6 |
0 | PREFIX = /usr/local | |
0 | 1 | INCLUDES = -I../UsageEnvironment/include -I../groupsock/include -I../liveMedia/include -I../BasicUsageEnvironment/include |
1 | 2 | # Default library filename suffixes for each library that we link with. The "config.*" file might redefine these later. |
2 | 3 | libliveMedia_LIB_SUFFIX = $(LIB_SUFFIX) |
12 | 12 | |
13 | 13 | MISC_APPS = testMPEG1or2Splitter$(EXE) testMPEG1or2ProgramToTransportStream$(EXE) testH264VideoToTransportStream$(EXE) testH265VideoToTransportStream$(EXE) MPEG2TransportStreamIndexer$(EXE) testMPEG2TransportStreamTrickPlay$(EXE) registerRTSPStream$(EXE) testMKVSplitter$(EXE) testMPEG2TransportStreamSplitter$(EXE) mikeyParse$(EXE) |
14 | 14 | |
15 | PREFIX = /usr/local | |
16 | 15 | ALL = $(MULTICAST_APPS) $(UNICAST_APPS) $(HLS_APPS) $(MISC_APPS) |
17 | 16 | all: $(ALL) |
18 | 17 |
905 | 905 | } else if (strcmp(subsession->codecName(), "VORBIS") == 0 || |
906 | 906 | strcmp(subsession->codecName(), "OPUS") == 0) { |
907 | 907 | createOggFileSink = True; |
908 | } else if (strcmp(subsession->codecName(), "MPEG4-GENERIC") == 0) { | |
909 | // For AAC audio, we use a regular file sink, but add a 'ADTS framer' filter | |
910 | // to the end of the data source, so that the resulting file is playable: | |
911 | FramedFilter* adtsFramer | |
912 | = ADTSAudioStreamDiscreteFramer::createNew(*env, subsession->readSource(), | |
913 | subsession->fmtp_config()); | |
914 | subsession->addFilter(adtsFramer); | |
908 | 915 | } |
909 | 916 | } |
910 | 917 | if (createOggFileSink) { |