play-daemon/daemon/transcoder.py
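"""Transcoding backend for play-daemon.

Converts each source in a pending package (a regular video file or a
demux list of slide images) into scaled MP4 variants with ffmpeg,
copies or generates poster images, and returns the package with the
new stream information filled in.

Illustrative usage sketch (the config file name is an assumption and
the package dict is built elsewhere in the daemon):

    import configparser

    config = configparser.ConfigParser()
    config.read('config.ini')  # assumed file name
    transcoder = Transcoder(config)
    finished = transcoder.convert(package)
"""
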
import logging
import multiprocessing as mp
import os
import shutil
import time

import ffmpeg


class Transcoder:
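    """Transcode package sources in parallel using a multiprocessing pool.

    The pool size is read from the [transcoder] jobs setting, and every
    source is rendered in each configured quality variant (currently
    720p and 1080p, both CRF 26 with the 'fast' preset).
    """
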
    def __init__(self, config):
        self.jobs = int(config['transcoder']['jobs'])
        self.logger = logging.getLogger('play-daemon')
        ffmpeg_debug = config.getboolean('transcoder', 'ffmpeg_debug',
                                         fallback=False)
        self.worker = _Worker(self.logger, ffmpeg_debug)
        self.variants = {720: (26, 'fast'),
                         1080: (26, 'fast')}

    def convert(self, package):
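        """Transcode every source in the package and return it updated.

        Slide-image sources (with a 'demux_file') are rendered into videos,
        regular video sources are rescaled, and posters are copied or
        generated. Jobs are submitted to the pool asynchronously and the
        results are collected once all of them have finished.
        """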
        base = package['base']
        workbase = package['workbase']
        package_id = package['id']
        pending = []
        self.logger.info('%s - Started transcoding', package_id)
        with mp.Pool(processes=self.jobs) as pool:
            self.logger.debug('%s - Pool created', package_id)
            for stream in package['sources']:
                transcodes = {}
                # If the stream is a collection of slide images that has to be
                # converted to a video
                if 'demux_file' in stream:
                    self.logger.debug('%s - Processing stream %s',
                                      package_id,
                                      'slides job in demux.txt')
                    # Create the different variants
                    for maxheight in self.variants:
                        crf, preset = self.variants[maxheight]
                        # Call the _Worker method asynchronously
                        transcodes[maxheight] = pool.apply_async(
                            _Worker.make_slides_video,
                            (self.worker,
                             package_id,
                             stream['demux_file'],
                             workbase,
                             maxheight, preset, crf))
                    _, ext = os.path.splitext(stream['poster'])
                    slides_poster = f'slides_poster{ext}'
                    shutil.copy2(stream['poster'],
                                 os.path.join(workbase, slides_poster))
                    stream['poster'] = slides_poster
                    # Remove the reference to the demux file since it is
                    # no longer needed
                    stream.pop('demux_file')
                # If the stream is a regular video it needs to be transcoded
                # into the configured resolution variants
                elif 'video' in stream:
                    streampath_rel = stream['video']
                    streampath_abs = os.path.join(base, streampath_rel)
                    self.logger.debug('%s - Processing stream %s',
                                      package_id,
                                      streampath_rel)
                    # Create the different variants
                    for maxheight in self.variants:
                        crf, preset = self.variants[maxheight]
                        # Call the _Worker method asynchronously
                        transcodes[maxheight] = pool.apply_async(
                            _Worker.transcode,
                            (self.worker,
                             package_id,
                             streampath_abs,
                             workbase,
                             maxheight, preset, crf))
                    if 'poster' in stream:
                        posterpath_rel = stream['poster']
                        if posterpath_rel:
                            _, ext = os.path.splitext(posterpath_rel)
                            posterbase, _ = os.path.splitext(
                                os.path.basename(streampath_abs))
                            postername = f'{posterbase}{ext}'
                            shutil.copy2(os.path.join(base, posterpath_rel),
                                         os.path.join(workbase, postername))
                            stream['poster'] = postername
                        else:
                            posterjob = pool.apply_async(_Worker.make_poster,
                                                         (self.worker,
                                                          streampath_abs,
                                                          workbase))
                            transcodes['poster'] = posterjob
                # Store the jobs and streams in the pending list
                pending.append({'jobs': transcodes,
                                'data': stream})
            # Close the pool and wait for all jobs to finish
            pool.close()
            pool.join()
        self.logger.info('%s - Finished transcoding', package_id)
        package['sources'] = []
        for item in pending:
            stream = item['data']
            jobs = item['jobs']
            streams = {}
            for maxheight in self.variants:
                # Get the result; block until the job is ready
                streams[maxheight] = jobs[maxheight].get()
            stream['video'] = streams
            if 'poster' in jobs:
                stream['poster'] = jobs['poster'].get()
            package['sources'].append(stream)
        # Return the package with the finished streams
        return package


class _Worker:
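    """Transcoding helpers that run inside the pool's worker processes."""
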
    def __init__(self, logger, ffmpeg_verbose):
        self.postertime = 1
        self.logger = logger
        self.ffmpeg_quiet = not ffmpeg_verbose

    def _has_audio(self, instream):
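        """Return True if the input has at least one audio stream."""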
        for stream in ffmpeg.probe(instream)['streams']:
            if stream['codec_type'] == 'audio':
                return True
        return False

    def make_poster(self, instream, outdir):
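        """Grab a frame from instream and save it as a JPEG poster in outdir.

        Returns the file name of the generated poster image.
        """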
        name = os.path.basename(instream)
        name, _ = os.path.splitext(name)
        poster = f'{name}-poster.jpg'
        outpath = os.path.join(outdir, poster)
        ffmpeg.input(instream, ss=self.postertime) \
              .output(outpath, vframes=1) \
              .run(quiet=True)
        return poster

    def transcode(self, package_id, instream, outdir,
                  maxheight, preset, crf):
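        """Transcode instream to an H.264/AAC MP4 capped at maxheight.

        The video is scaled down (never up) to maxheight, audio is kept
        only when the source actually has an audio stream, and the result
        is written to outdir. Returns the output file name.
        """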
        self.logger.debug('%s - %s - Preparing stream for processing',
                          package_id,
                          instream)
        name = os.path.basename(instream)
        name, ext = os.path.splitext(name)
        outstream = f'{name}-{maxheight}-{preset}-{crf}.mp4'
        outpath = os.path.join(outdir, outstream)
        self.logger.debug('%s - %s - Building filter graph',
                          package_id,
                          instream)
        initial = ffmpeg.input(instream)
        scaled = initial.video.filter('scale',
                                      height=f'min(in_h, {maxheight})',
                                      width=-2)
        joined = scaled
        if self._has_audio(instream):
            joined = ffmpeg.concat(scaled, initial.audio, v=1, a=1)
        result = joined.output(outpath,
                               vcodec='libx264',
                               acodec='aac',
                               audio_bitrate='160k',
                               movflags='+faststart',
                               preset=preset,
                               crf=crf)
        self.logger.debug('%s - %s - Calling ffmpeg',
                          package_id,
                          instream)
        start = time.time()
        self.logger.debug('%s - FFmpeg commandline: %s',
                          package_id, ' '.join(result.compile()))
        result.run(quiet=self.ffmpeg_quiet)
        runtime = time.time() - start
        self.logger.info('%s - Transcoded %s in %s seconds',
                         package_id, outstream, runtime)
        return outstream

    # Make a video from the images used for the slideshow
    def make_slides_video(self, package_id, demux_file, outdir,
                          maxheight, preset, crf):
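        """Render the slide images listed in demux_file into an MP4.

        Uses ffmpeg's concat demuxer (see the links below) and writes the
        result to outdir. Returns the output file name.
        """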
        self.logger.debug('%s - Preparing slides for processing', package_id)
        outstream = f'slides-{maxheight}-{preset}-{crf}.mp4'
        outpath = os.path.join(outdir, outstream)
        self.logger.debug('%s - Building slide video from demux.txt',
                          package_id)
        # https://ffmpeg.org/ffmpeg-formats.html#toc-concat-1
        # https://github.com/kkroening/ffmpeg-python/tree/master/examples
        initial = ffmpeg.input(demux_file, safe=0)
        scaled = initial.filter('scale',
                                height=f'min(in_h, {maxheight})',
                                width=-2)
        result = scaled.output(outpath,
                               crf=crf,
                               preset=preset,
                               pix_fmt='yuv420p')
        self.logger.debug('%s - Calling ffmpeg', package_id)
        start = time.time()
        result.run(quiet=self.ffmpeg_quiet)
        runtime = time.time() - start
        self.logger.info('%s - Transcoded %s in %s seconds',
                         package_id, outstream, runtime)
        return outstream