cbf29c4962
so the code was just adding unnecessary complexity. The pipeline now uses mp.pool to manage ffmpeg jobs as before. This reverts commit f91109fb3e and deletes the WorkThread class and its associated tests.
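For context, a minimal sketch of the pattern the commit message describes: a multiprocessing pool fanning ffmpeg jobs out to a fixed number of worker processes. The helper name, the job format, and the ffmpeg arguments are illustrative assumptions; only the mp.pool-based approach comes from the commit message itself.

import multiprocessing as mp
import subprocess

def run_ffmpeg(args):
    # Hypothetical helper: runs one ffmpeg job to completion in a worker
    # process and reports its exit code.
    result = subprocess.run(["ffmpeg", "-y", *args], capture_output=True)
    return result.returncode

def transcode_all(jobs, workers=4):
    # 'workers' corresponds to the [FFmpeg] workers setting in the config
    # below; each job is a list of ffmpeg arguments.
    with mp.Pool(processes=workers) as pool:
        return pool.map(run_ffmpeg, jobs)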
[Pipeline]
# These directories must all share a filesystem, in order to avoid
# races when packages are modified.

## The directory where incoming queue items are picked up.
queuedir = /some/dir

## The parent directory of all incoming job files. Each job uploading files
## must create its own subdirectory here.
uploaddir = /another/dir

## The directory storing finished packages.
packagedir = /a/third/dir

## The directory where the pipeline will store temporary files while working
## on a job.
tempdir = /a/fourth/dir

## The directory that keeps track of newly assigned package IDs.
cachedir = /a/fifth


[Logging]
# Logging settings. This entire section is optional.
# see https://docs.python.org/3.9/library/logging.html for details

## What log messages are shown on stderr.
## Defaults to ERROR if omitted.
## For the spammiest logging, use the number 5 here.
## Beware that this can log multiple gigabytes per hour.
log_level = DEBUG

## Mail settings for emailing log messages.
## If mail_level is not set, no emails will be sent.
## If mail_level is present, all other mail_* fields must be configured.
mail_level = ERROR
mail_from = messages@example.com
mail_to = admin@example.com
mail_subject = play-daemon has encountered an error


[Notifier]
# The destination for notifications sent by the pipeline
url = https://example.com/notify

# The authorization token to use for authenticating to the recipient
token = A70keN


[FFmpeg]
# The maximum number of ffmpeg jobs to run in parallel
workers = 4


[Daisy]
# Credentials and URL for Daisy API access
user = someuser
password = somepass
url = https://example.com/daisyapi/


[Ldap]
url = ldaps://ldap.example.com
base_dn = dc=example,dc=com


[SubtitlesWhisperHandler]
# The whisper model to use for subtitle generation
whispermodel = large-v2

# Where to store model data
modeldir = /some/path

# What device type to use. Accepts 'cpu' or 'gpu'.
device = gpu

# CPU only. Sets the number of workers to use.
count = 2

# GPU only. Specifies which GPUs to use, as a comma-separated list.
# GPUs are indexed from 0. nvidia-smi reports the index and bus ID of
# each available GPU.
gpu_ids = 0, 1

# GPU only. Sets the number of workers to start on each used GPU.
threads_per_gpu = 3


[ThumbnailHandler]
# The base image to use when creating presentation thumbnails
baseimage = /path/to/template.png

# The color to use for thumbnail image text
## Must be a valid PIL color
textcolor = white


[MediasiteProcessor]
# Credentials to use when downloading files from Mediasite
user = someuser
password = somepass

# The chunk size to use when downloading files.
## 10 MiB seems optimal for speed.
## Tested with 8 KiB, 10 MiB and 20 MiB.
chunksize = 10485760


[CatturaProcessor]
# Recorders: <name> = <daisy:lokalid>
## <name> must match the name configured in the recorder
## under Settings > System > Recorder Name
Aula = 620
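The file appears to use the INI dialect understood by Python's standard configparser module, so a minimal sketch of reading it could look like the following. The filename config.ini is an assumption; only the section and option names come from the example above.

import configparser

config = configparser.ConfigParser()
config.read("config.ini")

queuedir = config["Pipeline"]["queuedir"]
workers = config.getint("FFmpeg", "workers")
# [Logging] is optional, so fall back to the documented default.
log_level = config.get("Logging", "log_level", fallback="ERROR")
# gpu_ids is a comma-separated list of integers.
gpu_ids = [int(i) for i in config["SubtitlesWhisperHandler"]["gpu_ids"].split(",")]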