From 094b04f9a0a4b11a7cc46b4262a7648e7289a001 Mon Sep 17 00:00:00 2001
From: Pengfei Qu
Date: Fri, 10 Jan 2020 23:38:57 -0500
Subject: [PATCH] update the docker image to 20.1

---
 ad-insertion/ad-segment/Dockerfile | 2 +-
 ad-insertion/ad-transcode/Dockerfile | 2 +-
 .../analytics/VCAC-A/ffmpeg/Dockerfile | 2 +-
 .../emotion_recognition/1/pipeline.json | 2 +-
 .../face_recognition/1/pipeline.json | 2 +-
 .../object_detection/1/pipeline.json | 2 +-
 ad-insertion/analytics/VCAC-A/gst/Dockerfile | 4 +---
 ad-insertion/analytics/Xeon/ffmpeg/Dockerfile | 2 +-
 .../emotion_recognition/1/pipeline.json | 2 +-
 .../face_recognition/1/pipeline.json | 2 +-
 .../object_detection/1/pipeline.json | 2 +-
 ad-insertion/analytics/Xeon/gst/Dockerfile | 2 +-
 .../analytics/app/modules/FFmpegPipeline.py | 21 ++++++++++++++++---
 .../analytics/app/modules/GstGVAJSONMeta.py | 2 +-
 common/abr_hls_dash.py | 8 +++----
 content-provider/archive/Dockerfile | 2 +-
 content-provider/transcode/Dockerfile | 2 +-
 17 files changed, 37 insertions(+), 24 deletions(-)

diff --git a/ad-insertion/ad-segment/Dockerfile b/ad-insertion/ad-segment/Dockerfile
index 00342fcf..91a41061 100644
--- a/ad-insertion/ad-segment/Dockerfile
+++ b/ad-insertion/ad-segment/Dockerfile
@@ -1,5 +1,5 @@
-FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:19.10.1
+FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.1
 
 RUN apt-get update && apt-get install -y -q youtube-dl bc wget && rm -rf /var/lib/apt/lists/*;
 
 ####
diff --git a/ad-insertion/ad-transcode/Dockerfile b/ad-insertion/ad-transcode/Dockerfile
index 466f64aa..de0f0a93 100755
--- a/ad-insertion/ad-transcode/Dockerfile
+++ b/ad-insertion/ad-transcode/Dockerfile
@@ -1,5 +1,5 @@
-FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:19.10.1
+FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.1
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends nginx python3-tornado python3-urllib3 python3-requests python3-psutil python3-pip && rm -rf /var/lib/apt/lists/* && \
     pip3 install 'kafka-python>=1.4.7' 'kazoo>=2.6.1'
 
diff --git a/ad-insertion/analytics/VCAC-A/ffmpeg/Dockerfile b/ad-insertion/analytics/VCAC-A/ffmpeg/Dockerfile
index f4689d9a..b21cfb4a 100644
--- a/ad-insertion/analytics/VCAC-A/ffmpeg/Dockerfile
+++ b/ad-insertion/analytics/VCAC-A/ffmpeg/Dockerfile
@@ -1,6 +1,6 @@
 # ssai_analytics_ffmpeg_vcac-a
 
-ARG base_name=openvisualcloud/vcaca-ubuntu1804-analytics-ffmpeg:19.11
+ARG base_name=openvisualcloud/vcaca-ubuntu1804-analytics-ffmpeg:20.1
 FROM ${base_name}
 
 # Fetch python3 and Install python3
diff --git a/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/emotion_recognition/1/pipeline.json b/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/emotion_recognition/1/pipeline.json
index aeb2ccd2..70779011 100644
--- a/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/emotion_recognition/1/pipeline.json
+++ b/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/emotion_recognition/1/pipeline.json
@@ -2,7 +2,7 @@
 "name": "emotion_recognition",
 "version": 1,
 "type": "FFmpeg",
-"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=HDDL,classify=model=\"{models[emotion_recognition][1][network]}\":model_proc=\"{models[emotion_recognition][1][proc]}\":device=HDDL\"",
+"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=HDDL,classify=model=\"{models[emotion_recognition][1][network]}\":model_proc=\"{models[emotion_recognition][1][proc]}\":device=HDDL,metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
 "description":"Emotion Recognition",
 "parameters": {
 "type" : "object",
diff --git a/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/face_recognition/1/pipeline.json b/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/face_recognition/1/pipeline.json
index b08bd973..db24051a 100644
--- a/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/face_recognition/1/pipeline.json
+++ b/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/face_recognition/1/pipeline.json
@@ -2,7 +2,7 @@
 "name": "face_recognition",
 "version": 1,
 "type": "FFmpeg",
-"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=HDDL,classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=HDDL\",identify=gallery=\"/home/gallery/face_gallery_FP16/gallery.json\"",
+"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=HDDL,classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=HDDL\",identify=gallery=\"/home/gallery/face_gallery_FP16/gallery.json,metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
 "description":"Face Recognition",
 "parameters": {
 "type" : "object",
diff --git a/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/object_detection/1/pipeline.json b/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/object_detection/1/pipeline.json
index d75a1e34..e4764f8c 100644
--- a/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/object_detection/1/pipeline.json
+++ b/ad-insertion/analytics/VCAC-A/ffmpeg/pipelines/object_detection/1/pipeline.json
@@ -2,7 +2,7 @@
 "name": "object_detection",
 "version": 1,
 "type": "FFmpeg",
-"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[object_detection][1][network]}:device=HDDL:model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[every-nth-frame]}\"",
+"template":"-flags unaligned -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device /dev/dri/renderD128 -i \"{source[uri]}\" -vf \"detect=model={models[object_detection][1][network]}:device=HDDL:model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[every-nth-frame]},metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
 "description": "Object Detection",
 "parameters": {
 "type" : "object",
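
Note on the pipeline templates in this patch: each FFmpeg -vf chain now ends with metaconvert=converter=json:method=all:source=NULL:tags=NULL. The source=NULL and tags=NULL values are placeholders; the FFmpegPipeline.py change later in this patch rewrites them per request into the escaped source URI and a tags file. Purely as an illustration (the model paths, interval, and URI below are made up, not taken from the repository), a rendered object_detection chain would look roughly like:

detect=model=/models/object_detection/1/model.xml:device=HDDL:model_proc=/models/object_detection/1/model_proc.json:interval=6,metaconvert=converter=json:method=all:source='rtsp\://camera\:554/stream':tags=file|/tmp/timestamp
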
diff --git a/ad-insertion/analytics/VCAC-A/gst/Dockerfile b/ad-insertion/analytics/VCAC-A/gst/Dockerfile
index 063559dd..1d2e69a9 100644
--- a/ad-insertion/analytics/VCAC-A/gst/Dockerfile
+++ b/ad-insertion/analytics/VCAC-A/gst/Dockerfile
@@ -1,13 +1,11 @@
 # ssai_analytics_gst_vcac-a
 
-FROM openvisualcloud/vcaca-ubuntu1804-analytics-gst:19.11
+FROM openvisualcloud/vcaca-ubuntu1804-analytics-gst:20.1
 
 # Fetch python3 and Install python3
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends libjson-c3 python3-gst-1.0 python3-jsonschema python3-gi python3-requests python3-tornado python3-pip python3-setuptools python3-wheel && rm -rf /var/lib/apt/lists/* && \
     pip3 install 'kafka-python>=1.4.7' 'kazoo>=2.6.1'
 
-#COPY ./app/server/requirements.txt /
-
 COPY app /home/
 COPY models/ /home/models/
 COPY gallery/ /home/gallery/
diff --git a/ad-insertion/analytics/Xeon/ffmpeg/Dockerfile b/ad-insertion/analytics/Xeon/ffmpeg/Dockerfile
index ee274232..99b8a0c1 100644
--- a/ad-insertion/analytics/Xeon/ffmpeg/Dockerfile
+++ b/ad-insertion/analytics/Xeon/ffmpeg/Dockerfile
@@ -1,6 +1,6 @@
 # ssai_analytics_ffmpeg_xeon
 
-From openvisualcloud/xeon-ubuntu1804-analytics-ffmpeg:19.11
+From openvisualcloud/xeon-ubuntu1804-analytics-ffmpeg:20.1
 
 # Fetch python3 and Install python3
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends python3-gst-1.0 python3-jsonschema python3-pip && rm -rf /var/lib/apt/lists/* && \
diff --git a/ad-insertion/analytics/Xeon/ffmpeg/pipelines/emotion_recognition/1/pipeline.json b/ad-insertion/analytics/Xeon/ffmpeg/pipelines/emotion_recognition/1/pipeline.json
index db8a3545..5b2ff8c5 100644
--- a/ad-insertion/analytics/Xeon/ffmpeg/pipelines/emotion_recognition/1/pipeline.json
+++ b/ad-insertion/analytics/Xeon/ffmpeg/pipelines/emotion_recognition/1/pipeline.json
@@ -2,7 +2,7 @@
 "name": "emotion_recognition",
 "version": 1,
 "type": "FFmpeg",
-"template":"-i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=CPU,classify=model=\"{models[emotion_recognition][1][network]}\":model_proc=\"{models[emotion_recognition][1][proc]}\":device=CPU\"",
+"template":"-i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=CPU,classify=model=\"{models[emotion_recognition][1][network]}\":model_proc=\"{models[emotion_recognition][1][proc]}\":device=CPU,metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
 "description":"Emotion Recognition",
 "parameters": {
 "type" : "object",
diff --git a/ad-insertion/analytics/Xeon/ffmpeg/pipelines/face_recognition/1/pipeline.json b/ad-insertion/analytics/Xeon/ffmpeg/pipelines/face_recognition/1/pipeline.json
index 8d2fbe4d..42eadf6a 100644
--- a/ad-insertion/analytics/Xeon/ffmpeg/pipelines/face_recognition/1/pipeline.json
+++ b/ad-insertion/analytics/Xeon/ffmpeg/pipelines/face_recognition/1/pipeline.json
@@ -2,7 +2,7 @@
 "name": "face_recognition",
 "version": 1,
 "type": "FFmpeg",
-"template":"-i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=CPU,classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=CPU,identify=gallery=\"/home/gallery/face_gallery_FP32/gallery.json\"\"",
+"template":"-i \"{source[uri]}\" -vf \"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=CPU,classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=CPU,identify=gallery=\"/home/gallery/face_gallery_FP32/gallery.json\",metaconvert=converter=json:method=all:source=NULL:tags=NULL\"",
 "description":"Face Recognition Pipeline",
 "parameters": {
 "type" : "object",
\"detect=model={models[face_detection_retail][1][network]}:model_proc=\"{models[face_detection_retail][1][proc]}\":interval=\"{parameters[every-nth-frame]}\":device=CPU,classify=model=\"{models[face_reidentification][1][network]}\":model_proc=\"{models[face_reidentification][1][proc]}\":device=CPU,identify=gallery=\"/home/gallery/face_gallery_FP32/gallery.json\",metaconvert=converter=json:method=all:source=NULL:tags=NULL\"", "description":"Face Recognition Pipeline", "parameters": { "type" : "object", diff --git a/ad-insertion/analytics/Xeon/ffmpeg/pipelines/object_detection/1/pipeline.json b/ad-insertion/analytics/Xeon/ffmpeg/pipelines/object_detection/1/pipeline.json index 6480b85f..d636e953 100644 --- a/ad-insertion/analytics/Xeon/ffmpeg/pipelines/object_detection/1/pipeline.json +++ b/ad-insertion/analytics/Xeon/ffmpeg/pipelines/object_detection/1/pipeline.json @@ -2,7 +2,7 @@ "name": "object_detection", "version": 1, "type": "FFmpeg", - "template":"-i \"{source[uri]}\" -vf \"detect=model={models[object_detection][1][network]}:model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[every-nth-frame]}:device=CPU\"", + "template":"-i \"{source[uri]}\" -vf \"detect=model={models[object_detection][1][network]}:model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[every-nth-frame]}:device=CPU,metaconvert=converter=json:method=all:source=NULL:tags=NULL\"", "description": "Object Detection", "parameters": { "type" : "object", diff --git a/ad-insertion/analytics/Xeon/gst/Dockerfile b/ad-insertion/analytics/Xeon/gst/Dockerfile index cd467080..6605816e 100644 --- a/ad-insertion/analytics/Xeon/gst/Dockerfile +++ b/ad-insertion/analytics/Xeon/gst/Dockerfile @@ -1,6 +1,6 @@ # ssai_analytics_gst_xeon -FROM openvisualcloud/xeon-ubuntu1804-analytics-gst:19.11 +FROM openvisualcloud/xeon-ubuntu1804-analytics-gst:20.1 # Fetch python3 and Install python3 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q python3-gst-1.0 python3-jsonschema python3-pip ca-certificates && rm -rf /var/lib/apt/lists/* && \ diff --git a/ad-insertion/analytics/app/modules/FFmpegPipeline.py b/ad-insertion/analytics/app/modules/FFmpegPipeline.py index 3cf64787..b0332b74 100644 --- a/ad-insertion/analytics/app/modules/FFmpegPipeline.py +++ b/ad-insertion/analytics/app/modules/FFmpegPipeline.py @@ -153,6 +153,23 @@ def _add_default_models(self,args): device = FFmpegPipeline.DEVICEID_MAP[int(filter_params['device'])] filter_params["model"] = ModelManager.get_default_network_for_device(device,filter_params["model"]) new_filters.append(self._join_filter_params(filter_params)) + elif (filter_params['type'] == "metaconvert"): + # use the file to send the tags info to ffmpeg + tmp_file = "/tmp/timestamp" + if "tags" in self.request: + try: + tmp_tags="" + for key in self.request["tags"]: + #filter_params["tags"] = "{\"%s\":%s}" % (key, self.request["tags"][key]) + tmp_tags = "{\"%s\":%s}" % (key, self.request["tags"][key]) + with open(tmp_file,'w') as f: + f.write(tmp_tags) + except Exception: + logger.error("Error adding tags") + source_uri ="source=" + "'" + self.request["source"]["uri"].replace(":","\\:") + "'" + filter_params_str = self._join_filter_params(filter_params).replace("source=NULL",source_uri) + filter_params_str = filter_params_str.replace("tags=NULL","tags=file|"+tmp_file) + new_filters.append(filter_params_str) else: new_filters.append(_filter) args[vf_index+1] =','.join(new_filters) @@ -164,9 +181,7 @@ def start(self): self._ffmpeg_launch_string = 
diff --git a/ad-insertion/analytics/app/modules/GstGVAJSONMeta.py b/ad-insertion/analytics/app/modules/GstGVAJSONMeta.py
index b3f82320..473edb26 100644
--- a/ad-insertion/analytics/app/modules/GstGVAJSONMeta.py
+++ b/ad-insertion/analytics/app/modules/GstGVAJSONMeta.py
@@ -6,7 +6,7 @@
 from ctypes import * # pylint: disable=unused-wildcard-import
 
-clib = CDLL("/usr/lib/x86_64-linux-gnu/gstreamer-1.0/libgstvideoanalyticsmeta.so")
+clib = CDLL("/usr/local/lib/x86_64-linux-gnu/gstreamer-1.0/libgstvideoanalyticsmeta.so")
 
 # json meta
diff --git a/common/abr_hls_dash.py b/common/abr_hls_dash.py
index 6d7e4571..536c2447 100755
--- a/common/abr_hls_dash.py
+++ b/common/abr_hls_dash.py
@@ -40,7 +40,7 @@ def check_renditions(frame_height, renditions=renditions_sample):
     return min_res
 
 def GetABRCommand(in_file, target, streaming_type, renditions=renditions_sample, duration=2,segment_num=0,fade_type=None,content_type=None):
-    ffprobe_cmd = ["/usr/bin/ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams",in_file]
+    ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams",in_file]
     process_id = subprocess.Popen(ffprobe_cmd,stdout=subprocess.PIPE)
 
     # the `multiprocessing.Process` process will wait until
@@ -77,7 +77,7 @@ def GetABRCommand(in_file, target, streaming_type, renditions=renditions_sample,
     cmd = []
     cmd_abr = []
-    cmd_base = ["/usr/bin/ffmpeg", "-hide_banner", "-y","-i", in_file]
+    cmd_base = ["ffmpeg", "-hide_banner", "-y","-i", in_file]
     if clip_a_duration == 0 and content_type == "ad":
         cmd_base += ["-f", "lavfi", "-i", "anullsrc=channel_layout=stereo:sample_rate="+str(44100)]
     cmd_misc = ["-hide_banner", "-y"]
@@ -168,7 +168,7 @@ def GetABRCommand(in_file, target, streaming_type, renditions=renditions_sample,
 
 def GetFadeCommand(in_file, target, fade_type):
     # ffprobe -v quiet -print_format json -show_streams
-    ffprobe_cmd = ["/usr/bin/ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams",in_file]
+    ffprobe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_streams",in_file]
     process_id = subprocess.Popen(ffprobe_cmd,stdout=subprocess.PIPE)
 
     # the `multiprocessing.Process` process will wait until
@@ -192,7 +192,7 @@ def GetFadeCommand(in_file, target, fade_type):
     a_st = (int)(clip_a_duration - duration)
 
     cmd = []
-    cmd_base = ["/usr/bin/ffmpeg", "-i", in_file]
+    cmd_base = ["ffmpeg", "-i", in_file]
     cmd_fade_in_out = ["-af", "afade="+fade_type+":"+"st="+str(a_st)+":"+"d="+str(duration), "-c:v", "copy"]
     cmd = cmd_base + cmd_fade_in_out
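
Since abr_hls_dash.py now invokes ffmpeg and ffprobe by name rather than the hard-coded /usr/bin paths, the binaries are resolved from PATH inside the 20.1 images. An optional sanity check a caller could add (not part of this patch; the helper below is hypothetical):

import shutil

def assert_tools_on_path(tools=("ffmpeg", "ffprobe")):
    # Fail fast if a required tool is missing from PATH in the running container.
    missing = [t for t in tools if shutil.which(t) is None]
    if missing:
        raise RuntimeError("Not found on PATH: " + ", ".join(missing))
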
diff --git a/content-provider/archive/Dockerfile b/content-provider/archive/Dockerfile
index ddcfe803..0dc70234 100644
--- a/content-provider/archive/Dockerfile
+++ b/content-provider/archive/Dockerfile
@@ -1,5 +1,5 @@
-FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:19.10.1
+FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.1
 
 RUN apt-get update && apt-get install -y -q youtube-dl bc wget && rm -rf /var/lib/apt/lists/*;
 
 ####
diff --git a/content-provider/transcode/Dockerfile b/content-provider/transcode/Dockerfile
index 089d8470..58c8877f 100644
--- a/content-provider/transcode/Dockerfile
+++ b/content-provider/transcode/Dockerfile
@@ -1,5 +1,5 @@
-FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:19.10.1
+FROM openvisualcloud/xeon-ubuntu1804-media-ffmpeg:20.1
 
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y -q --no-install-recommends nginx python3-tornado python3-urllib3 python3-requests python3-psutil python3-pip && rm -rf /var/lib/apt/lists/* && \
     pip3 install 'kafka-python>=1.4.7' 'kazoo>=2.6.1'