#!/usr/bin/env python3
from itertools import product
import qa
from latencyParse import getLatencyTable
import os, stat, subprocess
import pandas as pd
from extra import log_args_decorator
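# Encoder parameter sweep: each top-level key is a GStreamer encoder element,
# and every combination of the listed property values will be tested.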
options = {
"x264enc": {
"bitrate": ["10000", "20000", "5000"],
"speed-preset": ["ultrafast", "fast", "medium"],
"tune": ["zerolatency"],
"sliced-threads": ["true", "false"],
"b-adapt": ["true", "false"],
"rc-lookahead": ["40", "0"],
"ref": ["3", "0"]
},
"nvh264enc": {
"bitrate": ["10000", "20000", "5000"],
"preset": ["4", "5", "1"],
"rc-lookahead": ["0"],
"rc-mode": ["2", "0", "5"],
"zerolatency": ["true", "false"],
},
"nvv4l2h264enc": {
"bitrate": ["10000000", "20000000", "5000000"],
"profile": ["0", "1", "2"],
"preset-id": ["1", "2", "3"],
"control-rate": ["1", "2"],
"idrinterval": ["1", "256"],
"tuning-info-id": ["4", "2", "3"]
}
}
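# Test inputs: reference clips keyed by name, a videotestsrc string (currently
# unused), and filesrc templates per input type.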
videos = {
"base-daVinci": "./test.yuv"
}
testsource = "videotestsrc pattern=smpte"
videosrc = {
"raw":["filesrc location=", " ! rawvideoparse "],
"h264": ["filesrc location=", " ! decodebin"]
}
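# ffmpeg-style raw-video options appended to video_info and handed to
# qa.run_quality_check; presumably they match each encoder's output pixel
# format and color range.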
psnr_check = {
"x264enc": "-pixel_format yuv420p -color_range pc",
"nvh264enc": "-pixel_format nv12 -color_range tv",
"nvv4l2h264enc": "-pixel_format nv12 -color_range tv"
}
with_docker = [ "nvv4l2h264enc" ]
repeats = 3
formats = {
"x264enc": "I420",
"nvh264enc": "NV12",
"nvv4l2h264enc": "NV12"
}
profiles = ["baseline", "main"]
videoconvert = {
"nvv4l2h264enc": "nvvideoconvert",
"nvh264enc": "videoconvert",
"x264enc": "videoconvert"
}
video_info = {
"video1":"-video_size 1920x1080 -framerate 23.98",
"sample-surgery":"-video_size 1280x720 -framerate 29.97",
"base-daVinci": "-video_size 1280x720 -framerate 59.94"
}
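# Per-clip rawvideoparse settings appended by makeVideoSrc (format=2 is the
# GstVideoFormat enum value for I420).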
gst_video_info = {
"video1":"format=I420,height=1080,width=1920,framerate=24000/1001",
"base-daVinci": "format=2 height=720 width=1280 colorimetry=bt601 framerate=60000/1001"
}
latency_filename = "latency-traces-autotest.log"
# Step-by-step:
# 1. Generate all combinations for each encoder
# 2. For each combination, create a GStreamer pipeline string
# 3. Start each pipeline with latency tracing enabled
# 3.1 Monitor CPU, GPU and memory usage during each pipeline run (deferred; not implemented yet)
# 4. Start latency parsing script after each pipeline and store results in a pandas dataframe:
# - two key columns: encoder name, parameters string
# 5. Run PSNR check after each pipeline and add results in the dataframe
# 6. Save dataframe to CSV file
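#
# For illustration only, a single x264enc run roughly resembles the command below
# (exact parameter values depend on the generated combination):
#   GST_DEBUG_COLOR_MODE=off GST_TRACERS="latency(flags=pipeline+element)" \
#   GST_DEBUG=GST_TRACER:7 GST_DEBUG_FILE=latency-traces-autotest.log \
#   gst-launch-1.0 -e filesrc location=./test.yuv ! rawvideoparse format=2 height=720 width=1280 colorimetry=bt601 framerate=60000/1001 ! \
#   clocksync sync-to-first=true ! videoconvert ! capsfilter caps=video/x-raw,format=I420 ! \
#   x264enc bitrate=10000 speed-preset=ultrafast tune=zerolatency ... ! \
#   capsfilter caps="video/x-h264,profile=baseline" ! h264parse ! mpegtsmux ! \
#   filesink location="autotest-x264enc-baseline-test-base-daVinci.ts"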
class Pipeline:
    """Builder for a gst-launch-1.0 command line; also accumulates the option string used for reporting."""
    def __init__(self):
        self.pipeline = "gst-launch-1.0 -e "
        self.options = ""
    def add_tracing(self):
        # Enable the latency tracer and redirect its output to the trace log file.
        self.pipeline = (
            "GST_DEBUG_COLOR_MODE=off " +
            "GST_TRACERS=\"latency(flags=pipeline+element)\" " +
            "GST_DEBUG=GST_TRACER:7 GST_DEBUG_FILE=" + latency_filename + " " +
            self.pipeline
        )
        return self
    def add_source(self, source):
        self.pipeline += source + " ! clocksync sync-to-first=true ! "
        return self
    def __add_tee(self, encoder):
        pass
        #self.pipeline += "tee name=t t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! filesink location=\"base-autotest.yuv\" "
    def add_encoder(self, encoder, params):
        self.pipeline += videoconvert[encoder] + " ! "
        self.pipeline += "capsfilter caps=video/x-raw,format=" + formats[encoder] + " ! "
        #self.__add_tee(encoder)
        self.options += " ".join(params) + " "
        #self.pipeline += "t. ! queue max-size-time=5000000000 max-size-bytes=100485760 max-size-buffers=1000 ! "
        self.pipeline += encoder + " "
        self.pipeline += " ".join(params) + " "
        return self
    def add_profile(self, profile):
        self.pipeline += "! capsfilter caps=\"video/x-h264,profile=" + profile + "\" ! "
        self.options += "profile=" + profile + " "
        return self
    def to_file(self, filename):
        self.pipeline += "h264parse ! mpegtsmux ! filesink location=\"" + filename + "\""
        return self
def makeVideoSrc(videoName):
return videosrc["raw"][0] + videos[videoName] + videosrc["raw"][1] + gst_video_info[videoName]
def generateEncoderStrings():
    """Build a {encoder: [parameter-string, ...]} mapping for every encoder in `options`."""
    result = dict()
    for encoder, value in options.items():
        result[encoder] = generate_combinations(value)
    return result
def generate_combinations(config_dict):
"""
Generate all combinations of values from a configuration dictionary.
Args:
config_dict (dict): Dictionary with parameter names as keys and lists of values as values
Returns:
list: List of strings containing all parameter combinations
"""
combinations = []
keys = list(config_dict.keys())
value_lists = [config_dict[key] for key in keys]
for combo in product(*value_lists):
param_strings = []
for key, value in zip(keys, combo):
param_strings.append(f"{key}={value}")
combinations.append(" ".join(param_strings))
return combinations
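# Example (illustrative): generate_combinations({"bitrate": ["5000", "10000"], "tune": ["zerolatency"]})
# yields ["bitrate=5000 tune=zerolatency", "bitrate=10000 tune=zerolatency"].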
qualityDataframe = pd.DataFrame()
latencyDataframe = pd.DataFrame()
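# Command prefix used to execute a generated pipeline script inside the running deepstream-gst container.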
dockerRunString = "sudo -S docker container exec deepstream-gst bash"
def execPermissions(scriptFile = "to_exec.sh"):
current_permissions = os.stat(scriptFile).st_mode
new_permissions = current_permissions | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(scriptFile, new_permissions)
def writeToExecFile(contents, file):
with open(file, "w") as f:
f.write(str(contents))
execPermissions(file)
def is_docker(func):
    """If the pipeline uses an encoder from `with_docker`, run it inside the deepstream-gst container."""
    def wrapper(pipeline):
        script_name = "to_exec.sh"
        for encoder in with_docker:
            if encoder in pipeline:
                # Write the pipeline to a script and execute that script in the container instead.
                writeToExecFile(pipeline, script_name)
                pipeline = dockerRunString + f" {script_name}"
                break
        return func(pipeline)
    return wrapper
def is_sudo(pipeline):
    return pipeline.startswith("sudo")
def passwordAuth(proc):
    # sudo -S reads the password from stdin; it must be newline-terminated.
    password = os.getenv("UAUTH")
    if password is not None:
        proc.communicate(password + "\n")
def printLog(file):
with open(file, "r") as f:
out = f.read()
print(out)
@is_docker
@log_args_decorator
def run_pipeline(pipeline):
    """Run the pipeline in a shell, logging stdout/stderr to a file; raise on non-zero exit."""
    logfile = "pipeline-log.txt"
    with open(logfile, "w") as f:
        proc = subprocess.Popen(pipeline, shell=True,
                                stdin=subprocess.PIPE, stdout=f,
                                stderr=subprocess.STDOUT, text=True)
        if is_sudo(pipeline):
            passwordAuth(proc)
        proc.wait()
    printLog(logfile)
    if proc.returncode != 0:
        raise Exception("Pipeline failed, see log for details")
def time_trace(func):
def wrapper():
import time
start_time = time.time()
func()
end_time = time.time()
elapsed_time = end_time - start_time
print(f"Total execution time: {elapsed_time} seconds")
return wrapper
@time_trace
def run_autotest():
    encoders = generateEncoderStrings()
    for encoder, combinations in encoders.items():
        # Fresh result tables per encoder; one CSV pair is written per encoder.
        qualityDataframe = pd.DataFrame()
        latencyDataframe = pd.DataFrame()
        for params in combinations:
            for profile in profiles:
                for videoName, videoPath in videos.items():
                    for _ in range(repeats):
                        # to_file() muxes into MPEG-TS, so use a .ts extension.
                        filename = "autotest-" + encoder + "-" + profile + "-test-" + videoName + ".ts"
                        pipeline = Pipeline()
                        pipeline = (
                            pipeline.add_tracing()
                            .add_source(makeVideoSrc(videoName))
                            .add_encoder(encoder, params.split(" "))
                            .add_profile(profile)
                            .to_file(filename)
                        )
                        print(pipeline.pipeline)
                        try:
                            run_pipeline(pipeline.pipeline)
                        except Exception as e:
                            print(f"Error occurred: {e}")
                            continue
                        # Quality metrics (PSNR/SSIM) of the encoded file against the reference clip.
                        psnr_metrics, ssim_metrics = qa.run_quality_check(
                            videoPath,
                            filename,
                            video_info[videoName] + " " + psnr_check[encoder]
                        )
                        dfPsnr = qa.parse_quality_report(psnr_metrics, ssim_metrics)
                        print("-----")
                        # Per-element/pipeline latency parsed from the tracer log.
                        dfLatency = getLatencyTable(latency_filename)
                        columnsQ = pd.MultiIndex.from_tuples(
                            [(encoder, profile, videoName, params, col) for col in dfPsnr.columns]
                        )
                        columnsLatency = pd.MultiIndex.from_tuples(
                            [(encoder, profile, videoName, params, col) for col in dfLatency.columns]
                        )
                        dfPsnr.columns = columnsQ
                        dfLatency.columns = columnsLatency
                        qualityDataframe = pd.concat([qualityDataframe, dfPsnr], axis=1)
                        latencyDataframe = pd.concat([latencyDataframe, dfLatency], axis=1)
                        print("=====")
                        print("Current results:")
                        print(dfPsnr)
                        print(dfLatency)
        qualityDataframe.to_csv(f"qualityResults{encoder}.csv")
        latencyDataframe.to_csv(f"latencyDataframe{encoder}.csv")
if __name__ == "__main__":
    run_autotest()