mirror of https://github.com/cosyneco/MediaPipe.NET.git
synced 2025-05-21 17:37:15 +08:00
Add Macro benchmark
Signed-off-by: Ayase Minori <ayane@vignetteapp.org>
79 Mediapipe.Net.Benchmarks/BlazeNetBenchmark.cs Normal file
@@ -0,0 +1,79 @@
// Copyright (c) homuler and The Vignette Authors
// This file is part of MediaPipe.NET.
// MediaPipe.NET is licensed under the MIT License. See LICENSE for details.

using System;
using System.Collections.Generic;
using System.IO;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Engines;
using Mediapipe.Net.Framework;
using Mediapipe.Net.Framework.Packets;
using Mediapipe.Net.Framework.Protobuf;
using SixLabors.ImageSharp;
using SixLabors.ImageSharp.PixelFormats;
using ImageFrame = Mediapipe.Net.Framework.Format.ImageFrame;

namespace Mediapipe.Net.Benchmarks
{
    [SimpleJob(RunStrategy.Throughput, launchCount: 20, warmupCount: 10)]
    [MinColumn, MaxColumn, MeanColumn, MedianColumn]
    public class BlazeNetBenchmark
    {
        private readonly ImageFrame referenceFrame;

        public BlazeNetBenchmark()
        {
            var rawImage = Image.Load<Rgba32>("TestData/reference.png");

            // Copy the pixel data into a buffer before handing it to ImageFrame.
            // The buffer must be allocated up front: an empty Span<byte> has no
            // backing storage, so CopyPixelDataTo would have nowhere to copy to.
            var rawImageBuffer = new byte[rawImage.Width * rawImage.Height * rawImage.PixelType.BitsPerPixel / 8];
            rawImage.CopyPixelDataTo(rawImageBuffer);

            // widthStep is a thing from OpenCV, so we'll need to calculate it
            // ourselves. It is the number of bytes per row: width * bytes per
            // pixel, and Rgba32 is 4 bytes (32 bits / 8) per pixel.
            var widthStep = rawImage.Width * (rawImage.PixelType.BitsPerPixel / 8);

            // The pixels were loaded as Rgba32, so the matching MediaPipe format
            // is Srgba (Sbgra would swap the red and blue channels).
            referenceFrame = new ImageFrame(ImageFormat.Types.Format.Srgba, rawImage.Width, rawImage.Height, widthStep, rawImageBuffer);
        }

        [Benchmark]
        public void BlazeFaceBenchmark()
        {
            // Read the pbtxt graph config into a string.
            var graphCfg = File.ReadAllText("TestData/face_detection_front_cpu.pbtxt");
            var graph = new CalculatorGraph(graphCfg);

            // Attach a poller so the rendered output stream gets consumed.
            var poller = graph.AddOutputStreamPoller<ImageFrame>("output_video").Value();

            // multi_face_landmarks carries a std::vector<NormalizedLandmarkList>,
            // one list per detected face, so observe it with the vector packet type.
            graph.ObserveOutputStream<NormalizedLandmarkListVectorPacket, List<NormalizedLandmarkList>>("multi_face_landmarks",
                (packet) =>
                {
                    var landmarkLists = packet.Get();

                    // report the landmarks
                    foreach (var landmarks in landmarkLists)
                    {
                        foreach (var landmark in landmarks.Landmark)
                        {
                            Console.WriteLine($"Landmark: {landmark.X}, {landmark.Y}, {landmark.Z}");

                            // do we have visibility data too?
                            if (landmark.HasVisibility)
                            {
                                Console.WriteLine($"Visibility: {landmark.Visibility}");
                            }
                        }
                    }
                }, out var cbHandle).AssertOk();

            // start the graph
            graph.StartRun().AssertOk();

            // send the image to the graph
            var inputFrame = new ImageFramePacket(referenceFrame);
            graph.AddPacketToInputStream("input_video", inputFrame).AssertOk();

            // wait for the graph to finish
            graph.WaitUntilIdle().AssertOk();

            // close the graph and clean up
            graph.CloseInputStream("input_video").AssertOk();
            graph.WaitUntilDone().AssertOk();
            cbHandle.Free();
        }
    }
}
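This commit adds the benchmark class itself, but BenchmarkDotNet still needs an entry point to discover and run it. A minimal sketch of such a runner (the actual Program.cs is not part of this diff, so the file below is an assumption):

// Program.cs (hypothetical runner, not part of this commit)
using BenchmarkDotNet.Running;
using Mediapipe.Net.Benchmarks;

BenchmarkRunner.Run<BlazeNetBenchmark>();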
Mediapipe.Net.Benchmarks/Mediapipe.Net.Benchmarks.csproj
@@ -10,6 +10,7 @@
   <ItemGroup>
     <PackageReference Include="BenchmarkDotNet" Version="0.13.12" />
     <PackageReference Include="Mediapipe.Net.Runtime.CPU" Version="0.9.1" />
+    <PackageReference Include="SixLabors.ImageSharp" Version="3.1.3" />
   </ItemGroup>

   <ItemGroup>
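The benchmark reads TestData/reference.png and TestData/face_detection_front_cpu.pbtxt relative to the working directory, so the project also has to copy those assets to the output folder. A sketch of what that item group could look like (this part of the project file is not shown in the diff, so it is an assumption):

<ItemGroup>
  <None Include="TestData\**" CopyToOutputDirectory="PreserveNewest" />
</ItemGroup>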
Mediapipe.Net.Benchmarks/TestData/face_detection_front_cpu.pbtxt
@@ -0,0 +1,63 @@
# MediaPipe graph that performs face mesh with TensorFlow Lite on CPU.

# Input image. (ImageFrame)
input_stream: "input_video"

# Output image with rendered results. (ImageFrame)
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph
# to 1. This prevents the downstream nodes from queuing up incoming images and
# data excessively, which leads to increased latency and memory usage, unwanted
# in real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
    }
  }
}

# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

# Subgraph that renders face-landmark annotation onto the input image.
node {
  calculator: "FaceRendererCpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:multi_face_landmarks"
  input_stream: "NORM_RECTS:face_rects_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}
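The FaceRendererCpu subgraph writes annotated frames to output_video, which the benchmark attaches a poller to but never drains. A sketch of how those frames could be pulled inside BlazeFaceBenchmark after graph.WaitUntilIdle() (names taken from the code above; the exact poller API is assumed to follow MediaPipe.NET's usual packet pattern, where Next() fills a reusable packet):

// Hypothetical addition, not part of this commit: drain the rendered frames.
var outputPacket = new ImageFramePacket();
while (poller.Next(outputPacket))
{
    using var outputFrame = outputPacket.Get();
    Console.WriteLine($"Rendered frame: {outputFrame.Width()}x{outputFrame.Height()}");
}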