Tuesday, February 11, 2025

Azure AI services - Detect and Analyze Faces

Source: https://github.com/MicrosoftLearning/mslearn-ai-vision

1. Create an Azure AI services multi-service account resource in the Azure portal, then copy its endpoint and key.
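
The C# samples read the endpoint and key from appsettings.json, and the Python samples from a .env file. A minimal sketch of both files, using the setting names the code expects (the placeholder values below are assumptions; replace them with your own resource's endpoint and key):

appsettings.json:
{
  "AIServicesEndpoint": "https://<your-resource>.cognitiveservices.azure.com/",
  "AIServiceKey": "<your-key>"
}

.env:
AI_SERVICE_ENDPOINT=https://<your-resource>.cognitiveservices.azure.com/
AI_SERVICE_KEY=<your-key>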

Detect Faces:
C# Code:
using System;
using System.Drawing;
using Microsoft.Extensions.Configuration;
using Azure;
using System.IO;

// dotnet add package Azure.AI.Vision.ImageAnalysis -v 1.0.0-beta.3

// Import namespaces
using Azure.AI.Vision.ImageAnalysis;

namespace detect_people
{
    class Program
    {
        static void Main(string[] args)
        {
            try
            {
                // Get config settings from AppSettings
                IConfigurationBuilder builder =
                new ConfigurationBuilder().AddJsonFile("appsettings.json");
                IConfigurationRoot configuration = builder.Build();
                string aiSvcEndpoint = configuration["AIServicesEndpoint"];
                string aiSvcKey = configuration["AIServiceKey"];

                // Get image
                string imageFile = "images/people.jpg";
                if (args.Length > 0)
                {
                    imageFile = args[0];
                }

                // Authenticate Azure AI Vision client
                ImageAnalysisClient cvClient = new ImageAnalysisClient(
                    new Uri(aiSvcEndpoint),
                    new AzureKeyCredential(aiSvcKey));

                // Analyze image
                AnalyzeImage(imageFile, cvClient);

            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }

        static void AnalyzeImage(string imageFile, ImageAnalysisClient client)
        {
            Console.WriteLine($"\nAnalyzing {imageFile} \n");

            // Use a file stream to pass the image data to the analyze call
            using FileStream stream = new FileStream(imageFile, FileMode.Open);

            // Get result with specified features to be retrieved (PEOPLE)
            ImageAnalysisResult result = client.Analyze(
                BinaryData.FromStream(stream),
                VisualFeatures.People);

            // Close the stream
            stream.Close();

            // Get people in the image
            if (result.People.Values.Count > 0)
            {
                Console.WriteLine($" People:");

                // Prepare image for drawing
                System.Drawing.Image image = System.Drawing.Image.FromFile(imageFile);
                Graphics graphics = Graphics.FromImage(image);
                Pen pen = new Pen(Color.Cyan, 3);

                // Draw bounding box around detected people
                foreach (DetectedPerson person in result.People.Values)
                {
                    if (person.Confidence > 0.5)
                    {
                        // Draw object bounding box
                        var r = person.BoundingBox;
                        Rectangle rect = new Rectangle(r.X, r.Y, r.Width, r.Height);
                        graphics.DrawRectangle(pen, rect);
                    }

                    // Return the confidence of the person detected
                    Console.WriteLine($"   Bounding box {person.BoundingBox.ToString()},
                    Confidence: {person.Confidence:F2}");
                }

                // Save annotated image
                String output_file = "people.jpg";
                image.Save(output_file);
                Console.WriteLine("  Results saved in " + output_file + "\n");
            }
        }
    }
}
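
To try this sample (a sketch, assuming you run it from the project folder that contains appsettings.json and the images folder; an image path can optionally be passed as the first argument):

dotnet add package Azure.AI.Vision.ImageAnalysis -v 1.0.0-beta.3
dotnet run -- images/people.jpg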



Output:



Analyze Faces:
C# Code:
using System;
using System.IO;
using System.Linq;
using System.Drawing;
using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.Extensions.Configuration;

// dotnet add package Azure.AI.Vision.Face -v 1.0.0-beta.2

// Import namespaces
using Azure;
using Azure.AI.Vision.Face;

namespace analyze_faces
{
    class Program
    {
        private static FaceClient faceClient;
        static async Task Main(string[] args)
        {
            try
            {
                // Get config settings from AppSettings
                IConfigurationBuilder builder =
                new ConfigurationBuilder().AddJsonFile("appsettings.json");
                IConfigurationRoot configuration = builder.Build();
                string cogSvcEndpoint = configuration["AIServicesEndpoint"];
                string cogSvcKey = configuration["AIServiceKey"];

                // Authenticate Face client
                faceClient = new FaceClient(
                    new Uri(cogSvcEndpoint),
                    new AzureKeyCredential(cogSvcKey));

                // Menu for face functions
                Console.WriteLine("1: Detect faces\nAny other key to quit");
                Console.WriteLine("Enter a number:");
                string command = Console.ReadLine();
                switch (command)
                {
                    case "1":
                        await DetectFaces("images/people.jpg");
                        break;
                    default:
                        break;
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }

        static async Task DetectFaces(string imageFile)
        {
            Console.WriteLine($"Detecting faces in {imageFile}");

            // Specify facial features to be retrieved
            FaceAttributeType[] features = new FaceAttributeType[]
            {
                FaceAttributeType.Detection03.HeadPose,
                FaceAttributeType.Detection03.Blur,
                FaceAttributeType.Detection03.Mask
            };

            // Get faces
            using (var imageData = File.OpenRead(imageFile))
            {
                var response = await faceClient.DetectAsync(
                    BinaryData.FromStream(imageData),
                    FaceDetectionModel.Detection03,
                    FaceRecognitionModel.Recognition04,
                    returnFaceId: false,
                    returnFaceAttributes: features);
                IReadOnlyList<FaceDetectionResult> detected_faces = response.Value;

                if (detected_faces.Count() > 0)
                {
                    Console.WriteLine($"{detected_faces.Count()} faces detected.");

                    // Prepare image for drawing
                    Image image = Image.FromFile(imageFile);
                    Graphics graphics = Graphics.FromImage(image);
                    Pen pen = new Pen(Color.LightGreen, 3);
                    Font font = new Font("Arial", 4);
                    SolidBrush brush = new SolidBrush(Color.White);
                    int faceCount = 0;

                    // Draw and annotate each face
                    foreach (var face in detected_faces)
                    {
                        faceCount++;
                        Console.WriteLine($"\nFace number {faceCount}");

                        // Get face properties
                        Console.WriteLine($" - Head Pose (Yaw): {face.FaceAttributes.HeadPose.Yaw}");
                        Console.WriteLine($" - Head Pose (Pitch):
                        {face.FaceAttributes.HeadPose.Pitch}");
                        Console.WriteLine($" - Head Pose (Roll):
                        {face.FaceAttributes.HeadPose.Roll}");
                        Console.WriteLine($" - Blur: {face.FaceAttributes.Blur.BlurLevel}");
                        Console.WriteLine($" - Mask: {face.FaceAttributes.Mask.Type}");

                        // Draw and annotate face
                        var r = face.FaceRectangle;
                        Rectangle rect = new Rectangle(r.Left, r.Top, r.Width, r.Height);
                        graphics.DrawRectangle(pen, rect);
                        string annotation = $"Face number {faceCount}";
                        graphics.DrawString(annotation, font, brush, r.Left, r.Top);
                    }

                    // Save annotated image
                    String output_file = "detected_faces.jpg";
                    image.Save(output_file);
                    Console.WriteLine(" Results saved in " + output_file);
                }
            }
        }
    }
}


Output:



Detect Faces:
Python Code:
from dotenv import load_dotenv
import os
from PIL import Image, ImageDraw
import sys
from matplotlib import pyplot as plt
import numpy as np
#pip install azure-ai-vision-imageanalysis==1.0.0b3
# import namespaces
from azure.ai.vision.imageanalysis import ImageAnalysisClient
from azure.ai.vision.imageanalysis.models import VisualFeatures
from azure.core.credentials import AzureKeyCredential

def main():
    global cv_client

    try:
        # Get Configuration Settings
        load_dotenv()
        ai_endpoint = os.getenv('AI_SERVICE_ENDPOINT')
        ai_key = os.getenv('AI_SERVICE_KEY')

        # Get image
        image_file = 'images/people.jpg'
        if len(sys.argv) > 1:
            image_file = sys.argv[1]

        with open(image_file, "rb") as f:
            image_data = f.read()

         # Authenticate Azure AI Vision client
        cv_client = ImageAnalysisClient(
            endpoint=ai_endpoint,
            credential=AzureKeyCredential(ai_key)
        )
       
        # Analyze image
        AnalyzeImage(image_file, image_data, cv_client)

    except Exception as ex:
        print(ex)

def AnalyzeImage(filename, image_data, cv_client):
    print('\nAnalyzing ', filename)

    # Get result with specified features to be retrieved (PEOPLE)
    result = cv_client.analyze(
        image_data=image_data,
        visual_features=[VisualFeatures.PEOPLE],
    )
   
    # Identify people in the image
    if result.people is not None:
        print("\nPeople in image:")

        # Prepare image for drawing
        image = Image.open(filename)
        fig = plt.figure(figsize=(image.width/100, image.height/100))
        plt.axis('off')
        draw = ImageDraw.Draw(image)
        color = 'cyan'

        # Draw bounding box around detected people
        for detected_people in result.people.list:
            if detected_people.confidence > 0.5:
                # Draw object bounding box
                r = detected_people.bounding_box
                bounding_box = ((r.x, r.y), (r.x + r.width, r.y + r.height))
                draw.rectangle(bounding_box, outline=color, width=3)

                # Return the confidence of the person detected
                print(" {} (confidence: {:.2f}%)".format(detected_people.bounding_box,
                detected_people.confidence * 100))

        # Save annotated image
        plt.imshow(image)
        plt.tight_layout(pad=0)
        outputfile = 'people.jpg'
        fig.savefig(outputfile)
        print('  Results saved in', outputfile)

if __name__ == "__main__":
    main()
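
To try the Python sample (a sketch; the script file name below is a placeholder for whatever you saved the code as, and an image path can optionally be passed as the first argument):

pip install azure-ai-vision-imageanalysis==1.0.0b3
python detect-people.py images/people.jpg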

Output:




Analyze Faces:
Python Code:
from dotenv import load_dotenv
import os
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt

#pip install azure-cognitiveservices-vision-face==0.6.0

# Import namespaces
from azure.cognitiveservices.vision.face import FaceClient
from azure.cognitiveservices.vision.face.models import FaceAttributeType
from msrest.authentication import CognitiveServicesCredentials

def main():
    global face_client

    try:
        # Get Configuration Settings
        load_dotenv()
        cog_endpoint = os.getenv('AI_SERVICE_ENDPOINT')
        cog_key = os.getenv('AI_SERVICE_KEY')

        # Authenticate Face client
        credentials = CognitiveServicesCredentials(cog_key)
        face_client = FaceClient(cog_endpoint, credentials)

        # Menu for face functions
        print('1: Detect faces\nAny other key to quit')
        command = input('Enter a number:')
        if command == '1':
            DetectFaces(os.path.join('images','people.jpg'))

    except Exception as ex:
        print(ex)

def DetectFaces(image_file):
    print('Detecting faces in', image_file)

    # Specify facial features to be retrieved
    features = [
        FaceAttributeType.occlusion,
        FaceAttributeType.blur,
        FaceAttributeType.glasses
    ]

    # Get faces
    with open(image_file, mode="rb") as image_data:
        detected_faces = face_client.face.detect_with_stream(
            image=image_data,
            return_face_attributes=features,
            return_face_id=False
        )

    if len(detected_faces) > 0:
        print(len(detected_faces), 'faces detected.')

        # Prepare image for drawing
        fig = plt.figure(figsize=(8, 6))
        plt.axis('off')
        image = Image.open(image_file)
        draw = ImageDraw.Draw(image)
        color = 'lightgreen'
        face_count = 0

        # Draw and annotate each face
        for face in detected_faces:
            # Get face properties
            face_count += 1
            print('\nFace number {}'.format(face_count))
            detected_attributes = face.face_attributes.as_dict()

            if 'blur' in detected_attributes:
                print(' - Blur:')
                for blur_name in detected_attributes['blur']:
                    print('   - {}: {}'.format(blur_name, detected_attributes['blur'][blur_name]))

            if 'occlusion' in detected_attributes:
                print(' - Occlusion:')
                for occlusion_name in detected_attributes['occlusion']:
                    print('   - {}: {}'.format(occlusion_name, detected_attributes['occlusion'][occlusion_name]))

            if 'glasses' in detected_attributes:
                print(' - Glasses: {}'.format(detected_attributes['glasses']))

            # Draw and annotate face
            r = face.face_rectangle
            bounding_box = ((r.left, r.top), (r.left + r.width, r.top + r.height))
            draw.rectangle(bounding_box, outline=color, width=5)
            annotation = 'Face number {}'.format(face_count)
            plt.annotate(annotation, (r.left, r.top), backgroundcolor=color)

        # Save annotated image
        plt.imshow(image)
        outputfile = 'detected_faces.jpg'
        fig.savefig(outputfile)
        print('\nResults saved in', outputfile)

if __name__ == "__main__":
    main()

Output:







