Wednesday, January 29, 2025

Azure AI services - Recognize and synthesize speech


1. Create 'Speech service' in Azure

C# Code:

using System;
using System.Threading.Tasks;
using Microsoft.Extensions.Configuration;
using System.Media;

// Import namespaces
using Microsoft.CognitiveServices.Speech;
using Microsoft.CognitiveServices.Speech.Audio;

// dotnet add package Microsoft.CognitiveServices.Speech --version 1.30.0
// dotnet add package System.Windows.Extensions --version 4.6.0

namespace speaking_clock
{
    class Program
    {
        private static SpeechConfig speechConfig;
        static async Task Main(string[] args)
        {
            try
            {
                // Get config settings from AppSettings
                // IConfigurationBuilder builder = new ConfigurationBuilder().AddJsonFile("appsettings.json");
                // IConfigurationRoot configuration = builder.Build();
                string aiSvcKey = "<YOUR_SPEECH_KEY>"; // configuration["SpeechKey"];
                string aiSvcRegion = "eastus"; // configuration["SpeechRegion"];

                // Configure speech service
                speechConfig = SpeechConfig.FromSubscription(aiSvcKey, aiSvcRegion);
                Console.WriteLine("Ready to use speech service in " + speechConfig.Region);

                // Configure voice
                speechConfig.SpeechSynthesisVoiceName = "en-US-AriaNeural";

                // Get spoken input
                string command = "";
                command = "what time is it?"; //await TranscribeCommand();
                if (command.ToLower() == "what time is it?")
                {
                    await TellTime();
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }

        static async Task<string> TranscribeCommand()
        {
            string command = "";

            // Configure speech recognition
            using AudioConfig audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using SpeechRecognizer speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
            Console.WriteLine("Speak now...");

            // Alternative: configure speech recognition from a WAV file
            // string audioFile = "time.wav";
            // SoundPlayer wavPlayer = new SoundPlayer(audioFile);
            // wavPlayer.Play();
            // using AudioConfig audioConfig = AudioConfig.FromWavFileInput(audioFile);
            // using SpeechRecognizer speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);

            // Process speech input
            SpeechRecognitionResult speech = await speechRecognizer.RecognizeOnceAsync();
            if (speech.Reason == ResultReason.RecognizedSpeech)
            {
                command = speech.Text;
                Console.WriteLine(command);
            }
            else
            {
                Console.WriteLine(speech.Reason);
                if (speech.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(speech);
                    Console.WriteLine(cancellation.Reason);
                    Console.WriteLine(cancellation.ErrorDetails);
                }
            }

            // Return the command
            return command;
        }

        static async Task TellTime()
        {
            var now = DateTime.Now;
            string responseText = "The time is " + now.Hour.ToString() + ":" +             now.Minute.ToString("D2");

            // Configure speech synthesis
            speechConfig.SpeechSynthesisVoiceName = "en-GB-RyanNeural";
            using SpeechSynthesizer speechSynthesizer = new SpeechSynthesizer(speechConfig);

            // Synthesize spoken output
            string responseSsml = $@"
                <speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='en-US'>
                    <voice name='en-GB-LibbyNeural'>
                        {responseText}
                        <break strength='weak'/>
                        Time to end this lab!
                    </voice>
                </speak>";
            SpeechSynthesisResult speak = await speechSynthesizer.SpeakSsmlAsync(responseSsml);
            if (speak.Reason != ResultReason.SynthesizingAudioCompleted)
            {
                Console.WriteLine(speak.Reason);
            }

            // Print the response
            Console.WriteLine(responseText);
        }
    }
}

Output:



Python Code:

from dotenv import load_dotenv  
from datetime import datetime  
from playsound import playsound
import os  

# Import namespaces  
import azure.cognitiveservices.speech as speech_sdk  

# pip install azure-cognitiveservices-speech==1.30.0
# pip install python-dotenv
# pip install playsound==1.2.2

def main():  
    try:  
        global speech_config  

        # Get Configuration Settings  
        load_dotenv()  
        ai_key = '<YOUR_SPEECH_KEY>'  # os.getenv('SPEECH_KEY')
        ai_region = 'eastus'  # os.getenv('SPEECH_REGION')  

        # Configure speech service  
        speech_config = speech_sdk.SpeechConfig(subscription=ai_key, region=ai_region)  
        print('Ready to use speech service in:', speech_config.region)  

        # Get spoken input  
        command = 'what time is it?'  # TranscribeCommand()
        if command.lower() == 'what time is it?':  
            TellTime()  

    except Exception as ex:  
        print(ex)  

def TranscribeCommand():  
    command = ''  

    # Configure speech recognition (alternative: use the default microphone)
    # audio_config = speech_sdk.AudioConfig(use_default_microphone=True)
    # speech_recognizer = speech_sdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
    # print('Speak now...')

    # Configure speech recognition from a WAV file
    current_dir = os.getcwd()
    audioFile = current_dir + '\\time.wav'
    playsound(audioFile)
    audio_config = speech_sdk.AudioConfig(filename=audioFile)
    speech_recognizer = speech_sdk.SpeechRecognizer(speech_config, audio_config)

    # Process speech input  
    speech = speech_recognizer.recognize_once_async().get()  

    if speech.reason == speech_sdk.ResultReason.RecognizedSpeech:  
        command = speech.text  
        print(command)  
    else:  
        print(speech.reason)  
        if speech.reason == speech_sdk.ResultReason.Canceled:  
            cancellation = speech.cancellation_details  
            print(cancellation.reason)  
            print(cancellation.error_details)  

    # Return the command  
    return command  

def TellTime():  
    now = datetime.now()  
    response_text = 'The time is {}:{:02d}'.format(now.hour, now.minute)  

    # Configure speech synthesis  
    speech_config.speech_synthesis_voice_name = "en-GB-RyanNeural"  
    speech_synthesizer = speech_sdk.SpeechSynthesizer(speech_config=speech_config)  

    # Synthesize spoken output (alternative: plain text)
    # speak = speech_synthesizer.speak_text_async(response_text).get()
    # if speak.reason != speech_sdk.ResultReason.SynthesizingAudioCompleted:
    #     print(speak.reason)

    # Synthesize spoken output using SSML
    responseSsml = " \
        <speak version='1.0' xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='en-US'> \
             <voice name='en-GB-LibbyNeural'> \
                 {} \
                <break strength='weak'/> \
                Time to end this lab! \
            </voice> \
        </speak>".format(response_text)
    speak = speech_synthesizer.speak_ssml_async(responseSsml).get()
   
    if speak.reason != speech_sdk.ResultReason.SynthesizingAudioCompleted:
        print(speak.reason)

    # Print the response  
    print(response_text)  

if __name__ == "__main__":  
    main()
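As a side note, the synthesized audio can be routed to a WAV file instead of the default speaker by passing an audio config when creating the synthesizer. A minimal sketch (not part of the lab; the helper name and output filename are illustrative):

# Sketch: synthesize speech to a WAV file instead of the default speaker
import azure.cognitiveservices.speech as speech_sdk

def synthesize_to_file(speech_config, text, output_file='output.wav'):
    audio_config = speech_sdk.audio.AudioOutputConfig(filename=output_file)
    synthesizer = speech_sdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
    result = synthesizer.speak_text_async(text).get()
    if result.reason != speech_sdk.ResultReason.SynthesizingAudioCompleted:
        print(result.reason)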

Output:





Azure AI services - Use an Azure AI Services Container


Source: https://github.com/MicrosoftLearning/mslearn-ai-services

Create these two services in Azure.
1. Azure AI services multi-service account
2. Azure Container Instances

2. Azure Container Instances:
az container create --resource-group <your-resource-Group> --name whizcontaineryourname --image mcr.microsoft.com/azure-cognitive-services/textanalytics/sentiment:latest --os-type Linux --cpu 1 --memory 8 --dns-name-label whizdnnsyourname --ports 5000 --location eastus --restart-policy OnFailure --secure-environment-variables ApiKey=<your-Api-key> Billing=<endpoint> --environment-variables Eula=accept --ip-address Public

az container create --resource-group rg1 --name sreecontainer1 --image mcr.microsoft.com/azure-cognitive-services/textanalytics/sentiment:latest --os-type Linux --cpu 1 --memory 8 --dns-name-label sreednns1 --ports 5000 --location eastus --restart-policy OnFailure --secure-environment-variables ApiKey=<your-Api-key> Billing=https://sreemultiserviceaccount1.cognitiveservices.azure.com/ --environment-variables Eula=accept --ip-address Public

For Docker:
docker run --rm -it -p 5000:5000 --memory 8g --cpus 1 mcr.microsoft.com/azure-cognitive-services/textanalytics/sentiment:latest Eula=accept Billing=<yourEndpoint> ApiKey=<yourKey>
 
Testing:
curl -X POST "http://<your_ACI_IP_address_or_FQDN>:5000/text/analytics/v3.1/sentiment" -H "Content-Type: application/json" --data-ascii "{'documents':[{'id':1,'text':'The performance was amazing! The sound could have been clearer.'},{'id':2,'text':'The food and service were unacceptable. While the host was nice, the waiter was rude and food was cold.'}]}"

curl -X POST "http://sreednns1.eastus.azurecontainer.io:5000/text/analytics/v3.1/sentiment" -H "Content-Type: application/json" --data-ascii "{'documents':[{'id':1,'text':'The performance was amazing! The sound could have been clearer.'},{'id':2,'text':'The food and service were unacceptable. While the host was nice, the waiter was rude and food was cold.'}]}"

Output:

{  
  "documents": [  
    {  
      "id": "1",  
      "sentiment": "positive",  
      "confidenceScores": {  
        "positive": 0.99,  
        "neutral": 0.0,  
        "negative": 0.0  
      },  
      "sentences": [  
        {  
          "sentiment": "positive",  
          "confidenceScores": {  
            "positive": 0.99,  
            "neutral": 0.0,  
            "negative": 0.0  
          },  
          "offset": 0,  
          "length": 29,  
          "text": "The performance was amazing! "  
        },  
        {  
          "sentiment": "neutral",  
          "confidenceScores": {  
            "positive": 0.19,  
            "neutral": 0.47,  
            "negative": 0.34  
          },  
          "offset": 29,  
          "length": 34,  
          "text": "The sound could have been clearer."  
        }  
      ],  
      "warnings": []  
    },  
    {  
      "id": "2",  
      "sentiment": "negative",  
      "confidenceScores": {  
        "positive": 0.0,  
        "neutral": 0.01,  
        "negative": 0.98  
      },  
      "sentences": [  
        {  
          "sentiment": "negative",  
          "confidenceScores": {  
            "positive": 0.0,  
            "neutral": 0.01,  
            "negative": 0.99  
          },  
          "offset": 0,  
          "length": 40,  
          "text": "The food and service were unacceptable. "  
        },  
        {  
          "sentiment": "negative",  
          "confidenceScores": {  
            "positive": 0.0,  
            "neutral": 0.02,  
            "negative": 0.98  
          },  
          "offset": 40,  
          "length": 63,  
          "text": "While the host was nice, the waiter was rude and food was cold."  
        }  
      ],  
      "warnings": []  
    }  
  ],  
  "errors": []  
}


Monday, January 27, 2025

Azure AI services - Read Text in Images


Source: https://github.com/MicrosoftLearning/AI-102-AIEngineer
1. Azure AI services | Azure AI services multi-service account

C# Code:
using Microsoft.Azure.CognitiveServices.Vision.ComputerVision;
using Microsoft.Azure.CognitiveServices.Vision.ComputerVision.Models;
using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

// dotnet add package Microsoft.Azure.CognitiveServices.Vision.ComputerVision --version 6.0.0

namespace read_text
{
    class Program
    {
        private static ComputerVisionClient cvClient;
        static async Task Main(string[] args)
        {
            try
            {
                // Get config settings from AppSettings
                // IConfigurationBuilder builder = new ConfigurationBuilder().AddJsonFile("appsettings.json");
                // IConfigurationRoot configuration = builder.Build();
                string cogSvcEndpoint = "https://multiserviceaccount1.cognitiveservices.azure.com/"; // configuration["CognitiveServicesEndpoint"];
                string cogSvcKey = "<YOUR_COGNITIVE_SERVICES_KEY>"; // configuration["CognitiveServiceKey"];

                ApiKeyServiceClientCredentials credentials = new ApiKeyServiceClientCredentials(cogSvcKey);
                cvClient = new ComputerVisionClient(credentials)
                {
                    Endpoint = cogSvcEndpoint
                };

                // Menu for text reading functions
                Console.WriteLine("1: Use Read API for image\n2: Use Read API for document\n3:                 Read handwriting\nAny other key to quit");
                Console.WriteLine("Enter a number:");
                string command = Console.ReadLine();
                string imageFile;
                switch (command)
                {
                    case "1":
                        imageFile = "images/Lincoln.jpg";
                        await GetTextRead(imageFile);
                        break;
                    case "2":
                        imageFile = "images/Rome.pdf";
                        await GetTextRead(imageFile);
                        break;
                    case "3":
                        imageFile = "images/Note.jpg";
                        await GetTextRead(imageFile);
                        break;
                    default:
                        break;
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }

        static async Task GetTextRead(string imageFile)
        {
            Console.WriteLine($"Reading text in {imageFile}\n");
            // Use Read API to read text in image
            using (var imageData = File.OpenRead(imageFile))
            {
                var readOp = await cvClient.ReadInStreamAsync(imageData);

                // Get the async operation ID so we can check for the results
                string operationLocation = readOp.OperationLocation;
                string operationId = operationLocation.Substring(operationLocation.Length - 36);

                // Wait for the asynchronous operation to complete
                ReadOperationResult results;
                do
                {
                    Thread.Sleep(1000);
                    results = await cvClient.GetReadResultAsync(Guid.Parse(operationId));
                }
                while ((results.Status == OperationStatusCodes.Running ||
                        results.Status == OperationStatusCodes.NotStarted));

                // If the operation succeeded, process the text line by line
                if (results.Status == OperationStatusCodes.Succeeded)
                {
                    var textUrlFileResults = results.AnalyzeResult.ReadResults;
                    foreach (ReadResult page in textUrlFileResults)
                    {
                        foreach (Line line in page.Lines)
                        {
                            Console.WriteLine(line.Text);

                            // Uncomment the following line if you'd like to see the bounding box
                            //Console.WriteLine(line.BoundingBox);
                        }
                    }
                }
            }
        }
    }
}

Python Code:

# pip install python-dotenv
# pip install pillow
# pip install matplotlib
# pip install azure-cognitiveservices-vision-computervision==0.7.0

from dotenv import load_dotenv
import os
import time
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt

# Import namespaces
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
from msrest.authentication import CognitiveServicesCredentials

def main():
    global cv_client
    try:
        # Get Configuration Settings
        load_dotenv()
        cog_endpoint = "https://multiserviceaccount1.cognitiveservices.azure.com/"         #os.getenv('COG_SERVICE_ENDPOINT')
        cog_key = "AjShjnYv3s56Ne4keUlZBAACYeBjFXJ3w3AAAEACOGAIif"         # os.getenv('COG_SERVICE_KEY')

        # Authenticate Azure AI Vision client
        credential = CognitiveServicesCredentials(cog_key)
        cv_client = ComputerVisionClient(cog_endpoint, credential)

        # Menu for text reading functions
        print('1: Use Read API for image\n2: Use Read API for document\n3: Read handwriting\nAny other key to quit')
        command = input('Enter a number:')
        if command == '1':
            image_file = os.path.join('images','Lincoln.jpg')
            GetTextRead(image_file)
        elif command =='2':
            image_file = os.path.join('images','Rome.pdf')
            GetTextRead(image_file)
        elif command =='3':
            image_file = os.path.join('images','Note.jpg')
            GetTextRead(image_file)
    except Exception as ex:
        print(ex)

def GetTextRead(image_file):
    print('Reading text in {}\n'.format(image_file))
   
    # Use Read API to read text in image
    with open(image_file, mode="rb") as image_data:
        read_op = cv_client.read_in_stream(image_data, raw=True)

    # Get the async operation ID so we can check for the results
    operation_location = read_op.headers["Operation-Location"]
    operation_id = operation_location.split("/")[-1]

    # Wait for the asynchronous operation to complete
    while True:
        read_results = cv_client.get_read_result(operation_id)
        if read_results.status not in [OperationStatusCodes.running, OperationStatusCodes.not_started]:
            break
        time.sleep(1)

    # If the operation succeeded, process the text line by line
    if read_results.status == OperationStatusCodes.succeeded:
        for page in read_results.analyze_result.read_results:
            for line in page.lines:
                print(line.text)
                # Uncomment the following line if you'd like to see the bounding box
                # print(line.bounding_box)

if __name__ == "__main__":
    main()
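The PIL and matplotlib imports at the top of the program can also be used to visualize the results. A sketch (annotate_lines and the output filename are illustrative, not part of the lab): each line's bounding_box is a flat list of eight numbers, i.e. four (x, y) corner points, which can be drawn as a polygon.

# Sketch: draw each recognized line's bounding polygon on the image
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt

def annotate_lines(image_file, read_results):
    image = Image.open(image_file)
    draw = ImageDraw.Draw(image)
    for page in read_results.analyze_result.read_results:
        for line in page.lines:
            # bounding_box is [x1, y1, x2, y2, x3, y3, x4, y4]
            points = [(line.bounding_box[i], line.bounding_box[i + 1])
                      for i in range(0, 8, 2)]
            draw.polygon(points, outline='magenta')
    plt.axis('off')
    plt.imshow(image)
    plt.savefig('text_lines.jpg')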


Input:


Output:


Input:
Save as PDF file.


Output:



Input:


Output:



Azure AI services - Image Analysis with Azure AI Content Safety


Source: 
https://learn.microsoft.com/en-us/python/api/overview/azure/ai-contentsafety-readme?view=azure-python 

import os
from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeImageOptions, ImageData, ImageCategory
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError

def analyze_image():
    endpoint = "https://contentsafety.cognitiveservices.azure.com/"
    key = "72OU9CFnMIOX5dtStPFqq3fTzjNkOYeBjFXJ3w3AAAHACOG8nbQ"
    image_path = "download_1.jpg"

    # Create an Azure AI Content Safety client
    client = ContentSafetyClient(endpoint, AzureKeyCredential(key))

    # Build request
    with open(image_path, "rb") as file:
        request = AnalyzeImageOptions(image=ImageData(content=file.read()))

    # Analyze image
    try:
        response = client.analyze_image(request)
    except HttpResponseError as e:
        print("Analyze image failed.")
        if e.error:
            print(f"Error code: {e.error.code}")
            print(f"Error message: {e.error.message}")
            raise
        print(e)
        raise

    hate_result = next(item for item in response.categories_analysis if item.category == ImageCategory.HATE)
    self_harm_result = next(item for item in response.categories_analysis if item.category == ImageCategory.SELF_HARM)
    sexual_result = next(item for item in response.categories_analysis if item.category == ImageCategory.SEXUAL)
    violence_result = next(item for item in response.categories_analysis if item.category == ImageCategory.VIOLENCE)

    if hate_result:
        print(f"Hate severity: {hate_result.severity}")
    if self_harm_result:
        print(f"SelfHarm severity: {self_harm_result.severity}")
    if sexual_result:
        print(f"Sexual severity: {sexual_result.severity}")
    if violence_result:
        print(f"Violence severity: {violence_result.severity}")

if __name__ == "__main__":
    analyze_image()
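Severity scores start at 0 (safe) and increase with the level of harm detected. A simple gate on the analysis above might look like this (a sketch; the threshold of 2 is an arbitrary example):

# Sketch: accept the image only if every category is below a severity threshold
def is_image_acceptable(response, threshold=2):
    return all((item.severity or 0) < threshold
               for item in response.categories_analysis)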


Input Image:


Output:



Saturday, January 25, 2025

Azure AI services - Develop Azure AI services applications with Azure Key Vault - Language service


https://learn.microsoft.com/en-us/azure/ai-services/use-key-vault?tabs=azure-cli&pivots=programming-language-csharp

C# Code:

using Azure;
using Azure.AI.TextAnalytics;
using Azure.Identity;
using Azure.Security.KeyVault.Secrets;
using System;
using System.Threading.Tasks;

namespace ConsoleApp1
{
    internal class Program
    {
        static async Task Main(string[] args)
        {
            var keyVaultName = "sreekeyvault2";
            const string keySecretName = "CognitiveServicesKey";
            const string endpointSecretName = "CognitiveServicesEndpoint";
            var kvUri = $"https://{keyVaultName}.vault.azure.net";
            var keyVaultClient = new SecretClient(new Uri(kvUri), new DefaultAzureCredential());
            Console.WriteLine($"Retrieving your secrets from {keyVaultName}.");
            var keySecret = await keyVaultClient.GetSecretAsync(keySecretName);
            var endpointSecret = await keyVaultClient.GetSecretAsync(endpointSecretName);
            Console.WriteLine($"Your key secret value is: {keySecret.Value.Value}");
            Console.WriteLine($"Your endpoint secret value is: {endpointSecret.Value.Value}");
            Console.WriteLine("Secrets retrieved successfully");
            EntityRecognitionExample(keySecret.Value.Value, endpointSecret.Value.Value);
            Console.ReadKey();
        }

        private static void EntityRecognitionExample(string keySecret, string endpointSecret)
        {
            var exampleString = "I had a wonderful trip to Seattle last week.";
            AzureKeyCredential azureKeyCredential = new AzureKeyCredential(keySecret);
            Uri endpoint = new Uri(endpointSecret);
            var languageServiceClient = new TextAnalyticsClient(endpoint, azureKeyCredential);
            Console.WriteLine("Sending a Named Entity Recognition (NER) request");
            var response = languageServiceClient.RecognizeEntities(exampleString);
            Console.WriteLine("Named Entities:");
            foreach (var entity in response.Value)
            {
                Console.WriteLine($"\tText: {entity.Text},\tCategory:
                {entity.Category},\tSub-Category: {entity.SubCategory}");
                Console.WriteLine($"\t\tScore: {entity.ConfidenceScore:F2},\tLength:
                {entity.Length},\tOffset: {entity.Offset}\n");
            }
        }
    }
}
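For comparison, a rough Python equivalent of the same flow (a sketch, not part of the original sample; install azure-identity, azure-keyvault-secrets and azure-ai-textanalytics first):

# Sketch: read the two secrets from Key Vault, then run entity recognition
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

kv_uri = 'https://sreekeyvault2.vault.azure.net'

# DefaultAzureCredential picks up the Azure CLI / environment identity
secret_client = SecretClient(vault_url=kv_uri, credential=DefaultAzureCredential())
key = secret_client.get_secret('CognitiveServicesKey').value
endpoint = secret_client.get_secret('CognitiveServicesEndpoint').value

client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))
result = client.recognize_entities(['I had a wonderful trip to Seattle last week.'])
for doc in result:
    if not doc.is_error:
        for entity in doc.entities:
            print(f'\tText: {entity.text},\tCategory: {entity.category},'
                  f'\tConfidence: {entity.confidence_score:.2f}')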

Output:

Friday, January 24, 2025

Azure AI services - Document intelligence - Extract Data from Forms


Source:
https://microsoftlearning.github.io/AI-102-AIEngineer/
https://github.com/MicrosoftLearning/AI-102-AIEngineer

1. Create Azure AI services - Document intelligence in Azure Portal
2. Run a batch script to create a Storage account
3. Train the Model
4. Test the Model 

Required DLLs:
dotnet add package Azure.Core --version 1.44.1
dotnet add package Azure.AI.FormRecognizer --version 3.0.0
dotnet add package Tabulate.NET --version 1.0.5

2. Run a batch script to create a Storage account

@echo off
SETLOCAL ENABLEDELAYEDEXPANSION

rem Set variable values
set subscription_id=129b2bb6-asdf-asdf-83ba-85bf570bebca
set resource_group=rg1
set location=eastus
set expiry_date=2026-01-01T00:00:00Z

rem Get random numbers to create unique resource names
set unique_id=!random!!random!

rem Create a storage account in your Azure resource group
echo Creating storage...
call az storage account create --name ai102form!unique_id! --subscription !subscription_id! --resource-group !resource_group! --location !location! --sku Standard_LRS --encryption-services blob --default-action Allow --output none --allow-blob-public-access true

echo Uploading files...
rem Get storage key to create a container in the storage account
for /f "tokens=*" %%a in (
'az storage account keys list --subscription !subscription_id! --resource-group !resource_group! --account-name ai102form!unique_id! --query "[?keyName=='key1'].{keyName:keyName, permissions:permissions, value:value}"'
) do (
set key_json=!key_json!%%a
)
set key_string=!key_json:[ { "keyName": "key1", "permissions": "Full", "value": "=!
set AZURE_STORAGE_KEY=!key_string:" } ]=!
rem Create container
call az storage container create --account-name ai102form!unique_id! --name sampleforms --auth-mode key --account-key %AZURE_STORAGE_KEY% --output none
rem Upload files from your local sampleforms folder to a container called sampleforms in the storage account
rem Each file is uploaded as a blob
call az storage blob upload-batch -d sampleforms -s ./sample-forms --account-name ai102form!unique_id! --auth-mode key --account-key %AZURE_STORAGE_KEY%  --output none
rem Set a variable value for future use
set STORAGE_ACCT_NAME=ai102form!unique_id!

rem Get a Shared Access Signature (a signed URI that points to one or more storage resources) for the blobs in sampleforms  
for /f "tokens=*" %%a in (
'az storage container generate-sas --account-name ai102form!unique_id! --name sampleforms --expiry !expiry_date! --permissions rwl'
) do (
set SAS_TOKEN=%%a
set SAS_TOKEN=!SAS_TOKEN:~1,-1!
)
set URI=https://!STORAGE_ACCT_NAME!.blob.core.windows.net/sampleforms?!SAS_TOKEN!

rem Print the generated Shared Access Signature URI, which is used by Azure Storage to authorize access to the storage resource
echo -------------------------------------
echo SAS URI: !URI!
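The SAS-generation step at the end of the script can also be done in Python with the azure-storage-blob package; a sketch (account name and key are placeholders):

# Sketch: generate a container SAS equivalent to the az CLI call above
from datetime import datetime
from azure.storage.blob import generate_container_sas, ContainerSasPermissions

sas_token = generate_container_sas(
    account_name='<your-storage-account>',
    container_name='sampleforms',
    account_key='<your-storage-key>',
    permission=ContainerSasPermissions(read=True, write=True, list=True),
    expiry=datetime(2026, 1, 1))
print('SAS URI: https://<your-storage-account>.blob.core.windows.net/sampleforms?' + sas_token)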


Run the code: dotnet run

Output:


3. Train the Model

using System;
using System.IO;
using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.Extensions.Configuration;

// import namespaces
using Azure;
using Azure.AI.FormRecognizer;
using Azure.AI.FormRecognizer.Models;
using Azure.AI.FormRecognizer.Training;

namespace train_model
{
    class Program
    {
        static async Task Main(string[] args)
        {
            try
            {
                // Get configuration settings
                // IConfigurationBuilder builder = new ConfigurationBuilder().AddJsonFile("appsettings.json");
                // IConfigurationRoot configuration = builder.Build();
                // string formEndpoint = configuration["FormEndpoint"];
                // string formKey = configuration["FormKey"];
                // string trainingStorageUri = configuration["StorageUri"];

                // "YOUR_FORM_RECOGNIZER_ENDPOINT"
                string formEndpoint = "https://a.cognitiveservices.azure.com/";
                // "YOUR_FORM_RECOGNIZER_KEY"
                string formKey = "1E7gEDsZ2pUAiximoBAACYeBjFXJ3w3AAALACOGu5mm";
                // "YOUR_SAS_URI"
                string trainingStorageUri = "https://8IqQY2WOeCJRHTPFg%3D";

                // Authenticate Form Training Client
                var credential = new AzureKeyCredential(formKey);
                var trainingClient = new FormTrainingClient(new Uri(formEndpoint), credential);

                // Train model
                CustomFormModel model = await trainingClient
                .StartTrainingAsync(new Uri(trainingStorageUri), useTrainingLabels: true)
                .WaitForCompletionAsync();

                // Get model info
                Console.WriteLine($"Custom Model Info:");
                Console.WriteLine($"    Model Id: {model.ModelId}");
                Console.WriteLine($"    Model Status: {model.Status}");
                Console.WriteLine($"    Training model started on: {model.TrainingStartedOn}");
                Console.WriteLine($"    Training model completed on: {model.TrainingCompletedOn}");
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
    }
}
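For reference, the same training step in Python with the azure-ai-formrecognizer package (a sketch, not part of the lab; endpoint, key and SAS URI are placeholders):

# Sketch: train a custom model with the Python SDK (azure-ai-formrecognizer 3.x)
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormTrainingClient

training_client = FormTrainingClient('<YOUR_FORM_RECOGNIZER_ENDPOINT>',
                                     AzureKeyCredential('<YOUR_FORM_RECOGNIZER_KEY>'))

# begin_training polls the long-running operation until the model is ready
poller = training_client.begin_training('<YOUR_SAS_URI>', use_training_labels=True)
model = poller.result()

print('Custom Model Info:')
print('    Model Id:', model.model_id)
print('    Model Status:', model.status)
print('    Training started on:', model.training_started_on)
print('    Training completed on:', model.training_completed_on)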


Run the code: dotnet run

Output:


4. Test the Model 

using System;
using System.IO;
using System.Collections.Generic;
using System.Threading.Tasks;
using Microsoft.Extensions.Configuration;

// import namespaces
using Azure;
using Azure.AI.FormRecognizer;
using Azure.AI.FormRecognizer.Models;
using Azure.AI.FormRecognizer.Training;

namespace test_model
{
    class Program
    {
        static async Task Main(string[] args)
        {
            try
            {
                // Get configuration settings from AppSettings
                // IConfigurationBuilder builder = new ConfigurationBuilder().AddJsonFile("appsettings.json");
                // IConfigurationRoot configuration = builder.Build();
                // string formEndpoint = configuration["FormEndpoint"];
                // string formKey = configuration["FormKey"];
                // string modelId = configuration["ModelId"];

                // "YOUR_FORM_RECOGNIZER_ENDPOINT";                 string formEndpoint = "https://a.cognitiveservices.azure.com/";
                // "YOUR_FORM_RECOGNIZER_KEY";                 string formKey = "1E7gEDsZ2pUAiximAAALACOGu5mm";
                // "YOUR_MODEL_ID";
                string modelId = "7891e019-9cc2-48a9-a9e6-08ac408484c5";

                // Authenticate Azure AI Document Intelligence Client
                var credential = new AzureKeyCredential(formKey);
                var recognizerClient = new FormRecognizerClient(new Uri(formEndpoint), credential);

                // Get form url for testing  
                string image_file = "test1.jpg";
                using (var image_data = File.OpenRead(image_file))
                {
                    // Use trained model with new form
                    RecognizedFormCollection forms = await recognizerClient
                    .StartRecognizeCustomForms(modelId, image_data)
                    .WaitForCompletionAsync();

                    foreach (RecognizedForm form in forms)
                    {
                        Console.WriteLine($"Form of type: {form.FormType}");
                        foreach (FormField field in form.Fields.Values)
                        {
                            Console.WriteLine($"Field '{field.Name}':");

                            if (field.LabelData != null)
                            {
                                Console.WriteLine($"    Label: '{field.LabelData.Text}'");
                            }

                            Console.WriteLine($"    Value: '{field.ValueData.Text}'");
                            Console.WriteLine($"    Confidence: {field.Confidence}");
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
    }
}
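And the equivalent test step in Python (a sketch under the same assumptions; the model id comes from the training output):

# Sketch: analyze a form with the trained custom model (azure-ai-formrecognizer 3.x)
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient

recognizer_client = FormRecognizerClient('<YOUR_FORM_RECOGNIZER_ENDPOINT>',
                                         AzureKeyCredential('<YOUR_FORM_RECOGNIZER_KEY>'))

with open('test1.jpg', 'rb') as image_data:
    poller = recognizer_client.begin_recognize_custom_forms('<YOUR_MODEL_ID>', image_data)
forms = poller.result()

for form in forms:
    print('Form of type:', form.form_type)
    for name, field in form.fields.items():
        label = field.label_data.text if field.label_data else name
        value = field.value_data.text if field.value_data else None
        print(f"Field '{name}': Label: '{label}', Value: '{value}', Confidence: {field.confidence}")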

Output:





