Azure AI services - Image Analysis with Azure AI Content Safety:
Source:
https://learn.microsoft.com/en-us/python/api/overview/azure/ai-contentsafety-readme?view=azure-python
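The following Python sample sends a local image file to the Azure AI Content Safety service and prints the severity returned for each harm category (hate, self-harm, sexual, and violence).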
import os
from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeImageOptions, ImageData, ImageCategory
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
def analyze_image():
    # Read the resource endpoint and key from environment variables instead of
    # hard-coding the subscription key in the script.
    endpoint = os.environ["CONTENT_SAFETY_ENDPOINT"]  # e.g. "https://contentsafety.cognitiveservices.azure.com/"
    key = os.environ["CONTENT_SAFETY_KEY"]
    image_path = "download_1.jpg"
    # Create an Azure AI Content Safety client
    client = ContentSafetyClient(endpoint, AzureKeyCredential(key))
    # Build request
    with open(image_path, "rb") as file:
        request = AnalyzeImageOptions(image=ImageData(content=file.read()))
    # Analyze image
    try:
        response = client.analyze_image(request)
    except HttpResponseError as e:
        print("Analyze image failed.")
        if e.error:
            print(f"Error code: {e.error.code}")
            print(f"Error message: {e.error.message}")
            raise
        print(e)
        raise
    hate_result = next(item for item in response.categories_analysis if item.category == ImageCategory.HATE)
    self_harm_result = next(item for item in response.categories_analysis if item.category == ImageCategory.SELF_HARM)
    sexual_result = next(item for item in response.categories_analysis if item.category == ImageCategory.SEXUAL)
    violence_result = next(item for item in response.categories_analysis if item.category == ImageCategory.VIOLENCE)
    if hate_result:
        print(f"Hate severity: {hate_result.severity}")
    if self_harm_result:
        print(f"SelfHarm severity: {self_harm_result.severity}")
    if sexual_result:
        print(f"Sexual severity: {sexual_result.severity}")
    if violence_result:
        print(f"Violence severity: {violence_result.severity}")
if __name__ == "__main__":
    analyze_image()
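The sample assumes the azure-ai-contentsafety package is installed (pip install azure-ai-contentsafety) and that the CONTENT_SAFETY_ENDPOINT and CONTENT_SAFETY_KEY environment variables hold the endpoint and key of your Content Safety resource. As a variation, the four separate next(...) lookups can be collapsed into one loop over the returned categories; a minimal sketch, assuming the same response object as above:

    # Print the severity for every category the service returned,
    # without hard-coding the category names.
    for item in response.categories_analysis:
        print(f"{item.category} severity: {item.severity}")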
Input Image:
Output: