Use black to reformat all samples and tests
zihyunting committed May 9, 2024
1 parent e7c81b7 commit 85554ab
Showing 24 changed files with 700 additions and 263 deletions.
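
Every hunk below follows the same black conventions: empty parentheses are dropped from class definitions, calls that exceed the configured line length are split so their arguments move inside the parentheses onto their own lines (gaining a trailing comma when each argument lands on a separate line), and backslash continuations are removed. As a rough editorial illustration, not part of this commit and assuming black's default 88-character line length (the repository may pin different settings), black's format_str reproduces the rewrap seen in the first hunk:

# Illustrative sketch only; assumes black's default Mode (88-character lines).
import black

src = (
    "self.endpoint = os.getenv("
    "CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)\n"
)
print(black.format_str(src, mode=black.Mode()))
# Expected output (matching the first hunk, modulo indentation):
# self.endpoint = os.getenv(
#     CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
# )
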
24 changes: 17 additions & 7 deletions sdk/face/azure-ai-vision-face/samples/samples_authentication.py
@@ -41,11 +41,15 @@
from shared.helpers import beautify_json, get_logger


class FaceAuthentication():
class FaceAuthentication:
def __init__(self):
load_dotenv(find_dotenv())
self.endpoint = os.getenv(CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)
self.key = os.getenv(CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY)
self.endpoint = os.getenv(
CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
)
self.key = os.getenv(
CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY
)
self.logger = get_logger("sample_authentication")

def authentication_by_api_key(self):
@@ -54,13 +58,16 @@ def authentication_by_api_key(self):
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

self.logger.info("Instantiate a FaceClient using an api key")
with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.DEFAULT_IMAGE_FILE)
result = face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
detection_model=FaceDetectionModel.DETECTION_03,
recognition_model=FaceRecognitionModel.RECOGNITION_04,
return_face_id=False)
return_face_id=False,
)

self.logger.info(f"Detect faces from the file: {sample_file_path}")
for idx, face in enumerate(result):
@@ -73,13 +80,16 @@ def authentication_by_aad_credential(self):
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

self.logger.info("Instantiate a FaceClient using a TokenCredential")
with FaceClient(endpoint=self.endpoint, credential=DefaultAzureCredential()) as face_client:
with FaceClient(
endpoint=self.endpoint, credential=DefaultAzureCredential()
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.DEFAULT_IMAGE_FILE)
result = face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
detection_model=FaceDetectionModel.DETECTION_03,
recognition_model=FaceRecognitionModel.RECOGNITION_04,
return_face_id=False)
return_face_id=False,
)

self.logger.info(f"Detect faces from the file: {sample_file_path}")
for idx, face in enumerate(result):
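
Editorial note, not part of the diff: the synchronous pattern exercised by samples_authentication.py boils down to the sketch below. The endpoint, key, and image path are illustrative placeholders, and face_rectangle is just one of the properties each detection result exposes.

# Minimal sketch of the API-key authentication pattern shown above.
# "<endpoint>", "<api-key>", and "people.jpg" are illustrative placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

with FaceClient(
    endpoint="<endpoint>", credential=AzureKeyCredential("<api-key>")
) as face_client:
    with open("people.jpg", "rb") as image:
        result = face_client.detect(
            image.read(),
            detection_model=FaceDetectionModel.DETECTION_03,
            recognition_model=FaceRecognitionModel.RECOGNITION_04,
            return_face_id=False,
        )
    for idx, face in enumerate(result):
        print(f"Face {idx}: {face.face_rectangle}")
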
sdk/face/azure-ai-vision-face/samples/samples_authentication_async.py
@@ -42,11 +42,15 @@
from shared.helpers import beautify_json, get_logger


class FaceAuthentication():
class FaceAuthentication:
def __init__(self):
load_dotenv(find_dotenv())
self.endpoint = os.getenv(CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)
self.key = os.getenv(CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY)
self.endpoint = os.getenv(
CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
)
self.key = os.getenv(
CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY
)
self.logger = get_logger("sample_authentication_async")

async def authentication_by_api_key(self):
@@ -55,13 +59,16 @@ async def authentication_by_api_key(self):
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

self.logger.info("Instantiate a FaceClient using an api key")
async with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
async with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.DEFAULT_IMAGE_FILE)
result = await face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
detection_model=FaceDetectionModel.DETECTION_03,
recognition_model=FaceRecognitionModel.RECOGNITION_04,
return_face_id=False)
return_face_id=False,
)

self.logger.info(f"Detect faces from the file: {sample_file_path}")
for idx, face in enumerate(result):
@@ -74,14 +81,16 @@ async def authentication_by_aad_credential(self):
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

self.logger.info("Instantiate a FaceClient using a TokenCredential")
async with DefaultAzureCredential() as credential, \
FaceClient(endpoint=self.endpoint, credential=credential) as face_client:
async with DefaultAzureCredential() as credential, FaceClient(
endpoint=self.endpoint, credential=credential
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.DEFAULT_IMAGE_FILE)
result = await face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
detection_model=FaceDetectionModel.DETECTION_03,
recognition_model=FaceRecognitionModel.RECOGNITION_04,
return_face_id=False)
return_face_id=False,
)

self.logger.info(f"Detect faces from the file: {sample_file_path}")
for idx, face in enumerate(result):
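
Editorial note, not part of the diff: the async counterpart also shows black rewriting a backslash-continued async with over two context managers into a wrap inside the FaceClient(...) call, since black removes line continuations. A hedged sketch of that pattern outside the sample class, with placeholder endpoint and image path:

# Minimal sketch of the async AAD authentication pattern shown above.
# "<endpoint>" and "people.jpg" are illustrative placeholders; the async
# DefaultAzureCredential is assumed to come from azure.identity.aio (its
# import sits outside the visible hunk).
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.ai.vision.face.aio import FaceClient
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel


async def main():
    async with DefaultAzureCredential() as credential, FaceClient(
        endpoint="<endpoint>", credential=credential
    ) as face_client:
        with open("people.jpg", "rb") as image:
            content = image.read()
        result = await face_client.detect(
            content,
            detection_model=FaceDetectionModel.DETECTION_03,
            recognition_model=FaceRecognitionModel.RECOGNITION_04,
            return_face_id=False,
        )
        for idx, face in enumerate(result):
            print(f"Face {idx}: {face.face_rectangle}")


asyncio.run(main())
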
43 changes: 31 additions & 12 deletions sdk/face/azure-ai-vision-face/samples/samples_face_detection.py
@@ -33,20 +33,30 @@
from shared.helpers import beautify_json, get_logger


class DetectFaces():
class DetectFaces:
def __init__(self):
load_dotenv(find_dotenv())
self.endpoint = os.getenv(CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)
self.key = os.getenv(CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY)
self.endpoint = os.getenv(
CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
)
self.key = os.getenv(
CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY
)
self.logger = get_logger("sample_face_detection")

def detect(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import (
FaceDetectionModel, FaceRecognitionModel, FaceAttributeTypeDetection03, FaceAttributeTypeRecognition04)

with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
FaceDetectionModel,
FaceRecognitionModel,
FaceAttributeTypeDetection03,
FaceAttributeTypeRecognition04,
)

with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.IMAGE_DETECTION_5)
result = face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
@@ -56,10 +66,12 @@ def detect(self):
return_face_attributes=[
FaceAttributeTypeDetection03.HEAD_POSE, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection03.MASK, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION], # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION,
], # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
return_face_landmarks=True,
return_recognition_model=True,
face_id_time_to_live=120)
face_id_time_to_live=120,
)

self.logger.info(f"Detect faces from the file: {sample_file_path}")
for idx, face in enumerate(result):
@@ -70,9 +82,14 @@ def detect_from_url(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import (
FaceDetectionModel, FaceRecognitionModel, FaceAttributeTypeDetection01)

with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
FaceDetectionModel,
FaceRecognitionModel,
FaceAttributeTypeDetection01,
)

with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_url = TestImages.DEFAULT_IMAGE_URL
result = face_client.detect_from_url( # type: ignore
url=sample_url,
@@ -83,7 +100,9 @@ def detect_from_url(self):
FaceAttributeTypeDetection01.ACCESSORIES, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.EXPOSURE, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.GLASSES, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.NOISE]) # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.NOISE,
],
) # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501

self.logger.info(f"Detect faces from the url: {sample_url}")
for idx, face in enumerate(result):
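
Editorial note, not part of the diff: samples_face_detection.py layers attribute selection on top of plain detection. The sketch below distills the URL variant; the endpoint, key, and URL are placeholders, and DETECTION_01 is an assumption consistent with the Detection01 attributes the sample requests (the model arguments sit outside the visible hunk).

# Minimal sketch of detection from a URL with selected attributes.
# "<endpoint>", "<api-key>", and the URL are placeholders; DETECTION_01 is
# assumed because the sample requests FaceAttributeTypeDetection01 values.
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import (
    FaceDetectionModel,
    FaceRecognitionModel,
    FaceAttributeTypeDetection01,
)

with FaceClient(
    endpoint="<endpoint>", credential=AzureKeyCredential("<api-key>")
) as face_client:
    result = face_client.detect_from_url(
        url="https://example.com/people.jpg",
        detection_model=FaceDetectionModel.DETECTION_01,
        recognition_model=FaceRecognitionModel.RECOGNITION_04,
        return_face_id=False,
        return_face_attributes=[
            FaceAttributeTypeDetection01.GLASSES,
            FaceAttributeTypeDetection01.NOISE,
        ],
    )
    for idx, face in enumerate(result):
        print(f"Face {idx}: {face.face_attributes}")
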
sdk/face/azure-ai-vision-face/samples/samples_face_detection_async.py
@@ -34,20 +34,30 @@
from shared.helpers import beautify_json, get_logger


class DetectFaces():
class DetectFaces:
def __init__(self):
load_dotenv(find_dotenv())
self.endpoint = os.getenv(CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)
self.key = os.getenv(CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY)
self.endpoint = os.getenv(
CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
)
self.key = os.getenv(
CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY
)
self.logger = get_logger("sample_face_detection_async")

async def detect(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face.aio import FaceClient
from azure.ai.vision.face.models import (
FaceDetectionModel, FaceRecognitionModel, FaceAttributeTypeDetection03, FaceAttributeTypeRecognition04)

async with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
FaceDetectionModel,
FaceRecognitionModel,
FaceAttributeTypeDetection03,
FaceAttributeTypeRecognition04,
)

async with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.IMAGE_DETECTION_5)
result = await face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
@@ -57,10 +67,12 @@ async def detect(self):
return_face_attributes=[
FaceAttributeTypeDetection03.HEAD_POSE, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection03.MASK, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION], # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION,
], # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
return_face_landmarks=True,
return_recognition_model=True,
face_id_time_to_live=120)
face_id_time_to_live=120,
)

self.logger.info(f"Detect faces from the file: {sample_file_path}")
for idx, face in enumerate(result):
@@ -71,9 +83,14 @@ async def detect_from_url(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face.aio import FaceClient
from azure.ai.vision.face.models import (
FaceDetectionModel, FaceRecognitionModel, FaceAttributeTypeDetection01)

async with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
FaceDetectionModel,
FaceRecognitionModel,
FaceAttributeTypeDetection01,
)

async with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_url = TestImages.DEFAULT_IMAGE_URL
result = await face_client.detect_from_url( # type: ignore
url=sample_url,
@@ -84,7 +101,9 @@ async def detect_from_url(self):
FaceAttributeTypeDetection01.ACCESSORIES, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.EXPOSURE, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.GLASSES, # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.NOISE]) # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501
FaceAttributeTypeDetection01.NOISE,
],
) # type: ignore # pyright: ignore[reportAttributeAccessIssue] # noqa: E501

self.logger.info(f"Detect faces from the url: {sample_url}")
for idx, face in enumerate(result):
21 changes: 15 additions & 6 deletions sdk/face/azure-ai-vision-face/samples/samples_face_grouping.py
@@ -33,28 +33,37 @@
from shared.helpers import beautify_json, get_logger


class GroupFaces():
class GroupFaces:
def __init__(self):
load_dotenv(find_dotenv())
self.endpoint = os.getenv(CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)
self.key = os.getenv(CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY)
self.endpoint = os.getenv(
CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
)
self.key = os.getenv(
CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY
)
self.logger = get_logger("sample_face_grouping")

def group(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.IMAGE_NINE_FACES)
detect_result = face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
detection_model=FaceDetectionModel.DETECTION_03,
recognition_model=FaceRecognitionModel.RECOGNITION_04,
return_face_id=True)
return_face_id=True,
)

face_ids = [face.face_id for face in detect_result]
self.logger.info(f"Detect {len(face_ids)} faces from the file '{sample_file_path}': {face_ids}")
self.logger.info(
f"Detect {len(face_ids)} faces from the file '{sample_file_path}': {face_ids}"
)

group_result = face_client.group(face_ids=face_ids)
self.logger.info(f"Group result: {beautify_json(group_result.as_dict())}")
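
Editorial note, not part of the diff: samples_face_grouping.py chains detection and grouping, detecting with return_face_id=True, collecting the IDs, and passing them to group. A hedged sketch of that flow with placeholder endpoint, key, and image path:

# Minimal sketch of the detect-then-group flow shown above. "<endpoint>",
# "<api-key>", and "nine-faces.jpg" are illustrative placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face import FaceClient
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

with FaceClient(
    endpoint="<endpoint>", credential=AzureKeyCredential("<api-key>")
) as face_client:
    with open("nine-faces.jpg", "rb") as image:
        detect_result = face_client.detect(
            image.read(),
            detection_model=FaceDetectionModel.DETECTION_03,
            recognition_model=FaceRecognitionModel.RECOGNITION_04,
            return_face_id=True,
        )

    face_ids = [face.face_id for face in detect_result]
    print(f"Detected {len(face_ids)} faces: {face_ids}")

    group_result = face_client.group(face_ids=face_ids)
    print(group_result.as_dict())
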
sdk/face/azure-ai-vision-face/samples/samples_face_grouping_async.py
@@ -34,28 +34,37 @@
from shared.helpers import beautify_json, get_logger


class GroupFaces():
class GroupFaces:
def __init__(self):
load_dotenv(find_dotenv())
self.endpoint = os.getenv(CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT)
self.key = os.getenv(CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY)
self.endpoint = os.getenv(
CONFIGURATION_NAME_FACE_API_ENDPOINT, DEFAULT_FACE_API_ENDPOINT
)
self.key = os.getenv(
CONFIGURATION_NAME_FACE_API_ACCOUNT_KEY, DEFAULT_FACE_API_ACCOUNT_KEY
)
self.logger = get_logger("sample_face_grouping_async")

async def group(self):
from azure.core.credentials import AzureKeyCredential
from azure.ai.vision.face.aio import FaceClient
from azure.ai.vision.face.models import FaceDetectionModel, FaceRecognitionModel

async with FaceClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) as face_client:
async with FaceClient(
endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
) as face_client:
sample_file_path = helpers.get_image_path(TestImages.IMAGE_NINE_FACES)
detect_result = await face_client.detect( # type: ignore
helpers.read_file_content(sample_file_path),
detection_model=FaceDetectionModel.DETECTION_03,
recognition_model=FaceRecognitionModel.RECOGNITION_04,
return_face_id=True)
return_face_id=True,
)

face_ids = [face.face_id for face in detect_result]
self.logger.info(f"Detect {len(face_ids)} faces from the file '{sample_file_path}': {face_ids}")
self.logger.info(
f"Detect {len(face_ids)} faces from the file '{sample_file_path}': {face_ids}"
)

group_result = await face_client.group(face_ids=face_ids)
self.logger.info(f"Group result: {beautify_json(group_result.as_dict())}")
@@ -65,5 +74,6 @@ async def main():
sample = GroupFaces()
await sample.group()


if __name__ == "__main__":
asyncio.run(main())