Merge branch 'ai/feat/118-segmentation-autolabel' into 'ai/develop'

Feat: Segmentation 오토 레이블링 API 구현 - S11P21S002-118

See merge request s11-s-project/S11P21S002!48
This commit is contained in:
김태수 2024-09-05 12:52:01 +09:00
commit e86ce42166
3 changed files with 99 additions and 19 deletions

View File

@@ -0,0 +1,63 @@
from fastapi import APIRouter, HTTPException
from schemas.predict_request import PredictRequest
from schemas.predict_response import PredictResponse, LabelData
from services.ai_service import load_segmentation_model
from typing import List
router = APIRouter()
@router.post("/segmentation", response_model=List[PredictResponse])
def predict(request: PredictRequest):
    """Run YOLO segmentation auto-labeling on every image in the request.

    Loads the segmentation model, runs inference once per image URL, and
    converts each result's polygons into a LabelData payload (task "seg").

    Raises:
        HTTPException: 500 when model loading, inference, or parsing of the
            inference results into labels fails.
    """
    version = "0.1.0"  # label-format version stamped into every LabelData

    # Load the segmentation model.
    try:
        model = load_segmentation_model()
    except Exception as e:
        # `from e` keeps the original traceback attached to the HTTP error.
        raise HTTPException(status_code=500, detail=f"load model exception: {e}") from e

    # Inference: one predict() call per requested image.
    results = []
    try:
        for image in request.image_list:
            predict_results = model.predict(
                source=image.image_url,
                iou=request.iou_threshold,
                conf=request.conf_threshold,
                classes=request.classes
            )
            # predict() returns a list of Results; one source URL -> one result.
            results.append(predict_results[0])
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"model predict exception: {e}") from e

    # Parse inference results into label objects.
    response = []
    try:
        for (image, result) in zip(request.image_list, results):
            label_data: LabelData = {
                "version": version,
                "task_type": "seg",
                "shapes": [
                    {
                        "label": summary['name'],
                        "color": "#ff0000",
                        # polygon vertices paired up as (x, y) tuples
                        "points": list(zip(summary['segments']['x'], summary['segments']['y'])),
                        "group_id": summary['class'],
                        "shape_type": "polygon",
                        "flags": {}
                    }
                    for summary in result.summary()
                ],
                "split": "none",
                # orig_img is an HxWxC array, so shape gives height/width/depth.
                # NOTE(review): shape[2] assumes a channel dimension — confirm
                # grayscale sources cannot reach this point.
                "imageHeight": result.orig_img.shape[0],
                "imageWidth": result.orig_img.shape[1],
                "imageDepth": result.orig_img.shape[2]
            }
            response.append({
                "image_id": image.image_id,
                "image_url": image.image_url,
                "data": label_data
            })
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"label parsing exception: {e}") from e

    return response

View File

@@ -1,10 +1,12 @@
 from fastapi import FastAPI
 from api.yolo.detection import router as yolo_detection_router
+from api.yolo.segmentation import router as yolo_segmentation_router
 app = FastAPI()
 # 각 기능별 라우터를 애플리케이션에 등록
 app.include_router(yolo_detection_router, prefix="/api")
+app.include_router(yolo_segmentation_router, prefix="/api")
 # 애플리케이션 실행
 if __name__ == "__main__":

View File

@@ -2,7 +2,7 @@
 from ultralytics import YOLO  # Ultralytics YOLO 모델을 가져오기
 from ultralytics.models.yolo.model import YOLO as YOLO_Model
-from ultralytics.nn.tasks import DetectionModel
+from ultralytics.nn.tasks import DetectionModel, SegmentationModel
 import os
 import torch
@@ -22,21 +22,36 @@ def load_detection_model(model_path: str = "test-data/model/yolov8n.pt", device:
     if not os.path.exists(model_path) and model_path != "test-data/model/yolov8n.pt":
         raise FileNotFoundError(f"Model file not found at path: {model_path}")
-    try:
-        model = YOLO(model_path)
-        # Detection 모델인지 검증
-        if not (isinstance(model, YOLO_Model) and isinstance(model.model, DetectionModel)):
-            raise TypeError(f"Invalid model type: {type(model)} (contained model type: {type(model.model)}). Expected a DetectionModel.")
-        # gpu 이용
-        if (device == "auto" and torch.cuda.is_available()):
-            model.to("cuda")
-            print('gpu 가속 활성화')
-        elif (device == "auto"):
-            model.to("cpu")
-        else:
-            model.to(device)
-        return model
-    except Exception as e:
-        raise RuntimeError(f"Failed to load the model from {model_path}. Error: {str(e)}")
+    model = YOLO(model_path)
+    # Detection 모델인지 검증
+    if not (isinstance(model, YOLO_Model) and isinstance(model.model, DetectionModel)):
+        raise TypeError(f"Invalid model type: {type(model)} (contained model type: {type(model.model)}). Expected a DetectionModel.")
+    # gpu 이용
+    if (device == "auto" and torch.cuda.is_available()):
+        model.to("cuda")
+        print('gpu 가속 활성화')
+    elif (device == "auto"):
+        model.to("cpu")
+    else:
+        model.to(device)
+    return model
def load_segmentation_model(model_path: str = "test-data/model/yolov8n-seg.pt", device:str ="auto"):
    """Load a YOLO segmentation model and place it on the requested device.

    The default weight path is allowed to be absent on disk (presumably
    ultralytics fetches the bundled weights on demand — confirm); any other
    path must already exist.

    Raises:
        FileNotFoundError: a non-default model_path does not exist.
        TypeError: the loaded weights are not a SegmentationModel.
    """
    is_default_weights = model_path == "test-data/model/yolov8n-seg.pt"
    if not (os.path.exists(model_path) or is_default_weights):
        raise FileNotFoundError(f"Model file not found at path: {model_path}")

    model = YOLO(model_path)

    # Verify that the loaded weights really are a segmentation model.
    is_segmentation = isinstance(model, YOLO_Model) and isinstance(model.model, SegmentationModel)
    if not is_segmentation:
        raise TypeError(f"Invalid model type: {type(model)} (contained model type: {type(model.model)}). Expected a SegmentationModel.")

    # Device placement: an explicit device wins; "auto" prefers CUDA, else CPU.
    if device != "auto":
        model.to(device)
    elif torch.cuda.is_available():
        model.to("cuda")
        print('gpu 가속 활성화')
    else:
        model.to("cpu")
    return model