Merge branch 'ai/feat/52-ai-model-create' into 'ai/develop'

Feat: merge the model-related API commits implemented over the Chuseok holiday

See merge request s11-s-project/S11P21S002!97
This commit is contained in:
김용수 2024-09-19 16:54:44 +09:00
commit f9f3a0206d
14 changed files with 323 additions and 115 deletions

View File

@@ -20,3 +20,11 @@ conda activate worlabel_ai_env
### app/utils
- Defines formatters and other utilities used across the project
### resources/models
- Stores the 6 base YOLO models (default/pretrained, det/seg/cls)
### resources/projects/{project_id}/models
- Stores the AI models for each project
### resources/datasets
- Stores the training datasets
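
For reference, a sketch of the resulting resources layout (the base-model file names are inferred from services/create_model.py, so treat them as an assumption):

```
resources/
├── models/                       # yolov8n.pt / yolov8n-seg.pt / yolov8n-cls.pt
│                                 # plus the matching .yaml configs (6 files total)
├── projects/
│   └── {project_id}/
│       └── models/               # per-project models saved as <uuid>.pt
└── datasets/                     # training datasets
```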

View File

@@ -1,47 +1,46 @@
import json
from fastapi import APIRouter, HTTPException
from fastapi import APIRouter, HTTPException, Response
from schemas.predict_request import PredictRequest
from schemas.train_request import TrainRequest
from schemas.predict_response import PredictResponse, LabelData
from services.ai_service import load_detection_model
from typing import List
from utils.websocket_utils import WebSocketClient
from services.load_model import load_detection_model
from utils.dataset_utils import split_data
from utils.file_utils import get_dataset_root_path, process_directories, process_image_and_label, join_path
from utils.websocket_utils import WebSocketClient, WebSocketConnectionException
import asyncio
router = APIRouter()
@router.post("/detection", response_model=List[PredictResponse])
async def predict(request: PredictRequest):
@router.post("/predict")
async def detection_predict(request: PredictRequest):
version = "0.1.0"
print("여기")
# Spring 서버의 WebSocket URL
# TODO: 배포 시 변경
spring_server_ws_url = f"ws://localhost:8080/ws"
print("여기")
# WebSocketClient 인스턴스 생성
ws_client = WebSocketClient(spring_server_ws_url)
# Load the model
try:
model = load_detection_model(request.path)
except Exception as e:
raise HTTPException(status_code=500, detail="load model exception: " + str(e))
# Connect to the WebSocket
try:
await ws_client.connect()
# Load the model
try:
model = load_detection_model()
except Exception as e:
raise HTTPException(status_code=500, detail="load model exception: " + str(e))
if not ws_client.is_connected():
raise WebSocketConnectionException()
# Inference
results = []
total_images = len(request.image_list)
for idx, image in enumerate(request.image_list):
try:
# Load the image from the URL into memory. TODO: decide later whether to keep it in memory or switch to parallel processing
predict_results = model.predict(
source=image.image_url,
iou=request.iou_threshold,
@@ -86,22 +85,34 @@ async def predict(request: PredictRequest):
message = {
"project_id": request.project_id,
"progress": progress,
"result": response_item.dict()
"result": response_item.model_dump()
}
await ws_client.send_message("/app/ai/predict/progress", json.dumps(message))
except Exception as e:
raise HTTPException(status_code=500, detail="model predict exception: " + str(e))
# Parse inference results into label objects
return Response(status_code=204)
# Fallback when the WebSocket connection is unavailable
except WebSocketConnectionException as e:
# Inference
response = []
try:
for (image, result) in zip(request.image_list, results):
label_data: LabelData = {
"version": version,
"task_type": "det",
"shapes": [
for image in request.image_list:
try:
predict_results = model.predict(
source=image.image_url,
iou=request.iou_threshold,
conf=request.conf_threshold,
classes=request.classes
)
# Process the prediction result
result = predict_results[0]
label_data = LabelData(
version=version,
task_type="det",
shapes=[
{
"label": summary['name'],
"color": "#ff0000",
@@ -115,21 +126,24 @@ async def predict(request: PredictRequest):
}
for summary in result.summary()
],
"split": "none",
"imageHeight": result.orig_img.shape[0],
"imageWidth": result.orig_img.shape[1],
"imageDepth": result.orig_img.shape[2]
}
response.append({
"image_id": image.image_id,
"image_url": image.image_url,
"data": label_data
})
except Exception as e:
raise HTTPException(status_code=500, detail="label parsing exception: " + str(e))
split="none",
imageHeight=result.orig_img.shape[0],
imageWidth=result.orig_img.shape[1],
imageDepth=result.orig_img.shape[2]
)
response_item = PredictResponse(
image_id=image.image_id,
image_url=image.image_url,
data=label_data
)
response.append(response_item)
except Exception as e:
raise HTTPException(status_code=500, detail="model predict exception: " + str(e))
return response
except Exception as e:
print(f"Prediction process failed: {str(e)}")
raise HTTPException(status_code=500, detail="Prediction process failed")
@@ -139,8 +153,8 @@ async def predict(request: PredictRequest):
await ws_client.close()
@router.post("/detection/train")
async def train(request: TrainRequest):
@router.post("/train")
async def detection_train(request: TrainRequest):
# Get the dataset root path
dataset_root_path = get_dataset_root_path(request.project_id)
@@ -202,7 +216,3 @@ async def train(request: TrainRequest):
finally:
if ws_client.is_connected():
await ws_client.close()
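
A minimal client sketch for the renamed endpoint (the host/port and image URL are placeholders; the `/api/detection` prefix comes from main.py below):

```python
# Hypothetical call to the detection predict endpoint.
import requests

payload = {
    "project_id": 1,
    "image_list": [{"image_id": 10, "image_url": "https://example.com/sample.jpg"}],
    "conf_threshold": 0.3,
    "iou_threshold": 0.5,
}
resp = requests.post("http://localhost:8000/api/detection/predict", json=payload)

# 204: results were streamed to the Spring server over WebSocket.
# 200 with a JSON list: the WebSocket was unavailable and results are returned directly.
if resp.status_code == 200:
    print(resp.json())
```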

ai/app/api/yolo/model.py Normal file
View File

@@ -0,0 +1,91 @@
from fastapi import APIRouter, HTTPException, File, UploadFile
from schemas.model_create_request import ModelCreateRequest
from services.create_model import create_new_model, upload_tmp_model
from services.load_model import load_model
from utils.file_utils import get_model_paths, delete_file, join_path, save_file, get_file_name
import re
from fastapi.responses import FileResponse
router = APIRouter()
# Returns the model type (detection/segmentation/classification), whether it is pretrained (default vs. pretrained), and its labelCategories
@router.get("/info")
def get_model_info(model_path:str):
try:
model = load_model(model_path=model_path)
except FileNotFoundError:
raise HTTPException(status_code=404,
detail= "모델을 찾을 수 없습니다.")
except Exception as e:
raise HTTPException(status_code=500, detail="model load exception: " + str(e))
pretrained = model.names == {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'}
return {"type": model.task, "pretrained":pretrained, "labelCategories":model.names}
# Returns the list of model paths for the given project_id
@router.get("/list")
def get_model_list(project_id:int):
try:
return get_model_paths(project_id)
except FileNotFoundError:
raise HTTPException(status_code=404,
detail= "프로젝트가 찾을 수 없거나 생성된 모델이 없습니다.")
@router.post("/create", status_code=201)
def create_model(request: ModelCreateRequest):
if request.type not in ["seg", "det", "cls"]:
raise HTTPException(status_code=400,
detail= f"Invalid type '{request.type}'. Must be one of \"seg\", \"det\", \"cls\".")
model_path = create_new_model(request.project_id, request.type, request.pretrained)
return {"model_path": model_path}
@router.delete("/delete", status_code=204)
def delete_model(model_path:str):
pattern = r'^resources[/\\]projects[/\\](\d+)[/\\]models[/\\]([a-f0-9\-]+)\.pt$'
if not re.match(pattern, model_path):
raise HTTPException(status_code=400,
detail= "Invalid path format")
try:
delete_file(model_path)
except FileNotFoundError:
raise HTTPException(status_code=404,
detail= "모델을 찾을 수 없습니다.")
@router.post("/upload")
def upload_model(project_id:int, file: UploadFile = File(...)):
# Check the file extension
if not file.filename.endswith(".pt"):
raise HTTPException(status_code=400, detail="Only .pt files are allowed.")
tmp_path = join_path("resources", "models", "tmp-"+file.filename)
# Save the file temporarily
try:
save_file(tmp_path, file)
except Exception as e:
raise HTTPException(status_code=500, detail="file save exception: "+str(e))
# Convert to a YOLO model and save it
try:
model_path = upload_tmp_model(project_id, tmp_path)
return {"model_path": model_path}
except Exception as e:
raise HTTPException(status_code=500, detail="file save exception: "+str(e))
finally:
# Delete the temporary file
delete_file(tmp_path)
@router.get("/download")
def download_model(model_path: str):
pattern = r'^resources[/\\]projects[/\\](\d+)[/\\]models[/\\]([a-f0-9\-]+)\.pt$'
if not re.match(pattern, model_path):
raise HTTPException(status_code=400,
detail= "Invalid path format")
try:
filename = get_file_name(model_path)
# Return the file as a response
return FileResponse(model_path, media_type='application/octet-stream', filename=filename)
except FileNotFoundError:
raise HTTPException(status_code=404,
detail= "모델을 찾을 수 없습니다.")

View File

@@ -1,12 +1,12 @@
from fastapi import APIRouter, HTTPException
from schemas.predict_request import PredictRequest
from schemas.predict_response import PredictResponse, LabelData
from services.ai_service import load_segmentation_model
from services.load_model import load_segmentation_model
from typing import List
router = APIRouter()
@router.post("/segmentation", response_model=List[PredictResponse])
@router.post("/predict", response_model=List[PredictResponse])
def predict(request: PredictRequest):
version = "0.1.0"

View File

@@ -1,12 +1,14 @@
from fastapi import FastAPI
from api.yolo.detection import router as yolo_detection_router
from api.yolo.segmentation import router as yolo_segmentation_router
from api.yolo.model import router as yolo_model_router
app = FastAPI()
# Register each feature router with the application
app.include_router(yolo_detection_router, prefix="/api")
app.include_router(yolo_segmentation_router, prefix="/api")
app.include_router(yolo_detection_router, prefix="/api/detection", tags=["Detection"])
app.include_router(yolo_segmentation_router, prefix="/api/segmentation", tags=["Segmentation"])
app.include_router(yolo_model_router, prefix="/api/model", tags=["Model"])
# Run the application
if __name__ == "__main__":

View File

@@ -0,0 +1,6 @@
from pydantic import BaseModel
class ModelCreateRequest(BaseModel):
project_id: int
type: str
pretrained:bool = True

View File

@@ -1,14 +1,20 @@
from pydantic import BaseModel
from pydantic import BaseModel, Field
from typing import List, Optional
class ImageInfo(BaseModel):
image_id: int
image_url: str
image_url: str
class LabelCategory(BaseModel):
label_id: int
label_name: str
class PredictRequest(BaseModel):
project_id: int
image_list: List[ImageInfo]
version: Optional[str] = "latest"
conf_threshold: Optional[float] = 0.25
iou_threshold: Optional[float] = 0.45
version: str = "latest"
conf_threshold: float = 0.25
iou_threshold: float = 0.45
classes: Optional[List[int]] = None
path: Optional[str] = Field(None, alias="model_path")
label_categories: Optional[List[LabelCategory]] = None
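
A short sketch of how the new `model_path` alias behaves (assuming Pydantic v2, which the `model_dump()` call in detection.py suggests): clients send `model_path`, the service reads `request.path`.

```python
from schemas.predict_request import PredictRequest

# Hypothetical payload; the "model_path" key populates the aliased "path" field.
payload = {
    "project_id": 1,
    "image_list": [{"image_id": 10, "image_url": "https://example.com/img.jpg"}],
    "model_path": "resources/projects/1/models/0f8fad5b-d9cb-469f-a165-70867728950e.pt",
}
req = PredictRequest(**payload)
print(req.path)            # resources/projects/1/models/...
print(req.conf_threshold)  # 0.25 (default)
```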

View File

@@ -1,12 +1,12 @@
from pydantic import BaseModel
from pydantic import BaseModel, Field
from typing import List, Optional, Union
from schemas.predict_response import LabelData
from schemas.predict_request import LabelCategory
class TrainDataInfo(BaseModel):
image_url: str
label: LabelData
class TrainRequest(BaseModel):
project_id: int
data: List[TrainDataInfo]
@@ -14,3 +14,5 @@ class TrainRequest(BaseModel):
ratio: float = 0.8 # train/validation split ratio
epochs: int = 50 # number of training epochs
batch: Union[float, int] = -1 # batch size [int] or automatic GPU utilization [float]; default -1 keeps GPU usage around 60%
path: Optional[str] = Field(None, alias="model_path")
label_categories: Optional[List[LabelCategory]] = None # used to detect new label categories
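
An illustrative training payload under the same assumptions (values are placeholders; each `data` item needs a LabelData-shaped `label` as defined in predict_response.py):

```python
# Hypothetical TrainRequest body (training data omitted for brevity).
train_body = {
    "project_id": 1,
    "data": [],           # [{"image_url": ..., "label": {...LabelData fields...}}]
    "ratio": 0.8,         # train/validation split
    "epochs": 50,
    "batch": 0.7,         # float: target GPU utilization; int: fixed batch size
    "model_path": "resources/projects/1/models/<uuid>.pt",
}
```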

View File

@@ -1,57 +0,0 @@
# ai_service.py
from ultralytics import YOLO # import the Ultralytics YOLO model
from ultralytics.models.yolo.model import YOLO as YOLO_Model
from ultralytics.nn.tasks import DetectionModel, SegmentationModel
import os
import torch
def load_detection_model(model_path: str = os.path.join("test-data","model","yolov8n.pt"), device:str ="auto"):
"""
Loads a YOLO model from the given path.
Args:
model_path (str): path to the model file.
device (str): device to load the model on. Defaults to 'auto';
'cpu' or 'cuda' may also be specified.
Returns:
YOLO: the loaded YOLO model instance
"""
if not os.path.exists(model_path) and model_path != "test-data/model/yolov8n.pt":
raise FileNotFoundError(f"Model file not found at path: {model_path}")
model = YOLO(model_path)
# Verify that this is a detection model
if not (isinstance(model, YOLO_Model) and isinstance(model.model, DetectionModel)):
raise TypeError(f"Invalid model type: {type(model)} (contained model type: {type(model.model)}). Expected a DetectionModel.")
# Use the GPU when available
if (device == "auto" and torch.cuda.is_available()):
model.to("cuda")
print('GPU acceleration enabled')
elif (device == "auto"):
model.to("cpu")
else:
model.to(device)
return model
def load_segmentation_model(model_path: str = "test-data/model/yolov8n-seg.pt", device:str ="auto"):
if not os.path.exists(model_path) and model_path != "test-data/model/yolov8n-seg.pt":
raise FileNotFoundError(f"Model file not found at path: {model_path}")
model = YOLO(model_path)
# Verify that this is a segmentation model
if not (isinstance(model, YOLO_Model) and isinstance(model.model, SegmentationModel)):
raise TypeError(f"Invalid model type: {type(model)} (contained model type: {type(model.model)}). Expected a SegmentationModel.")
# Use the GPU when available
if (device == "auto" and torch.cuda.is_available()):
model.to("cuda")
print('GPU acceleration enabled')
elif (device == "auto"):
model.to("cpu")
else:
model.to(device)
return model

View File

@@ -0,0 +1,49 @@
from ultralytics import YOLO # import the Ultralytics YOLO model
import os
import uuid
from services.load_model import load_model
def create_new_model(project_id: int, type:str, pretrained:bool):
suffix = ""
if type in ["seg", "cls"]:
suffix = "-"+type
# Load the base model (pretrained .pt weights or an untrained .yaml config)
if pretrained:
suffix += ".pt"
else:
suffix += ".yaml"
model = YOLO(os.path.join("resources", "models" ,f"yolov8n{suffix}"))
# Folder path where the model will be saved
base_path = os.path.join("resources","projects",str(project_id),"models")
os.makedirs(base_path, exist_ok=True)
# Generate a unique id
unique_id = uuid.uuid4()
while os.path.exists(os.path.join(base_path, f"{unique_id}.pt")):
unique_id = uuid.uuid4()
model_path = os.path.join(base_path, f"{unique_id}.pt")
# Save the base model
model.save(filename=model_path)
return model_path
def upload_tmp_model(project_id: int, tmp_path:str):
# Load the model
model = load_model(tmp_path)
# Folder path where the model will be saved
base_path = os.path.join("resources","projects",str(project_id),"models")
os.makedirs(base_path, exist_ok=True)
# Generate a unique id
unique_id = uuid.uuid4()
while os.path.exists(os.path.join(base_path, f"{unique_id}.pt")):
unique_id = uuid.uuid4()
model_path = os.path.join(base_path, f"{unique_id}.pt")
# Save the model
model.save(filename=model_path)
return model_path
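
A usage sketch; per the suffix logic above, the `(type, pretrained)` pair picks the base file in `resources/models`:

```python
# ("det", True) -> yolov8n.pt      ("det", False) -> yolov8n.yaml
# ("seg", True) -> yolov8n-seg.pt  ("cls", False) -> yolov8n-cls.yaml, etc.
from services.create_model import create_new_model

new_path = create_new_model(project_id=1, type="seg", pretrained=True)
print(new_path)  # resources/projects/1/models/<random uuid>.pt
```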

View File

@@ -0,0 +1,61 @@
# load_model.py
from ultralytics import YOLO # import the Ultralytics YOLO model
from ultralytics.models.yolo.model import YOLO as YOLO_Model
from ultralytics.nn.tasks import DetectionModel, SegmentationModel
import os
import torch
import re
def load_detection_model(model_path:str):
"""
Loads a YOLO detection model from the given path.
Args:
model_path (str): path to the model file; if empty, the default yolov8n.pt is used.
Returns:
YOLO: the loaded YOLO model instance
"""
if model_path:
model = load_model(model_path)
else:
model = YOLO(os.path.join("resources","models","yolov8n.pt"))
# Verify that this is a detection model
if model.task != "detect":
raise TypeError(f"Invalid model type: {model.task}. Expected a DetectionModel.")
return model
def load_segmentation_model(model_path: str):
if model_path:
model = YOLO(model_path)
else:
model = YOLO(os.path.join("resources","models","yolov8n-seg.pt"))
# Verify that this is a segmentation model
if model.task != "segment":
raise TypeError(f"Invalid model type: {model.task}. Expected a SegmentationModel.")
return model
def load_model(model_path: str):
# Validate model_path
pattern = r'^resources[/\\]projects[/\\](\d+)[/\\]models[/\\]([a-f0-9\-]+)\.pt$'
if not re.match(pattern, model_path):
raise Exception("Invalid path format")
if not os.path.exists(model_path):
raise FileNotFoundError(f"Model file not found at path: {model_path}")
try:
model = YOLO(model_path)
if (torch.cuda.is_available()):
model.to("cuda")
print("gpu 활성화")
else:
model.to("cpu")
return model
except Exception:
raise Exception("YOLO model conversion failed: Unsupported architecture or invalid configuration.")

View File

@@ -148,4 +148,29 @@ def process_image_and_label(data:TrainDataInfo, dataset_root_path:str, child_pat
def join_path(path, *paths):
"""os.path.join()과 같은 기능, os import 하기 싫어서 만듦"""
return os.path.join(path, *paths)
return os.path.join(path, *paths)
def get_model_paths(project_id:int):
path = os.path.join("resources","projects",str(project_id), "models")
if not os.path.exists(path):
raise FileNotFoundError()
files = os.listdir(path)
return [os.path.join(path, file) for file in files if file.endswith(".pt")]
def delete_file(path):
if not os.path.exists(path):
raise FileNotFoundError()
os.remove(path)
def save_file(path, file):
# Extract only the directory part of the path (without the file name)
dir_path = os.path.dirname(path)
os.makedirs(dir_path, exist_ok=True)
with open(path, "wb") as buffer:
shutil.copyfileobj(file.file, buffer)
def get_file_name(path):
if not os.path.exists(path):
raise FileNotFoundError()
return os.path.basename(path)

View File

@@ -1,4 +1,5 @@
import websockets
from websockets import WebSocketException
class WebSocketClient:
def __init__(self, url: str):
@@ -33,4 +34,8 @@ class WebSocketClient:
print(f"Failed to close WebSocket connection: {str(e)}")
def is_connected(self):
return self.websocket is not None and self.websocket.open
return self.websocket is not None and self.websocket.open
class WebSocketConnectionException(WebSocketException):
def __init__(self, message="Failed to connect to WebSocket"):
super().__init__(message)
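
The intended usage pattern mirrors detection.py; a sketch (the Spring URL and STOMP destination are copied from that router):

```python
import json
from utils.websocket_utils import WebSocketClient, WebSocketConnectionException

async def report_progress():
    ws_client = WebSocketClient("ws://localhost:8080/ws")
    try:
        await ws_client.connect()
        if not ws_client.is_connected():
            raise WebSocketConnectionException()
        # Push a progress message to the Spring server.
        await ws_client.send_message("/app/ai/predict/progress",
                                     json.dumps({"project_id": 1, "progress": 50}))
    except WebSocketConnectionException:
        pass  # fall back to returning results directly, as the detection router does
    finally:
        if ws_client.is_connected():
            await ws_client.close()
```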