Feat: implement segmentation API

김진현 2024-09-26 15:04:31 +09:00
parent 6c9782a807
commit 6225c50914
3 changed files with 219 additions and 55 deletions

View File

@@ -122,7 +122,7 @@ async def detection_train(request: TrainRequest, http_request: Request):
     # train
     results = run_train(request, token, model, dataset_root_path)
-    # save the last model
+    # save the best model
     model_key = save_model(project_id=request.project_id, path=join_path(dataset_root_path, "result", "weights", "best.pt"))
     response = {"model_key": model_key, "results": results.results_dict}
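
For context, an Ultralytics training run keeps two checkpoints in its weights/ folder: last.pt (the final epoch) and best.pt (the epoch with the best validation fitness). The corrected comment now matches the best.pt path that was already being saved. Illustrative layout, assuming the "result" run name used above:

# layout produced by model.train(name=".../result"), for illustration only
# result/
# ├── weights/
# │   ├── best.pt    <- best validation fitness; the file save_model() registers
# │   └── last.pt    <- final epoch
# └── results.csv    <- per-epoch metrics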

View File

@@ -1,62 +1,213 @@
-from fastapi import APIRouter, HTTPException
+from fastapi import APIRouter, HTTPException, Request
 from schemas.predict_request import PredictRequest
+from schemas.train_request import TrainRequest
 from schemas.predict_response import PredictResponse, LabelData
+from schemas.train_report_data import ReportData
 from services.load_model import load_segmentation_model
-from typing import List
+from services.create_model import save_model
+from utils.dataset_utils import split_data
+from utils.file_utils import get_dataset_root_path, process_directories, process_image_and_label, join_path
+from utils.slackMessage import send_slack_message
+from utils.api_utils import send_data_call_api
+import random
 router = APIRouter()
@router.post("/predict", response_model=List[PredictResponse])
def predict(request: PredictRequest):
@router.post("/predict")
async def segmentation_predict(request: PredictRequest):
send_slack_message(f"seg predict 요청: {request}", status="success")
# 모델 로드
try:
model = load_segmentation_model(request.project_id, request.m_key)
except Exception as e:
raise HTTPException(status_code=500, detail="load model exception: "+str(e))
model = get_model(request)
# 모델 레이블 카테고리 연결
classes = list(request.label_map) if request.label_map else None
# 이미지 데이터 정리
url_list = list(map(lambda x:x.image_url, request.image_list))
# 추론
results = []
results = run_predictions(model, url_list, request, classes)
# 추론 결과 변환
response = [process_prediction_result(result, image, request.label_map) for result, image in zip(results,request.image_list)]
send_slack_message(f"predict 성공{response}", status="success")
return response
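
The request and response schemas live in schemas/ and are not part of this diff. From the fields the handler touches, PredictRequest would look roughly like the sketch below; the field names come from the code above, while ImageInfo, the types, and the defaults are assumptions:

# hypothetical sketch, not the actual schemas/predict_request.py
from pydantic import BaseModel

class ImageInfo(BaseModel):  # assumed name for the image_list items
    image_id: int
    image_url: str

class PredictRequest(BaseModel):
    project_id: int
    m_key: str                                # key of the saved model to load
    label_map: dict[int, int] | None = None   # model class id -> project category id
    image_list: list[ImageInfo]
    conf_threshold: float = 0.25              # assumed defaults
    iou_threshold: float = 0.45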
+# load the model
+def get_model(request: PredictRequest):
     try:
-        for image in request.image_list:
-            predict_results = model.predict(
-                source=image.image_url,
-                iou=request.iou_threshold,
-                conf=request.conf_threshold,
-                classes=request.classes
-            )
-            results.append(predict_results[0])
+        return load_segmentation_model(request.project_id, request.m_key)
     except Exception as e:
-        raise HTTPException(status_code=500, detail="model predict exception: "+str(e))
+        raise HTTPException(status_code=500, detail="load model exception: " + str(e))
+# run inference
+def run_predictions(model, image, request, classes):
+    try:
+        return model.predict(
+            source=image,
+            iou=request.iou_threshold,
+            conf=request.conf_threshold,
+            classes=classes
+        )
+    except Exception as e:
+        raise HTTPException(status_code=500, detail="model predict exception: " + str(e))
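
Replacing the per-image loop with a single call works because Ultralytics' model.predict() accepts a list of sources and returns one Results object per input, in order; the endpoint then zips the results back with request.image_list. A minimal sketch of that behavior:

# illustrative only; any YOLO segmentation checkpoint will do
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")
results = model.predict(source=["img_a.jpg", "img_b.jpg"], conf=0.25, iou=0.45)
for r in results:
    print(r.orig_img.shape, len(r.summary()))  # one Results object per input image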
-    # inference results -> parse into label objects
-    response = []
+# process a single inference result
+def process_prediction_result(result, image, label_map):
     try:
-        for (image, result) in zip(request.image_list, results):
-            label_data:LabelData = {
-                "version": "0.0.0",
-                "task_type": "seg",
-                "shapes": [
-                    {
-                        "label": summary['name'],
-                        "color": "#ff0000",
-                        "points": list(zip(summary['segments']['x'], summary['segments']['y'])),
-                        "group_id": summary['class'],
-                        "shape_type": "polygon",
-                        "flags": {}
-                    }
-                    for summary in result.summary()
-                ],
-                "split": "none",
-                "imageHeight": result.orig_img.shape[0],
-                "imageWidth": result.orig_img.shape[1],
-                "imageDepth": result.orig_img.shape[2]
-            }
-            response.append({
-                "image_id": image.image_id,
-                "image_url": image.image_url,
-                "data": label_data
-            })
+        label_data = LabelData(
+            version="0.0.0",
+            task_type="seg",
+            shapes=[
+                {
+                    "label": summary['name'],
+                    "color": get_random_color(),
+                    "points": list(zip(summary['segments']['x'], summary['segments']['y'])),
+                    "group_id": label_map[summary['class']] if label_map else summary['class'],
+                    "shape_type": "polygon",
+                    "flags": {}
+                }
+                for summary in result.summary()
+            ],
+            split="none",
+            imageHeight=result.orig_img.shape[0],
+            imageWidth=result.orig_img.shape[1],
+            imageDepth=result.orig_img.shape[2]
+        )
     except Exception as e:
-        raise HTTPException(status_code=500, detail="label parsing exception: "+str(e))
-    return response
+        raise HTTPException(status_code=500, detail="label parsing exception: " + str(e))
+    return PredictResponse(
+        image_id=image.image_id,
+        data=label_data.model_dump_json()
+    )
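
Note that model_dump_json() is the Pydantic v2 serializer (the v1 equivalent is .json()), so the data field carries the label data as a JSON string rather than a nested object. Illustrative shape of one response element:

# {
#   "image_id": 3,
#   "data": "{\"version\": \"0.0.0\", \"task_type\": \"seg\", \"shapes\": [ ... ]}"
# }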
+def get_random_color():
+    random_number = random.randint(0, 0xFFFFFF)
+    return f"#{random_number:06X}"
@router.post("/train")
async def segmentation_train(request: TrainRequest, http_request: Request):
send_slack_message(f"train 요청{request}", status="success")
# Authorization 헤더에서 Bearer 토큰 추출
auth_header = http_request.headers.get("Authorization")
token = auth_header.split(" ")[1] if auth_header and auth_header.startswith("Bearer ") else None
# 레이블 맵
inverted_label_map = {value: key for key, value in request.label_map.items()} if request.label_map else None
# 데이터셋 루트 경로 얻기
dataset_root_path = get_dataset_root_path(request.project_id)
# 모델 로드
model = get_model(request)
# 학습할 모델 카테고리, 카테고리가 추가되는 경우 추가 작업 필요
model_categories = model.names
# 데이터 전처리
preprocess_dataset(dataset_root_path, model_categories, request.data, request.ratio, inverted_label_map)
# 학습
results = run_train(request,token,model,dataset_root_path)
# best 모델 저장
model_key = save_model(project_id=request.project_id, path=join_path(dataset_root_path, "result", "weights", "best.pt"))
response = {"model_key": model_key, "results": results.results_dict}
send_slack_message(f"train 성공{response}", status="success")
return response
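
As with prediction, TrainRequest is defined in schemas/ outside this diff. From its usage here and in run_train() below, a rough sketch (names from the code, types and defaults assumed):

# hypothetical sketch, not the actual schemas/train_request.py
from pydantic import BaseModel

class TrainRequest(BaseModel):
    project_id: int
    m_key: str                                # model to fine-tune
    m_id: int                                 # id passed to send_data_call_api
    label_map: dict[int, int] | None = None   # model class id -> project category id
    data: list                                # TrainDataInfo items (see the utils diff below)
    ratio: float = 0.8                        # train/val split, assumed default
    epochs: int = 50                          # training knobs, assumed defaults
    batch: int = 16
    lr0: float = 0.01
    lrf: float = 0.01
    optimizer: str = "auto"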
+def preprocess_dataset(dataset_root_path, model_categories, data, ratio, label_map):
+    try:
+        # create and initialize the directories
+        process_directories(dataset_root_path, model_categories)
+        # split the data into train and validation sets
+        train_data, val_data = split_data(data, ratio)
+        if not train_data or not val_data:
+            raise HTTPException(status_code=400, detail="data split exception: data size is too small or \"ratio\" has an invalid value")
+        # process the training data
+        for data in train_data:
+            process_image_and_label(data, dataset_root_path, "train", label_map)
+        # process the validation data
+        for data in val_data:
+            process_image_and_label(data, dataset_root_path, "val", label_map)
+    except HTTPException as e:
+        raise e  # re-raise the HTTP exception
+    except Exception as e:
+        raise HTTPException(status_code=500, detail="preprocess dataset exception: " + str(e))
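
split_data() comes from utils.dataset_utils and is not shown in this diff; the guard above implies it can return an empty split when the dataset is tiny or the ratio is extreme. A plausible sketch, assuming a shuffle-then-slice implementation:

# hypothetical sketch of utils/dataset_utils.split_data
import random

def split_data(data: list, ratio: float) -> tuple[list, list]:
    shuffled = random.sample(data, len(data))  # shuffled copy
    cut = int(len(shuffled) * ratio)
    return shuffled[:cut], shuffled[cut:]      # (train, val); either may be empty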
+def run_train(request, token, model, dataset_root_path):
+    try:
+        # callback that reports progress
+        def send_data(trainer):
+            try:
+                # skip the first epoch
+                if trainer.epoch == 0:
+                    return
+                # remaining time in seconds
+                left_epochs = trainer.epochs - trainer.epoch
+                left_seconds = left_epochs * trainer.epoch_time
+                # losses: seg_loss, cls_loss, dfl_loss
+                loss = trainer.label_loss_items(loss_items=trainer.loss_items)
+                data = ReportData(
+                    epoch=trainer.epoch,              # current epoch
+                    total_epochs=trainer.epochs,      # total epochs
+                    seg_loss=loss["train/seg_loss"],  # seg loss
+                    cls_loss=loss["train/cls_loss"],  # cls loss
+                    dfl_loss=loss["train/dfl_loss"],  # dfl loss
+                    fitness=trainer.fitness,          # fitness
+                    epoch_time=trainer.epoch_time,    # duration of the previous epoch (measured at epoch start)
+                    left_seconds=left_seconds         # remaining time in seconds
+                )
+                # send the data
+                send_data_call_api(request.project_id, request.m_id, data, token)
+            except Exception as e:
+                raise HTTPException(status_code=500, detail=f"send_data exception: {e}")
+        # register the callback
+        model.add_callback("on_train_epoch_start", send_data)
+        # run training
+        try:
+            results = model.train(
+                data=join_path(dataset_root_path, "dataset.yaml"),
+                name=join_path(dataset_root_path, "result"),
+                epochs=request.epochs,
+                batch=request.batch,
+                lr0=request.lr0,
+                lrf=request.lrf,
+                optimizer=request.optimizer
+            )
+        except Exception as e:
+            raise HTTPException(status_code=500, detail=f"model train exception: {e}")
+        # report the final epoch
+        model.trainer.epoch += 1
+        send_data(model.trainer)
+        return results
+    except HTTPException as e:
+        raise e  # re-raise the HTTP exception
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"run_train exception: {e}")
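
The callback timing explains the two oddities above: Ultralytics fires "on_train_epoch_start" before each epoch, so the metrics available at that point describe the epoch that just finished (hence skipping epoch 0, which has no previous metrics), and the last epoch would never be reported without the manual trainer.epoch += 1 and send_data() call after training. Minimal demonstration of the hook:

# illustrative; prints the trainer state the reporting callback sees
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")
model.add_callback("on_train_epoch_start", lambda trainer: print(trainer.epoch, trainer.epochs))
model.train(data="coco8-seg.yaml", epochs=3)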

View File

@@ -20,24 +20,24 @@ def make_dir(path:str, init: bool):
         shutil.rmtree(path)
     os.makedirs(path, exist_ok=True)
-def make_yml(path:str, names):
+def make_yml(path:str, model_categories):
     data = {
         "train": f"{path}/train",
         "val": f"{path}/val",
         "nc": 80,
-        "names": names
+        "names": model_categories
     }
     with open(os.path.join(path, "dataset.yaml"), 'w') as f:
         yaml.dump(data, f)
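
For reference, make_yml() emits an Ultralytics dataset config like the one below. Note that "nc" stays hardcoded at 80 (the class count of the pretrained COCO models) instead of len(model_categories):

# illustrative: what make_yml writes for a two-category model (paths made up)
import yaml
print(yaml.dump({
    "train": "/datasets/project-1/train",
    "val": "/datasets/project-1/val",
    "nc": 80,                          # hardcoded, not len(model_categories)
    "names": {0: "person", 1: "car"},  # model.names is an {id: name} dict
}))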
-def process_directories(dataset_root_path:str, names:list[str]):
+def process_directories(dataset_root_path:str, model_categories:list[str]):
     """Create the directories needed for training"""
     make_dir(dataset_root_path, init=False)
     make_dir(os.path.join(dataset_root_path, "train"), init=True)
     make_dir(os.path.join(dataset_root_path, "val"), init=True)
     if os.path.exists(os.path.join(dataset_root_path, "result")):
         shutil.rmtree(os.path.join(dataset_root_path, "result"))
-    make_yml(dataset_root_path, names)
+    make_yml(dataset_root_path, model_categories)
 def process_image_and_label(data:TrainDataInfo, dataset_root_path:str, child_path:str, label_map:dict[int, int]|None):
     """Save the image and create the label file"""
@@ -59,9 +59,12 @@ def process_image_and_label(data:TrainDataInfo, dataset_root_path:str, child_pat
     label = json.loads(urllib.request.urlopen(data.data_url).read())
     # parse the label into training label data and write it
-    create_detection_train_label(label, label_path, label_map)
+    if label['task_type'] == "det":
+        create_detection_train_label(label, label_path, label_map)
+    elif label["task_type"] == "seg":
+        create_segmentation_train_label(label, label_path, label_map)
-def create_detection_train_label(label:LabelData, label_path:str, label_map:dict[int, int]|None):
+def create_detection_train_label(label:dict, label_path:str, label_map:dict[int, int]|None):
     with open(label_path, "w") as train_label_txt:
         for shape in label["shapes"]:
             train_label = []
@@ -76,6 +79,16 @@ def create_detection_train_label(label:LabelData, label_path:str, label_map:dict
             train_label.append(str((y2 - y1) / label["imageHeight"]))  # height
             train_label_txt.write(" ".join(train_label)+"\n")
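
The detection writer produces standard YOLO box lines, "class_id cx cy w h" with every value normalized to [0, 1]; the visible lines compute the height term, and the center and width terms are built the same way just above this hunk. For example:

# a class-0 box with corners (10, 20) and (50, 80) in a 100x100 image becomes:
# 0 0.3 0.5 0.4 0.6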
+def create_segmentation_train_label(label:dict, label_path:str, label_map:dict[int, int]|None):
+    with open(label_path, "w") as train_label_txt:
+        for shape in label["shapes"]:
+            train_label = []
+            train_label.append(str(label_map[shape["group_id"]]) if label_map else str(shape["group_id"]))  # label id
+            for x, y in shape["points"]:
+                train_label.append(str(x / label["imageWidth"]))
+                train_label.append(str(y / label["imageHeight"]))
+            train_label_txt.write(" ".join(train_label)+"\n")
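
create_segmentation_train_label() writes one polygon per line in the YOLO segmentation format, "class_id x1 y1 x2 y2 ..." with coordinates normalized by image size. A runnable trace of a single shape:

# illustrative: what one loop iteration writes for a triangle of class 2
shape = {"group_id": 2, "points": [(10, 10), (50, 90), (90, 10)]}
width = height = 100
coords = [str(c / s) for x, y in shape["points"] for c, s in ((x, width), (y, height))]
print(" ".join([str(shape["group_id"])] + coords))  # -> "2 0.1 0.1 0.5 0.9 0.9 0.1"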
 def join_path(path, *paths):
     """Same as os.path.join(); made because I didn't want to import os"""
     return os.path.join(path, *paths)