Merge branch 'ai/feat/segmentation' into 'ai/develop'

Feat: Modify segmentation train response

See merge request s11-s-project/S11P21S002!198
김용수 2024-09-26 21:26:10 +09:00
commit 7bcb4da904
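
The diff below returns the training result as a TrainResponse model instead of an ad-hoc dict. The schema itself (schemas/train_response.py) is not part of this diff; the following is a minimal sketch of what it presumably looks like, with field names taken from the constructor call in segmentation_train and all types assumed:

from pydantic import BaseModel

class TrainResponse(BaseModel):
    modelKey: str      # key of the saved best.pt model
    precision: float   # metrics/precision(M)
    recall: float      # metrics/recall(M)
    mAP50: float       # metrics/mAP50(M)
    mAP5095: float     # metrics/mAP50-95(M)
    fitness: float     # overall fitness score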


@@ -3,6 +3,7 @@ from schemas.predict_request import PredictRequest
 from schemas.train_request import TrainRequest
 from schemas.predict_response import PredictResponse, LabelData
 from schemas.train_report_data import ReportData
+from schemas.train_response import TrainResponse
 from services.load_model import load_segmentation_model
 from services.create_model import save_model
 from utils.dataset_utils import split_data
@@ -91,40 +92,50 @@ def get_random_color():
 @router.post("/train")
-async def segmentation_train(request: TrainRequest, http_request: Request):
+async def segmentation_train(request: TrainRequest):
     send_slack_message(f"train 요청{request}", status="success")
-    # Authorization 헤더에서 Bearer 토큰 추출
-    auth_header = http_request.headers.get("Authorization")
-    token = auth_header.split(" ")[1] if auth_header and auth_header.startswith("Bearer ") else None
-    # 레이블 맵
-    inverted_label_map = {value: key for key, value in request.label_map.items()} if request.label_map else None
-    # 데이터셋 루트 경로 얻기
-    dataset_root_path = get_dataset_root_path(request.project_id)
-    # 모델 로드
-    model = get_model(request)
-    # 학습할 모델 카테고리, 카테고리가 추가되는 경우 추가 작업 필요
-    model_categories = model.names
-    # 데이터 전처리
-    preprocess_dataset(dataset_root_path, model_categories, request.data, request.ratio, inverted_label_map)
-    # 학습
-    results = run_train(request, token, model, dataset_root_path)
-    # best 모델 저장
-    model_key = save_model(project_id=request.project_id, path=join_path(dataset_root_path, "result", "weights", "best.pt"))
-    response = {"model_key": model_key, "results": results.results_dict}
-    send_slack_message(f"train 성공{response}", status="success")
-    return response
+    try:
+        # 레이블 맵
+        inverted_label_map = {value: key for key, value in request.label_map.items()} if request.label_map else None
+        # 데이터셋 루트 경로 얻기
+        dataset_root_path = get_dataset_root_path(request.project_id)
+        # 모델 로드
+        model = get_model(request)
+        # 학습할 모델 카테고리, 카테고리가 추가되는 경우 추가 작업 필요
+        model_categories = model.names
+        # 데이터 전처리
+        preprocess_dataset(dataset_root_path, model_categories, request.data, request.ratio, inverted_label_map)
+        # 학습
+        results = run_train(request, model, dataset_root_path)
+        # best 모델 저장
+        model_key = save_model(project_id=request.project_id, path=join_path(dataset_root_path, "result", "weights", "best.pt"))
+        result = results.results_dict
+        response = TrainResponse(
+            modelKey=model_key,
+            precision=result["metrics/precision(M)"],
+            recall=result["metrics/recall(M)"],
+            mAP50=result["metrics/mAP50(M)"],
+            mAP5095=result["metrics/mAP50-95(M)"],
+            fitness=result["fitness"]
+        )
+        send_slack_message(f"train 성공{response}", status="success")
+        return response
+    except HTTPException as e:
+        raise e
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
 
 def preprocess_dataset(dataset_root_path, model_categories, data, ratio, label_map):
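
Note on the metric keys read from results.results_dict above: for Ultralytics YOLO segmentation runs, keys with an (M) suffix are the mask metrics, while the box-level counterparts carry a (B) suffix. A rough illustration of the dict shape this code relies on (values are invented for the example):

# Illustrative only: keys as used in the diff, values made up.
result = {
    "metrics/precision(M)": 0.91,
    "metrics/recall(M)": 0.88,
    "metrics/mAP50(M)": 0.90,
    "metrics/mAP50-95(M)": 0.72,
    "fitness": 0.74,
}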
@@ -150,7 +161,7 @@ def preprocess_dataset(dataset_root_path, model_categories, data, ratio, label_map):
     except Exception as e:
         raise HTTPException(status_code=500, detail="preprocess dataset exception: " + str(e))
 
-def run_train(request, token, model, dataset_root_path):
+def run_train(request, model, dataset_root_path):
     try:
         # 데이터 전송 콜백함수
         def send_data(trainer):
@@ -168,7 +179,7 @@ def run_train(request, token, model, dataset_root_path):
                 data = ReportData(
                     epoch=trainer.epoch,  # 현재 에포크
                     total_epochs=trainer.epochs,  # 전체 에포크
-                    seg_loss=loss["train/seg_loss"],  # seg loss
+                    box_loss=loss["train/box_loss"],  # box loss
                     cls_loss=loss["train/cls_loss"],  # cls loss
                     dfl_loss=loss["train/dfl_loss"],  # dfl loss
                     fitness=trainer.fitness,  # 적합도
@@ -176,7 +187,7 @@ def run_train(request, token, model, dataset_root_path):
                     left_seconds=left_seconds  # 남은 시간(초)
                 )
                 # 데이터 전송
-                send_data_call_api(request.project_id, request.m_id, data, token)
+                send_data_call_api(request.project_id, request.m_id, data)
             except Exception as e:
                 raise HTTPException(status_code=500, detail=f"send_data exception: {e}")
@@ -184,23 +195,19 @@ def run_train(request, token, model, dataset_root_path):
         model.add_callback("on_train_epoch_start", send_data)
 
         # 학습 실행
-        try:
-            results = model.train(
-                data=join_path(dataset_root_path, "dataset.yaml"),
-                name=join_path(dataset_root_path, "result"),
-                epochs=request.epochs,
-                batch=request.batch,
-                lr0=request.lr0,
-                lrf=request.lrf,
-                optimizer=request.optimizer
-            )
-        except Exception as e:
-            raise HTTPException(status_code=500, detail=f"model train exception: {e}")
+        results = model.train(
+            data=join_path(dataset_root_path, "dataset.yaml"),
+            name=join_path(dataset_root_path, "result"),
+            epochs=request.epochs,
+            batch=request.batch,
+            lr0=request.lr0,
+            lrf=request.lrf,
+            optimizer=request.optimizer
+        )
 
         # 마지막 에포크 전송
         model.trainer.epoch += 1
         send_data(model.trainer)
 
         return results
     except HTTPException as e:
@@ -211,3 +218,6 @@ def run_train(request, token, model, dataset_root_path):
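
With the Authorization token gone, a client only needs the training payload, and the endpoint now returns the serialized TrainResponse instead of the raw results dict. A hypothetical end-to-end call is sketched below; the host, port, and route prefix are assumptions (only the "/train" route is visible in this diff), payload values are illustrative, and field names simply follow the request attributes referenced above — the actual TrainRequest schema may use different names or aliases.

import requests

payload = {
    "project_id": 1,
    "m_id": 1,
    "label_map": {"0": "person"},  # assumed shape of the label map
    "data": [],                    # image/label references consumed by preprocess_dataset
    "ratio": 0.8,
    "epochs": 10,
    "batch": 16,
    "lr0": 0.01,
    "lrf": 0.01,
    "optimizer": "auto",
}
resp = requests.post("http://localhost:8000/train", json=payload)
# Expected body shape (FastAPI serializes the TrainResponse model):
# {"modelKey": "...", "precision": 0.91, "recall": 0.88,
#  "mAP50": 0.90, "mAP5095": 0.72, "fitness": 0.74}
print(resp.json())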