딥러닝 (1)

이미지
 의도치 않게 딥러닝 3일차 과제를 하는 중에 구글의 사과를 받는 일이 발생했다. 문제의 코드 일부분은 다음과 같다.
from IPython.display import clear_output
import matplotlib.pyplot as plt

# 1. Clear all existing plots from memory immediately
plt.close('all')

# 2. Setup model
model = RegularizedMLP(dropout_rate=0.3, use_batchnorm=True).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.BCELoss()

# 3. Train - We add a cleanup step immediately after
print("Training started...")
model, reg_history = train_model(
    model, train_loader, val_loader, criterion, optimizer,
    num_epochs=100, patience=20, device=device
)

# 4. THE CRITICAL FIX: Wipe all the "repeated" graphs created during training
clear_output(wait=True)

# 5. Show ONLY the final result
print("=" * 50)
print("FINAL RESULT ONLY")
print("=" * 50)
plt.figure(figsize=(10, 5))
plot_history(reg_history, "Regularized Model")
plt.show()

머신 러닝 (30) 미니 프로젝트

  !pip install catboost optuna shap -q
!pip install koreanize_matplotlib -q
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import koreanize_matplotlib
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_auc_score, f1_score
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
# 하이퍼파라미터 최적화
import optuna
# 설명 가능한 AI
import shap
RANDOM_STATE = 42
np.random.seed(RANDOM_STATE)
# 차트에서 깨짐 현상 방지
plt.rcParams['axes.unicode_minus'] = False
optuna.logging.set_verbosity(optuna.logging.WARNING)
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
    print(f'...

머신 러닝(29) SHAP

  # 필요한 라이브러리 설치
!pip install shap catboost -q
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# SHAP
import shap
shap.initjs()  # JavaScript 시각화 초기화
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# 랜덤 시드 고정
RANDOM_STATE = 42
np.random.seed(RANDOM_STATE)
# 한글 폰트 설정
plt.rcParams['font.family'] = 'DejaVu Sans'
plt.rcParams['axes.unicode_minus'] = False
print("라이브러리 임포트 완료!")
print(f"SHAP 버전: {shap.__version__}")
print("Loading Pima Indians Diabetes Dataset...")
pima = fetch_openml(name='diabetes', version=1, as_frame=True)
X = pima.data
y = ...