This article is part 7 of the Kaggle competition practice series: an alternative solution based on LightGBM.
This time we switch approaches: Wrapper-based feature selection + a LightGBM model + TPE hyperparameter tuning.
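The snippets below assume the following imports are in place (a minimal setup sketch covering the libraries used throughout this article; an older LightGBM/hyperopt/pandas stack is assumed, since some of the interfaces used here have changed in newer releases):
import numpy as np
import pandas as pd
import lightgbm as lgb
from numpy.random import RandomState
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from hyperopt import hp, fmin, tpe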
Next comes feature selection. Here we use a Wrapper approach: fit a LightGBM model on all of the features, inspect the resulting feature importances, and keep the 300 most important features. To make the selection more reliable, we run several rounds of cross-validation: record the feature importances from each fold and simply sum them at the end. The whole procedure can be wrapped in the following function:
def feature_select_wrapper(train, test):
    # Part 1. Split out the feature names, dropping the ID and label columns
    print('feature_select_wrapper...')
    label = 'target'
    features = train.columns.tolist()
    features.remove('card_id')
    features.remove('target')

    # Part 2. Configure the LightGBM parameters
    # Model parameters
    params_initial = {
        'num_leaves': 31,
        'learning_rate': 0.1,
        'boosting': 'gbdt',
        'min_child_samples': 20,
        'bagging_seed': 2020,
        'bagging_fraction': 0.7,
        'bagging_freq': 1,
        'feature_fraction': 0.7,
        'max_depth': -1,
        'metric': 'rmse',
        'reg_alpha': 0,
        'reg_lambda': 1,
        'objective': 'regression'
    }
    # Control parameters
    # Early-stopping rounds on the validation set
    ESR = 30
    # Maximum number of boosting rounds
    NBR = 10000
    # Print interval
    VBE = 50

    # Part 3. Cross-validated feature importance
    # Instantiate the splitter
    kf = KFold(n_splits=5, random_state=2020, shuffle=True)
    # Empty container for the accumulated feature importances
    fse = pd.Series(0, index=features)
    for train_part_index, eval_index in kf.split(train[features], train[label]):
        # Wrap the training fold
        train_part = lgb.Dataset(train[features].loc[train_part_index],
                                 train[label].loc[train_part_index])
        # Wrap the validation fold
        eval = lgb.Dataset(train[features].loc[eval_index],
                           train[label].loc[eval_index])
        # Train on the training fold while monitoring the validation fold
        bst = lgb.train(params_initial, train_part, num_boost_round=NBR,
                        valid_sets=[train_part, eval],
                        valid_names=['train', 'valid'],
                        early_stopping_rounds=ESR, verbose_eval=VBE)
        # Accumulate this fold's feature importances
        fse += pd.Series(bst.feature_importance(), features)

    # Part 4. Keep the 300 most important features
    feature_select = ['card_id'] + fse.sort_values(ascending=False).index.tolist()[:300]
    print('done')
    return train[feature_select + ['target']], test[feature_select]
Finally, call the function:
train_LGBM, test_LGBM = feature_select_wrapper(train, test)
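As a quick sanity check (a hypothetical snippet, not part of the original workflow), the returned frames should contain card_id plus the 300 selected features, with target kept only on the training side:
print(train_LGBM.shape)  # expected (n_train_rows, 302): card_id + 300 features + target
print(test_LGBM.shape)   # expected (n_test_rows, 301): card_id + 300 features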
Part 4: Training with LightGBM — start with a hyperparameter search.
First, pin down a subset of parameters so that they cannot fall back to their default values later:
def params_append(params):
    params['feature_pre_filter'] = False
    params['objective'] = 'regression'
    params['metric'] = 'rmse'
    params['bagging_seed'] = 2020
    return params
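As a quick illustration (hypothetical call), passing in a partial parameter dictionary returns the same dictionary with the fixed keys filled in:
params_append({'learning_rate': 0.05})
# {'learning_rate': 0.05, 'feature_pre_filter': False, 'objective': 'regression',
#  'metric': 'rmse', 'bagging_seed': 2020}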
Next, wrap the whole search in a function, param_hyperopt(train), which takes the training data and returns the best parameters found:
def param_hyperopt(train):
    # Part 1. Split out the feature names, dropping the ID and label columns
    label = 'target'
    features = train.columns.tolist()
    features.remove('card_id')
    features.remove('target')

    # Part 2. Wrap the training data
    train_data = lgb.Dataset(train[features], train[label])

    # Part 3. Inner objective: takes a set of hyperparameters, returns the loss
    def hyperopt_objective(params):
        # Merge in the fixed parameters
        params = params_append(params)
        print(params)
        # Use lgb.cv (LightGBM's built-in cross-validation) to get the best
        # validation loss under this set of hyperparameters
        res = lgb.cv(params, train_data, 1000,
                     nfold=2,
                     stratified=False,
                     shuffle=True,
                     metrics='rmse',
                     early_stopping_rounds=20,
                     verbose_eval=False,
                     show_stdv=False,
                     seed=2020)
        return min(res['rmse-mean'])

    # Part 4. Define the search space
    params_space = {
        'learning_rate': hp.uniform('learning_rate', 1e-2, 5e-1),
        'bagging_fraction': hp.uniform('bagging_fraction', 0.5, 1),
        'feature_fraction': hp.uniform('feature_fraction', 0.5, 1),
        'num_leaves': hp.choice('num_leaves', list(range(10, 300, 10))),
        'reg_alpha': hp.randint('reg_alpha', 0, 10),
        'reg_lambda': hp.uniform('reg_lambda', 0, 10),
        'bagging_freq': hp.randint('bagging_freq', 1, 10),
        'min_child_samples': hp.choice('min_child_samples', list(range(1, 30, 5)))
    }  # hp.uniform samples from a continuous range; hp.choice picks from a discrete list

    # Part 5. TPE hyperparameter search
    params_best = fmin(
        hyperopt_objective,        # objective function
        space=params_space,        # search space
        algo=tpe.suggest,          # search algorithm (TPE)
        max_evals=30,
        rstate=RandomState(2020))
    return params_best
Pass in the training data to test the function:
best_clf = param_hyperopt(train_LGBM)
This yields the best parameters, best_clf:
{'bagging_fraction': 0.9022336069269954,
'bagging_freq': 2,
'feature_fraction': 0.9373662317255621,
'learning_rate': 0.014947332175194025,
'min_child_samples': 5,
'num_leaves': 7,
'reg_alpha': 2,
'reg_lambda': 3.5907566887206896}
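Note that for dimensions defined with hp.choice, fmin returns the index of the chosen option rather than the option itself, so num_leaves: 7 above refers to the 8th entry of range(10, 300, 10), i.e. 80 leaves, and min_child_samples: 5 refers to 26. If the actual values are needed, hyperopt's space_eval can map the result back (a sketch, assuming params_space is accessible outside param_hyperopt):
from hyperopt import space_eval
best_params = space_eval(params_space, best_clf)
print(best_params['num_leaves'], best_params['min_child_samples'])  # 80, 26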
Part 5: The actual training stage.
# Prepare the data
label = 'target'
features = train_LGBM.columns.tolist()
features.remove('card_id')
features.remove('target')
# Wrap the data
lgb_train = lgb.Dataset(train_LGBM[features], train_LGBM[label])
# Train the model on the full training set
bst = lgb.train(best_clf, lgb_train)
# Predict on the training set
bst.predict(train_LGBM[features])
# Quick look at the training-set RMSE
np.sqrt(mean_squared_error(train_LGBM[label], bst.predict(train_LGBM[features])))
# Next, predict on the test set and write the result to a local file
test_LGBM['target'] = bst.predict(test_LGBM[features])
test_LGBM[['card_id', 'target']].to_csv('result/submission_LGBM.csv', index=False)
Submitting this to Kaggle, the score turned out to be worse than the random forest, so we instead use cross-validation and average the per-fold predictions (mean ensembling):
def train_predict(train, test, params):
    # Part 1. Split out the feature names, dropping the ID and label columns
    label = 'target'
    features = train.columns.tolist()
    features.remove('card_id')
    features.remove('target')

    # Part 2. Re-apply the fixed parameters and declare the iteration control parameters
    params = params_append(params)
    ESR = 30
    NBR = 10000
    VBE = 50

    # Part 3. Create result containers
    # Accumulated test-set predictions, later saved to a local file
    prediction_test = 0
    # Validation scores of each fold, for display
    cv_score = []
    # Out-of-fold predictions on the training set, later saved to a local file
    prediction_train = pd.Series()

    # Part 4. Cross-validation
    kf = KFold(n_splits=5, random_state=2020, shuffle=True)
    for train_part_index, eval_index in kf.split(train[features], train[label]):
        # Wrap the training fold
        train_part = lgb.Dataset(train[features].loc[train_part_index],
                                 train[label].loc[train_part_index])
        # Wrap the validation fold
        eval = lgb.Dataset(train[features].loc[eval_index],
                           train[label].loc[eval_index])
        # Train on the training fold, monitoring the validation fold
        bst = lgb.train(params, train_part, num_boost_round=NBR,
                        valid_sets=[train_part, eval],
                        valid_names=['train', 'valid'],
                        early_stopping_rounds=ESR, verbose_eval=VBE)
        # Predict on the test set and accumulate into prediction_test
        prediction_test += bst.predict(test[features])
        # Predict on the validation fold and append to prediction_train
        prediction_train = prediction_train.append(pd.Series(bst.predict(train[features].loc[eval_index]),
                                                             index=eval_index))
        # Validation-fold predictions
        eval_pre = bst.predict(train[features].loc[eval_index])
        # Validation-fold RMSE
        score = np.sqrt(mean_squared_error(train[label].loc[eval_index].values, eval_pre))
        # Collect into cv_score
        cv_score.append(score)

    # Part 5. Print / save the results
    # Print the per-fold scores and their mean
    print(cv_score, sum(cv_score) / 5)
    # Save the out-of-fold predictions on the training set to a local file
    pd.Series(prediction_train.sort_index().values).to_csv('preprocess/train_lightgbm.csv', index=False)
    # Save the averaged test-set predictions to a local file
    pd.Series(prediction_test / 5).to_csv('preprocess/test_lightgbm.csv', index=False)
    # Use the average over the 5 folds as the final test-set prediction
    test['target'] = prediction_test / 5
    # Write the test-set predictions in the competition submission format
    test[['card_id', 'target']].to_csv('result/submission_lightgbm.csv', index=False)
    return
Finally, run the full pipeline and compute the score:
train_LGBM, test_LGBM = feature_select_wrapper(train, test)
best_clf = param_hyperopt(train_LGBM)
train_predict(train_LGBM, test_LGBM, best_clf)
The resulting score improves on all of the earlier submissions.