train_gpt2_fp32.cu

2024-05-10 09:36
Tags: train cu gpt2 fp32

This post walks through train_gpt2_fp32.cu, in the hope that it serves as a useful reference for developers working with the code. Let's dig in.

Source

llm.c/train_gpt2_fp32.cu at master · karpathy/llm.c (github.com)

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <float.h>
#include <string.h>
#include <unistd.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cublasLt.h>
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
#include "utils.h"
#include "tokenizer.h"#define CEIL_DIV(M, N) (((M) + (N)-1) / (N))void cudaCheck(cudaError_t error, const char *file, int line) {if (error != cudaSuccess) {printf("[CUDA ERROR] at file %s:%d:\n%s\n", file, line,cudaGetErrorString(error));exit(EXIT_FAILURE);}
};
#define cudaCheck(err) (cudaCheck(err, __FILE__, __LINE__))void cublasCheck(cublasStatus_t status, const char *file, int line)
{if (status != CUBLAS_STATUS_SUCCESS) {printf("[cuBLAS ERROR]: %d %s %d\n", status, file, line);exit(EXIT_FAILURE);}
}
#define cublasCheck(status) { cublasCheck((status), __FILE__, __LINE__); }static size_t cublaslt_workspace_size = 32 * 1024 * 1024;
static void* cublaslt_workspace = NULL;
static cublasComputeType_t cublas_compute_type;
cublasHandle_t cublas_handle;
cublasLtHandle_t cublaslt_handle;

namespace cg = cooperative_groups;

__device__ inline float4 add_float4(const float4& a, const float4& b) {
    return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}

__global__ void encoder_forward_kernel3(float4* out,
                                        const int* inp, const float4* wte, const float4* wpe,
                                        int B, int T, int C) {
    int C4 = C / 4;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int N = B * T * C4;
    if (idx < N) {
        int bt = idx / C4;
        int b = bt / T;
        int t = bt % T;
        int c4 = idx % C4;
        int ix = inp[b * T + t];
        out[b * T * C4 + t * C4 + c4] = add_float4(wte[ix * C4 + c4], wpe[t * C4 + c4]);
    }
}

__global__ void encoder_backward_kernel(float* dwte, float* dwpe,
                                        const float* dout, const int* inp,
                                        int B, int T, int C) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int N = B * T * C;
    if (idx < N) {
        int bt = idx / C;
        int b = bt / T;
        int t = bt % T;
        int c = idx % C;
        int ix = inp[b * T + t];
        const float* dout_btc = dout + b * T * C + t * C + c;
        float* dwte_ix = dwte + ix * C + c;
        float* dwpe_tc = dwpe + t * C + c;
        atomicAdd(dwte_ix, *dout_btc);
        atomicAdd(dwpe_tc, *dout_btc);
    }
}

__global__ void layernorm_forward_kernel3(float* __restrict__ out, float* __restrict__ mean, float* __restrict__ rstd,
                                          const float* __restrict__ inp, const float* __restrict__ weight,
                                          const float* __restrict__ bias, int N, int C) {
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    int idx = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();
    if(idx >= N) {
        return;
    }
    const float* x = inp + idx * C;
    // mean
    float sum = 0.0f;
    for (int i = warp.thread_rank(); i < C; i += warp.size()) {
        sum += x[i];
    }
    sum = cg::reduce(warp, sum, cg::plus<float>{});
    float m = sum / C;
    if(warp.thread_rank() == 0 && mean != nullptr) {
        __stcs(mean + idx, m);
    }
    // rstd
    sum = 0.0f;
    for (int i = warp.thread_rank(); i < C; i += warp.size()) {
        float diff = x[i] - m;
        sum += diff * diff;
    }
    sum = cg::reduce(warp, sum, cg::plus<float>{});
    float s = rsqrtf(sum / C + 1e-5f);
    if(warp.thread_rank() == 0 && rstd != nullptr) {
        __stcs(rstd + idx, s);
    }
    // normalize, scale and shift
    float* o = out + idx * C;
    for (int c = warp.thread_rank(); c < C; c += warp.size()) {
        float n = s * (__ldcs(x+c) - m);
        __stcs(o+c, n * weight[c] + bias[c]);
    }
}

__global__ void permute_kernel(float* q, float* k, float* v,
                               const float* inp,
                               int B, int N, int NH, int d) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < B * NH * N * d) {
        int b = idx / (NH * N * d);
        int rest = idx % (NH * N * d);
        int nh_ = rest / (N * d);
        rest = rest % (N * d);
        int n = rest / d;
        int d_ = rest % d;
        int inp_idx = (b * N * 3 * NH * d) + (n * 3 * NH * d) + (0 * NH * d) + (nh_ * d) + d_;
        q[idx] = __ldcs(&inp[inp_idx]);
        k[idx] = __ldcs(&inp[inp_idx + NH * d]);
        v[idx] = __ldcs(&inp[inp_idx + 2 * (NH * d)]);
    }
}

__global__ void permute_kernel_backward(float* dinp,
                                        const float* dq, const float* dk, const float* dv,
                                        int B, int N, int NH, int d) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < B * NH * N * d) {
        int b = idx / (NH * N * d);
        int rest = idx % (NH * N * d);
        int nh_ = rest / (N * d);
        rest = rest % (N * d);
        int n = rest / d;
        int d_ = rest % d;
        int inp_idx = (b * N * 3 * NH * d) + (n * 3 * NH * d) + (0 * NH * d) + (nh_ * d) + d_;
        dinp[inp_idx] = dq[idx];
        dinp[inp_idx + NH * d] = dk[idx];
        dinp[inp_idx + 2 * (NH * d)] = dv[idx];
    }
}

__global__ void unpermute_kernel(float* inp, float *out, int B, int N, int NH, int d) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < B * NH * N * d) {
        int b = idx / (NH * N * d);
        int rest = idx % (NH * N * d);
        int nh_ = rest / (N * d);
        rest = rest % (N * d);
        int n = rest / d;
        int d_ = rest % d;
        int other_idx = (b * NH * N * d) + (n * NH * d) + (nh_ * d) + d_;
        out[other_idx] = __ldcs(&inp[idx]);
    }
}

__global__ void unpermute_kernel_backward(float* dinp, const float *dout, int B, int N, int NH, int d) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < B * NH * N * d) {
        int b = idx / (NH * N * d);
        int rest = idx % (NH * N * d);
        int nh_ = rest / (N * d);
        rest = rest % (N * d);
        int n = rest / d;
        int d_ = rest % d;
        int other_idx = (b * NH * N * d) + (n * NH * d) + (nh_ * d) + d_;
        dinp[idx] = dout[other_idx];
    }
}

__device__ float& vec_at(float4& vec, int index) {
    return reinterpret_cast<float*>(&vec)[index];
}

__device__ float vec_at(const float4& vec, int index) {
    return reinterpret_cast<const float*>(&vec)[index];
}

__global__ void softmax_forward_kernel5(float* out, float inv_temperature, const float* inp, int N, int T) {
    assert(T % 4 == 0);
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    int idx = (gridDim.x - blockIdx.x - 1) * warp.meta_group_size() + warp.meta_group_rank();
    if(idx >= N * T) {
        return;
    }
    int own_pos = idx % T;
    int pos_by_4 = own_pos / 4;
    const float* x = inp + idx * T;
    // online softmax: keep a running maximum and a running sum rescaled by it
    float maxval = -FLT_MAX;
    float sumval = 0.0f;
    const float4* x_vec = reinterpret_cast<const float4*>(x);
    for (int i = warp.thread_rank(); i < pos_by_4; i += warp.size()) {
        float4 v = x_vec[i];
        float old_maxval = maxval;
        for(int k = 0; k < 4; ++k) {
            maxval = fmaxf(maxval, vec_at(v, k));
        }
        sumval *= expf(inv_temperature * (old_maxval - maxval));
        for(int k = 0; k < 4; ++k) {
            sumval += expf(inv_temperature * (vec_at(v, k) - maxval));
        }
    }
    if(4*pos_by_4 + warp.thread_rank() <= own_pos) {
        float old_maxval = maxval;
        maxval = fmaxf(maxval, x[4*pos_by_4 + warp.thread_rank()]);
        sumval *= expf(inv_temperature * (old_maxval - maxval));
        sumval += expf(inv_temperature * (x[4*pos_by_4 + warp.thread_rank()] - maxval));
    }
    float global_maxval = cg::reduce(warp, maxval, cg::greater<float>{});
    sumval *= expf(inv_temperature * (maxval - global_maxval));
    float sum = cg::reduce(warp, sumval, cg::plus<float>{});
    float norm = 1.f / sum;
    for (int i = warp.thread_rank(); i <= own_pos; i += warp.size()) {
        float ev = expf(inv_temperature * (__ldcs(x + i) - global_maxval));
        __stcs(out + idx * T + i, ev * norm);
    }
}

__global__ void residual_forward_kernel(float* out, float* inp1, float* inp2, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        out[idx] = __ldcs(&inp1[idx]) + __ldcs(&inp2[idx]);
    }
}

#define GELU_SCALING_FACTOR sqrtf(2.0f / M_PI)
__global__ void gelu_forward_kernel(float* out, const float* inp, int N) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        float xi = inp[i];
        float cube = 0.044715f * xi * xi * xi;
        out[i] = 0.5f * xi * (1.0f + tanhf(GELU_SCALING_FACTOR * (xi + cube)));
    }
}

__global__ void gelu_backward_kernel(float* dinp, const float* inp, const float* dout, const int N) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        float x = inp[i];
        float cube = 0.044715f * x * x * x;
        float tanh_arg = GELU_SCALING_FACTOR * (x + cube);
        float tanh_out = tanhf(tanh_arg);
        float coshf_out = coshf(tanh_arg);
        float sech_out = 1.0f / (coshf_out * coshf_out);
        float local_grad = 0.5f * (1.0f + tanh_out) + x * 0.5f * sech_out * GELU_SCALING_FACTOR * (1.0f + 3.0f * 0.044715f * x * x);
        dinp[i] = local_grad * dout[i];
    }
}

__global__ void matmul_backward_bias_kernel4(float* dbias, const float* dout, int B, int T, int OC) {
    extern __shared__ float smem[];
    const int warp_id = threadIdx.x / warpSize;
    const int lane_id = threadIdx.x % warpSize;
    const int tl = blockIdx.x * warpSize;
    const int vstep = blockDim.x / warpSize;
    const float* dout_col = dout + tl + lane_id;
    float dout_sum = 0.0f;
    for (int row = warp_id; row < B * T; row += vstep) {
        dout_sum += dout_col[row * OC];
    }
    smem[lane_id + warp_id * warpSize] = dout_sum;
    __syncthreads();
    dout_sum = 0.0f;
    if (warp_id == 0) {
        for (int j = 0; j < vstep; j++) {
            dout_sum += smem[lane_id + j * warpSize];
        }
        dbias[tl + lane_id] += dout_sum;
    }
}

__global__ void layernorm_backward_kernel2(float* dinp, float* dweight, float* dbias,
                                           const float* dout, const float* inp, const float* weight, const float* mean, const float* rstd,
                                           int B, int T, int C) {
    extern __shared__ float shared[]; // size = 2 * C floats
    namespace cg = cooperative_groups;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    int idx = blockIdx.x * warp.meta_group_size() + warp.meta_group_rank();
    int N = B * T;
    if(idx >= N) { return; } // thread guards
    int b = idx / T;
    int t = idx % T;
    const float* dout_bt = dout + b * T * C + t * C;
    const float* inp_bt = inp + b * T * C + t * C;
    float* dinp_bt = dinp + b * T * C + t * C;
    const float mean_bt = mean[b * T + t];
    const float rstd_bt = rstd[b * T + t];
    float* dbias_shared = shared;
    float* dweight_shared = shared + C;
    #pragma unroll
    for(int i = threadIdx.x; i < C; i += blockDim.x){
        dbias_shared[i] = 0.0f;
        dweight_shared[i] = 0.0f;
    }
    __syncthreads();
    // first pass: two reductions
    float dnorm_mean = 0.0f;
    float dnorm_norm_mean = 0.0f;
    for (int i = warp.thread_rank(); i < C; i += warp.size()) {
        float norm_bti = (inp_bt[i] - mean_bt) * rstd_bt;
        float dnorm_i = weight[i] * dout_bt[i];
        dnorm_mean += dnorm_i;
        dnorm_norm_mean += dnorm_i * norm_bti;
    }
    dnorm_mean = cg::reduce(warp, dnorm_mean, cg::plus<float>{});
    dnorm_norm_mean = cg::reduce(warp, dnorm_norm_mean, cg::plus<float>{});
    dnorm_mean = dnorm_mean / C;
    dnorm_norm_mean = dnorm_norm_mean / C;
    // second pass: accumulate all the gradients
    for (int i = warp.thread_rank(); i < C; i += warp.size()) {
        float norm_bti = (inp_bt[i] - mean_bt) * rstd_bt;
        float dnorm_i = weight[i] * dout_bt[i];
        atomicAdd(&dbias_shared[i], dout_bt[i]);
        atomicAdd(&dweight_shared[i], norm_bti * dout_bt[i]);
        float dval = 0.0f;
        dval += dnorm_i;
        dval -= dnorm_mean;
        dval -= norm_bti * dnorm_norm_mean;
        dval *= rstd_bt;
        dinp_bt[i] += dval;
    }
    __syncthreads();
    for(int i = threadIdx.x; i < C; i += blockDim.x){
        atomicAdd(&dbias[i], dbias_shared[i]);
        atomicAdd(&dweight[i], dweight_shared[i]);
    }
}

__global__ void softmax_autoregressive_backward_kernel(float* dpreatt, const float* datt, const float* att,
                                                       int B, int T, int C, float scale) {
    constexpr const int BlockSize = 256;
    constexpr int T_per_block = 4;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    __shared__ float block_acc[32];
    int idx = blockIdx.y;
    int t0 = T - 1 - T_per_block*blockIdx.x;
    att += idx * T * T;
    datt += idx * T * T;
    dpreatt += idx * T * T;
    if (warp.meta_group_rank() == 0) {
        block_acc[warp.thread_rank()] = 0;
    }
    for(int to = 0; to < T_per_block; ++to) {
        int t = t0 - to;
        if(t < 0) return;
        const float* att_bth = att + t * T;
        const float* datt_bth = datt + t * T;
        float* dpreatt_bth = dpreatt + t * T;
        float local_sum = 0;
        for (int t2 = block.thread_rank(); t2 <= t; t2 += BlockSize) {
            local_sum += att_bth[t2] * datt_bth[t2];
        }
        block_acc[warp.meta_group_rank()] = cg::reduce(warp, local_sum, cg::plus<float>{});
        block.sync();
        local_sum = cg::reduce(warp, block_acc[warp.thread_rank()], cg::plus<float>{});
        for (int t3 = block.thread_rank(); t3 <= t; t3 += BlockSize) {
            float acc = __ldcs(att_bth + t3) * (__ldcs(datt_bth + t3) - local_sum);
            __stcs(dpreatt_bth + t3, scale * acc);
        }
    }
}

// linear interpolation, used by the AdamW moment updates below
__device__ inline float lerp(float start, float end, float weight) {
    return fma(weight, end, fma(-weight, start, start));
}

__global__ void adamw_kernel2(float* params_memory, float* grads_memory, float* m_memory, float* v_memory, long num_parameters,
                              float learning_rate, float beta1, float beta2, float beta1_correction, float beta2_correction, float eps, float weight_decay) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_parameters) return;
    float grad = grads_memory[i];
    float m = m_memory[i];
    float v = v_memory[i];
    // update the first moment (momentum)
    m = lerp(grad, m, beta1);
    m_memory[i] = m;
    // update the second moment (RMSprop)
    v = lerp(grad * grad, v, beta2);
    v_memory[i] = v;
    m /= beta1_correction;
    v /= beta2_correction;
    params_memory[i] -= learning_rate * (m / (sqrtf(v) + eps) + weight_decay * params_memory[i]);
}

struct SoftmaxParams {
    float Scale;
    float Offset;
};

__device__ SoftmaxParams prepare_softmax_blockwide_nofloat4(cg::thread_block_tile<32>& warp,
                                                            int idx, const float* inp, int V, int P) {
    const float* x = inp + idx * P;
    float thread_maxval = -INFINITY;
    float thread_sumval = 0.0f;
    for (int i = V + threadIdx.x - blockDim.x; i >= 0; i -= blockDim.x) {
        float v = x[i];
        float old_maxval = thread_maxval;
        thread_maxval = fmaxf(thread_maxval, v);
        thread_sumval *= expf((old_maxval - thread_maxval));
        thread_sumval += expf(v - thread_maxval);
    }
    // two-level reduction: first within each warp, then across warps via shared memory
    __shared__ float shared_maxval[32];
    __shared__ float shared_sumval[32];
    int num_warps = blockDim.x / 32;
    int warp_id = threadIdx.x / 32;
    int lane_id = threadIdx.x % 32;
    float warp_maxval = cg::reduce(warp, thread_maxval, cg::greater<float>{});
    if (lane_id == 0) { shared_maxval[warp_id] = warp_maxval; }
    __syncthreads();
    warp_maxval = (lane_id < num_warps) ? shared_maxval[lane_id] : -FLT_MAX;
    float block_maxval = cg::reduce(warp, warp_maxval, cg::greater<float>{});
    thread_sumval *= expf(thread_maxval - block_maxval);
    float warp_sumval = cg::reduce(warp, thread_sumval, cg::plus<float>{});
    if (lane_id == 0) { shared_sumval[warp_id] = warp_sumval; }
    __syncthreads();
    warp_sumval = (lane_id < num_warps) ? shared_sumval[lane_id] : 0.0f;
    float block_sumval = cg::reduce(warp, warp_sumval, cg::plus<float>{});
    return SoftmaxParams{1.f / block_sumval, block_maxval};
}

__global__ void fused_classifier_kernel3(float* logits, float* losses, float* probs,
                                         const float* dlosses, const int* targets,
                                         int B, int T, int V, int P) {
    namespace cg = cooperative_groups;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(block);
    int idx = blockIdx.x;
    int ix = targets[idx];
    SoftmaxParams sp = prepare_softmax_blockwide_nofloat4(warp, idx, logits, V, P);
    // compute the probability of the target token and the loss (single-threaded)
    if(threadIdx.x == 0) {
        float prob = expf(logits[idx * P + ix] - sp.Offset) * sp.Scale;
        losses[idx] = -logf(prob);
    }
    // if no dlosses are given, default to the uniform loss 1/(B*T)
    float dloss = dlosses != NULL ? dlosses[idx] : 1.0f / (B*T);
    // calculate the gradients directly, overwriting the logits in place
    const float* logits_vec = logits + idx * P;
    for (int i = threadIdx.x; i < V; i += blockDim.x) {
        // this is the 2nd read of logits after the one in prepare_softmax2
        // this data will never be needed again, so we reduce cache persistence
        float v = __ldcs(&logits_vec[i]);
        float prob = expf(v - sp.Offset) * sp.Scale;
        if (probs != NULL) {
            probs[idx * P + i] = prob;
        }
        float indicator = (i == ix) ? 1.0f : 0.0f;
        logits[idx * P + i] = (prob - indicator) * dloss;
    }
}

void encoder_forward(float* out,
                     const int* inp, const float* wte, const float* wpe,
                     int B, int T, int C) {
    assert(C % 4 == 0);
    const int block_size = 512;
    const int N = B * T * C;
    const int grid_size = CEIL_DIV(N / 4, block_size);
    encoder_forward_kernel3<<<grid_size, block_size>>>((float4*) out, inp, (float4*) wte, (float4*) wpe, B, T, C);
    cudaCheck(cudaGetLastError());
}

void encoder_backward(float* dwte, float* dwpe,
                      const float* dout, const int* inp,
                      int B, int T, int C) {
    const int N = B * T * C;
    const int block_size = 256;
    const int grid_size = CEIL_DIV(N, block_size);
    encoder_backward_kernel<<<grid_size, block_size>>>(dwte, dwpe, dout, inp, B, T, C);
    cudaCheck(cudaGetLastError());
}

void layernorm_forward(float* out, float* mean, float* rstd,
                       float* inp, float* weight, float* bias,
                       int B, int T, int C) {
    const int block_size = 512;
    const int N = B * T;
    const int grid_size = CEIL_DIV(N * 32, block_size);
    layernorm_forward_kernel3<<<grid_size, block_size>>>(out, mean, rstd, inp, weight, bias, N, C);
    cudaCheck(cudaGetLastError());
}

void matmul_forward_cublaslt(float* out,
                             float* inp, float* weight, float* bias,
                             int B, int T, int C, int OC) {
    int has_bias = (bias != NULL);

    // check bias alignment
    if(((uintptr_t)bias % 16) != 0) {
        printf("Bias pointer is not aligned (cuBLASLt requirement)!\n");
        exit(EXIT_FAILURE);
    }

    int returnedResults = 0;
    cublasLtMatmulDesc_t operationDesc;
    cublasLtMatmulPreference_t preference;
    cublasLtMatrixLayout_t weightLayout;
    cublasLtMatrixLayout_t inputLayout;
    cublasLtMatrixLayout_t outputLayout;
    cublasLtMatrixLayout_t biasLayout;
    cublasLtMatmulHeuristicResult_t heuristic;

    // create the operation descriptor
    cublasOperation_t opNoTranspose = CUBLAS_OP_N;
    cublasOperation_t opTranspose = CUBLAS_OP_T;
    cublasLtEpilogue_t epilogueBias = CUBLASLT_EPILOGUE_BIAS;
    cublasCheck(cublasLtMatmulDescCreate(&operationDesc, cublas_compute_type, CUDA_R_32F));
    cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSA, &opTranspose, sizeof(opTranspose)));
    cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_TRANSB, &opNoTranspose, sizeof(opNoTranspose)));
    if(has_bias) {
        cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_EPILOGUE, &epilogueBias, sizeof(epilogueBias)));
    }
    cublasCheck(cublasLtMatmulDescSetAttribute(operationDesc, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias)));

    // define matrix layouts
    cublasCheck(cublasLtMatrixLayoutCreate(&weightLayout, CUDA_R_32F, C, OC, C));
    cublasCheck(cublasLtMatrixLayoutCreate(&inputLayout, CUDA_R_32F, C, B*T, C));
    cublasCheck(cublasLtMatrixLayoutCreate(&outputLayout, CUDA_R_32F, OC, B*T, OC));
    cublasCheck(cublasLtMatrixLayoutCreate(&biasLayout, CUDA_R_32F, OC, 1, OC));

    // find a suitable algorithm
    cublasCheck(cublasLtMatmulPreferenceCreate(&preference));
    cublasCheck(cublasLtMatmulPreferenceSetAttribute(preference,
        CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES,
        &cublaslt_workspace_size, sizeof(cublaslt_workspace_size)));
    cublasCheck(cublasLtMatmulAlgoGetHeuristic(cublaslt_handle, operationDesc,
        weightLayout, inputLayout, outputLayout, outputLayout,
        preference, 1, &heuristic, &returnedResults));
    if (returnedResults == 0) {
        printf("No cuBLASLt algorithm: B: %d, T: %d, C: %d, OC: %d, bias: %d\n", B, T, C, OC, has_bias);
        exit(EXIT_FAILURE);
    }

    // call the matmul
    const float alpha = 1.0f, beta = 0.0f;
    cublasCheck(cublasLtMatmul(cublaslt_handle, operationDesc,
        &alpha, weight, weightLayout, inp, inputLayout, &beta,
        out, outputLayout, out, outputLayout, &heuristic.algo,
        cublaslt_workspace, cublaslt_workspace_size, 0));

    // cleanup
    cublasCheck(cublasLtMatmulPreferenceDestroy(preference));
    cublasCheck(cublasLtMatmulDescDestroy(operationDesc));
    cublasCheck(cublasLtMatrixLayoutDestroy(weightLayout));
    cublasCheck(cublasLtMatrixLayoutDestroy(inputLayout));
    cublasCheck(cublasLtMatrixLayoutDestroy(outputLayout));
    cublasCheck(cublasLtMatrixLayoutDestroy(biasLayout));
}

void attention_forward(float* out, float* qkvr, float* att,
                       float* inp,
                       int B, int T, int C, int NH) {
    const int block_size = 256;
    const int softmax_block_size = 256;
    int HS = C / NH; // head size

    // permute and separate inp from (B, T, 3, NH, HS) to 3 x (B, NH, T, HS)
    float *q, *k, *v;
    q = qkvr + 0 * B * T * C;
    k = qkvr + 1 * B * T * C;
    v = qkvr + 2 * B * T * C;
    int total_threads = B * NH * T * HS;
    int num_blocks = CEIL_DIV(total_threads, block_size);
    permute_kernel<<<num_blocks, block_size>>>(q, k, v, inp, B, T, NH, HS);
    cudaCheck(cudaGetLastError());

    // batched matmul: preatt = q @ k^T
    const float alpha = 1.0f;
    const float beta = 0.0f;
    float* preatt = inp;
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, T, T, HS, &alpha, k, HS, T * HS, q, HS, T * HS, &beta, preatt, T, T * T, B * NH));

    // causal softmax, with the 1/sqrt(HS) scale folded in as inv_temperature
    float scale = 1.0 / sqrtf(HS);
    int grid_size = CEIL_DIV(B * NH * T * 32, softmax_block_size);
    softmax_forward_kernel5<<<grid_size, softmax_block_size>>>(att, scale, preatt, B * NH, T);
    cudaCheck(cudaGetLastError());

    // batched matmul: vaccum = att @ v, then unpermute back to (B, T, NH, HS)
    float* vaccum = inp;
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, HS, T, T, &alpha, v, HS, T * HS, att, T, T * T, &beta, vaccum, HS, T * HS, B * NH));
    num_blocks = CEIL_DIV(B * T * C, block_size);
    unpermute_kernel<<<num_blocks, block_size>>>(vaccum, out, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
}

void residual_forward(float* out, float* inp1, float* inp2, int N) {
    const int block_size = 256;
    const int grid_size = CEIL_DIV(N, block_size);
    residual_forward_kernel<<<grid_size, block_size>>>(out, inp1, inp2, N);
    cudaCheck(cudaGetLastError());
}

void gelu_forward(float* out, const float* inp, int N) {
    const int block_size = 128;
    const int grid_size = CEIL_DIV(N, block_size);
    gelu_forward_kernel<<<grid_size, block_size>>>(out, inp, N);
    cudaCheck(cudaGetLastError());
}

void gelu_backward(float* dinp, const float* inp, const float* dout, const int N) {
    const int block_size = 128;
    const int grid_size = CEIL_DIV(N, block_size);
    gelu_backward_kernel<<<grid_size, block_size>>>(dinp, inp, dout, N);
    cudaCheck(cudaGetLastError());
}

void matmul_backward(float* dinp, float* dweight, float* dbias,
                     float* dout, float* inp, float* weight,
                     int B, int T, int C, int OC) {
    float one = 1.0f;
    float zero = 0.0f;
    // backward to input (beta = 0, sets the gradient)
    cublasCheck(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, C, B*T, OC, &one, weight, C, dout, OC, &zero, dinp, C));
    // backward to weight (beta = 1, accumulates the gradient with +=)
    cublasCheck(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, C, OC, B*T, &one, inp, C, dout, OC, &one, dweight, C));
    // backward to bias, if given
    if (dbias != NULL) {
        const int block_size = 1024;
        const int grid_size = OC / 32;
        matmul_backward_bias_kernel4<<<grid_size, block_size, block_size * sizeof(float)>>>(dbias, dout, B, T, OC);
        cudaCheck(cudaGetLastError());
    }
}

void layernorm_backward(float* dinp, float* dweight, float* dbias,
                        const float* dout, const float* inp, const float* weight, const float* mean, const float* rstd,
                        int B, int T, int C) {
    const int block_size = 512;
    const int N = B * T;
    const int grid_size = CEIL_DIV(32*N, block_size);
    size_t shared_mem_size = 2 * C * sizeof(float);
    layernorm_backward_kernel2<<<grid_size, block_size, shared_mem_size>>>(dinp, dweight, dbias, dout, inp, weight, mean, rstd, B, T, C);
    cudaCheck(cudaGetLastError());
}

void attention_backward(float* dinp, float* dqkvr, float* dpreatt, float* datt, float* scratch,
                        const float* dout,
                        const float* qkvr, const float* att,
                        int B, int T, int C, int NH) {
    const int block_size = 256;
    int HS = C / NH; // head size
    const float one = 1.0f;
    const float zero = 0.0f;

    // unpack convenience pointers into q, k, v
    const float *q, *k, *v;
    q = qkvr + 0 * B * T * C;
    k = qkvr + 1 * B * T * C;
    v = qkvr + 2 * B * T * C;
    float *dq, *dk, *dv;
    dq = dqkvr + 0 * B * T * C;
    dk = dqkvr + 1 * B * T * C;
    dv = dqkvr + 2 * B * T * C;

    // backward through the unpermute operation
    int num_blocks = CEIL_DIV(B * T * C, block_size);
    unpermute_kernel_backward<<<num_blocks, block_size>>>(scratch, dout, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
    // backward into datt
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, T, T, HS, &one, v, HS, T * HS, scratch, HS, T * HS, &zero, datt, T, T * T, B * NH));
    // backward into dv
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, HS, T, T, &one, scratch, HS, T * HS, att, T, T * T, &zero, dv, HS, T * HS, B * NH));
    // backward into preatt
    int hs = C / NH; // head size
    float scale = 1.0f / sqrtf(hs);
    softmax_autoregressive_backward_kernel<<<dim3(T / 4, B * NH), 256>>>(dpreatt, datt, att, B, T, C, scale);
    cudaCheck(cudaGetLastError());
    // backward into q
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, HS, T, T, &one, k, HS, T * HS, dpreatt, T, T * T, &zero, dq, HS, T * HS, B * NH));
    // backward into k
    cublasCheck(cublasSgemmStridedBatched(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, HS, T, T, &one, q, HS, T * HS, dpreatt, T, T * T, &zero, dk, HS, T * HS, B * NH));
    // backward into inp
    num_blocks = CEIL_DIV(B * NH * T * HS, block_size);
    permute_kernel_backward<<<num_blocks, block_size>>>(dinp, dq, dk, dv, B, T, NH, HS);
    cudaCheck(cudaGetLastError());
}

void fused_classifier3(float* logits, float* losses,
                       const float* dlosses, const int* targets,
                       int B, int T, int V, int P) {
    const int block_size = 1024;
    const int N = B * T;
    const int grid_size = N;
    fused_classifier_kernel3<<<grid_size, block_size>>>(logits, losses, NULL, dlosses, targets, B, T, V, P);
    cudaCheck(cudaGetLastError());
}

// ----------------------------------------------------------------------------
// GPT-2 model definition

typedef struct {
    int max_seq_len;       // max sequence length, e.g. 1024
    int vocab_size;        // vocab size, e.g. 50257
    int padded_vocab_size; // vocab size padded up, e.g. 50304
    int num_layers;        // number of layers, e.g. 12
    int num_heads;         // number of attention heads, e.g. 12
    int channels;          // number of channels, e.g. 768
} GPT2Config;

// the parameters of the model
#define NUM_PARAMETER_TENSORS 16
typedef struct {
    float* wte;      // (Vp, C)
    float* wpe;      // (maxT, C)
    float* ln1w;     // (L, C)
    float* ln1b;     // (L, C)
    float* qkvw;     // (L, 3*C, C)
    float* qkvb;     // (L, 3*C)
    float* attprojw; // (L, C, C)
    float* attprojb; // (L, C)
    float* ln2w;     // (L, C)
    float* ln2b;     // (L, C)
    float* fcw;      // (L, 4*C, C)
    float* fcb;      // (L, 4*C)
    float* fcprojw;  // (L, C, 4*C)
    float* fcprojb;  // (L, C)
    float* lnfw;     // (C)
    float* lnfb;     // (C)
} ParameterTensors;

void fill_in_parameter_sizes(size_t* param_sizes, GPT2Config config) {
    int Vp = config.padded_vocab_size;
    int C = config.channels;
    int maxT = config.max_seq_len;
    int L = config.num_layers;
    param_sizes[0] = Vp * C;           // wte
    param_sizes[1] = maxT * C;         // wpe
    param_sizes[2] = L * C;            // ln1w
    param_sizes[3] = L * C;            // ln1b
    param_sizes[4] = L * (3 * C) * C;  // qkvw
    param_sizes[5] = L * (3 * C);      // qkvb
    param_sizes[6] = L * C * C;        // attprojw
    param_sizes[7] = L * C;            // attprojb
    param_sizes[8] = L * C;            // ln2w
    param_sizes[9] = L * C;            // ln2b
    param_sizes[10] = L * (4 * C) * C; // fcw
    param_sizes[11] = L * (4 * C);     // fcb
    param_sizes[12] = L * C * (4 * C); // fcprojw
    param_sizes[13] = L * C;           // fcprojb
    param_sizes[14] = C;               // lnfw
    param_sizes[15] = C;               // lnfb
}

// allocate one flat block of memory for the parameters and point the individual tensors into it
float* malloc_and_point_parameters(ParameterTensors* params, size_t* param_sizes, int on_device) {
    size_t num_parameters = 0;
    for (size_t i = 0; i < NUM_PARAMETER_TENSORS; i++) {
        num_parameters += param_sizes[i];
    }
    float* params_memory;
    if (on_device) {
        cudaCheck(cudaMalloc((void**)&params_memory, num_parameters * sizeof(float)));
    } else {
        params_memory = (float*)mallocCheck(num_parameters * sizeof(float));
    }
    float** ptrs[] = {
        &params->wte, &params->wpe, &params->ln1w, &params->ln1b, &params->qkvw, &params->qkvb,
        &params->attprojw, &params->attprojb, &params->ln2w, &params->ln2b, &params->fcw, &params->fcb,
        &params->fcprojw, &params->fcprojb, &params->lnfw, &params->lnfb
    };
    float* params_memory_iterator = params_memory;
    for (size_t i = 0; i < NUM_PARAMETER_TENSORS; i++) {
        *(ptrs[i]) = params_memory_iterator;
        params_memory_iterator += param_sizes[i];
    }
    return params_memory;
}

#define NUM_ACTIVATION_TENSORS 21
typedef struct {
    float* encoded;
    float* ln1;
    float* ln1_mean;
    float* ln1_rstd;
    float* atty;
    float* att;
    float* attproj;
    float* residual2;
    float* ln2;
    float* ln2_mean;
    float* ln2_rstd;
    float* fch;
    float* fch_gelu;
    float* fcproj;
    float* residual3;
    float* lnf;
    float* lnf_mean;
    float* lnf_rstd;
    float* losses;
    float* qkvr;
    float* output;
} ActivationTensors;

void fill_in_activation_sizes(size_t* act_sizes, int B, int T, GPT2Config config) {
    size_t Vp = config.padded_vocab_size;
    size_t L = config.num_layers;
    size_t NH = config.num_heads;
    size_t C = config.channels;
    act_sizes[0] = B * T * C;          // encoded
    act_sizes[1] = L * B * T * C;      // ln1
    act_sizes[2] = L * B * T;          // ln1_mean
    act_sizes[3] = L * B * T;          // ln1_rstd
    act_sizes[4] = L * B * T * C;      // atty
    act_sizes[5] = L * B * NH * T * T; // att
    act_sizes[6] = L * B * T * C;      // attproj
    act_sizes[7] = L * B * T * C;      // residual2
    act_sizes[8] = L * B * T * C;      // ln2
    act_sizes[9] = L * B * T;          // ln2_mean
    act_sizes[10] = L * B * T;         // ln2_rstd
    act_sizes[11] = L * B * T * 4*C;   // fch
    act_sizes[12] = L * B * T * 4*C;   // fch_gelu
    act_sizes[13] = L * B * T * C;     // fcproj
    act_sizes[14] = L * B * T * C;     // residual3
    act_sizes[15] = B * T * C;         // lnf
    act_sizes[16] = B * T;             // lnf_mean
    act_sizes[17] = B * T;             // lnf_rstd
    act_sizes[18] = B * T;             // losses
    act_sizes[19] = L * B * T * 3*C;   // qkvr
    act_sizes[20] = B * T * max(3*C, max(NH*T, Vp)); // output / scratch
}

#define NUM_BACKWARD_TENSORS 3
typedef struct {
    float* bt4c;      // (B, T, 4*C)
    float* preatt;    // (B, NH, T, T)
    float* residual3; // (B, T, C)
} GradActTensors;

void fill_in_grad_act_sizes(size_t* act_sizes, int B, int T, GPT2Config config) {
    size_t NH = config.num_heads;
    size_t C = config.channels;
    act_sizes[0] = B * T * 4 * C;  // bt4c
    act_sizes[1] = B * NH * T * T; // preatt
    act_sizes[2] = B * T * C;      // residual3
}

float* malloc_and_point(float** targets[], const size_t* act_sizes, int n) {
    size_t num_activations = 0;
    for (size_t i = 0; i < n; i++) {
        num_activations += act_sizes[i];
    }
    float* acts_memory;
    cudaCheck(cudaMalloc((void**)&acts_memory, num_activations * sizeof(float)));
    float* acts_memory_iterator = acts_memory;
    for (size_t i = 0; i < n; i++) {
        *(targets[i]) = acts_memory_iterator;
        acts_memory_iterator += act_sizes[i];
    }
    return acts_memory;
}

float* malloc_and_point_activations(ActivationTensors* acts, const size_t* act_sizes) {
    float** ptrs[] = {
        &acts->encoded, &acts->ln1, &acts->ln1_mean, &acts->ln1_rstd, &acts->atty,
        &acts->att, &acts->attproj, &acts->residual2, &acts->ln2, &acts->ln2_mean,
        &acts->ln2_rstd, &acts->fch, &acts->fch_gelu, &acts->fcproj, &acts->residual3, &acts->lnf,
        &acts->lnf_mean, &acts->lnf_rstd, &acts->losses, &acts->qkvr, &acts->output
    };
    return malloc_and_point(ptrs, act_sizes, NUM_ACTIVATION_TENSORS);
}

float* malloc_and_point_backward(GradActTensors* acts, const size_t* act_sizes) {
    float** ptrs[] = {
        &acts->bt4c, &acts->preatt, &acts->residual3
    };
    return malloc_and_point(ptrs, act_sizes, NUM_BACKWARD_TENSORS);
}

typedef struct {
    GPT2Config config;
    // the weights of the model, and their sizes
    ParameterTensors params;
    size_t param_sizes[NUM_PARAMETER_TENSORS];
    float* params_memory;
    size_t num_parameters;
    // gradients of the weights
    ParameterTensors grads;
    float* grads_memory;
    // buffers for the AdamW optimizer
    float* m_memory;
    float* v_memory;
    // the activations of the model, and their sizes
    ActivationTensors acts;
    size_t act_sizes[NUM_ACTIVATION_TENSORS];
    float* acts_memory;
    size_t num_activations;
    // gradients of the activations
    GradActTensors grads_acts;
    size_t num_grad_acts;
    float* grads_acts_memory;
    // other run state configuration
    int batch_size;    // the batch size (B) of the current forward pass
    int seq_len;       // the sequence length (T) of the current forward pass
    int* inputs;       // the input tokens for the current forward pass
    int* targets;      // the target tokens for the current forward pass
    float mean_loss;   // after a forward pass with targets, will be populated with the mean loss
    float* cpu_losses; // CPU buffer to copy the losses to
} GPT2;

void gpt2_build_from_checkpoint(GPT2 *model, const char* checkpoint_path) {
    // read in the model from a checkpoint file
    FILE *model_file = fopenCheck(checkpoint_path, "rb");
    int model_header[256];
    freadCheck(model_header, sizeof(int), 256, model_file);
    if (model_header[0] != 20240326) { fprintf(stderr, "Bad magic model file\n"); exit(EXIT_FAILURE); }
    if (model_header[1] != 3) {
        // was bumped from 1 -> 3 to incorporate the padded vocab size
        fprintf(stderr, "Bad version in model file\n");
        fprintf(stderr, "---> HINT: try to re-run `python train_gpt2.py`\n");
        exit(EXIT_FAILURE);
    }

    // read in the hyperparameters
    model->config.max_seq_len = model_header[2];
    model->config.vocab_size = model_header[3];
    model->config.num_layers = model_header[4];
    model->config.num_heads = model_header[5];
    model->config.channels = model_header[6];
    model->config.padded_vocab_size = model_header[7];

    // count the parameters and allocate device memory for them
    fill_in_parameter_sizes(model->param_sizes, model->config);
    size_t num_parameters = 0;
    for (size_t i = 0; i < NUM_PARAMETER_TENSORS; i++) {
        num_parameters += model->param_sizes[i];
    }
    model->num_parameters = num_parameters;
    model->params_memory = malloc_and_point_parameters(&model->params, model->param_sizes, 1);

    // read in all the parameters from file and copy them to the device
    float* params_memory_cpu = (float*)mallocCheck(num_parameters * sizeof(float));
    freadCheck(params_memory_cpu, sizeof(float), num_parameters, model_file);
    cudaCheck(cudaMemcpy(model->params_memory, params_memory_cpu, num_parameters * sizeof(float), cudaMemcpyHostToDevice));
    free(params_memory_cpu);
    fcloseCheck(model_file);

    // other inits
    model->acts_memory = NULL;
    model->grads_memory = NULL;
    model->m_memory = NULL;
    model->v_memory = NULL;
    model->grads_acts_memory = NULL;
    model->inputs = NULL;
    model->targets = NULL;
    model->cpu_losses = NULL;
    model->batch_size = 0;
    model->seq_len = 0;
    model->mean_loss = -1.0f; // -1.0f will designate no loss
}

void gpt2_forward(GPT2 *model, int* inputs, int* targets, int B, int T) {
    // targets are optional and could be NULL
    // ensure the model was initialized, or error out
    if (model->params_memory == NULL) {
        printf("Error: model was not initialized properly.\n");
        exit(EXIT_FAILURE);
    }

    // convenience parameters
    int V = model->config.vocab_size;
    int Vp = model->config.padded_vocab_size;
    int L = model->config.num_layers;
    int NH = model->config.num_heads;
    int C = model->config.channels;

    // validate inputs, all indices must be in the range [0, V)
    for(int i = 0; i < B * T; i++) {
        assert(0 <= inputs[i] && inputs[i] < V);
        if (targets != NULL) {
            assert(0 <= targets[i] && targets[i] < V);
        }
    }

    // allocate space for all the activations if needed (done here, lazily)
    if(model->acts_memory == NULL) {
        // record the current B, T
        model->batch_size = B;
        model->seq_len = T;
        // allocate the space
        fill_in_activation_sizes(model->act_sizes, B, T, model->config);
        size_t num_activations = 0;
        for (size_t i = 0; i < NUM_ACTIVATION_TENSORS; i++) {
            num_activations += model->act_sizes[i];
        }
        model->num_activations = num_activations;
        model->acts_memory = malloc_and_point_activations(&model->acts, model->act_sizes);
        printf("allocated %zu MiB for activations\n", (num_activations * sizeof(float)) >> 20);
        // also create memory for caching inputs and targets
        cudaCheck(cudaMalloc((void**)&model->inputs, B * T * sizeof(int)));
        cudaCheck(cudaMalloc((void**)&model->targets, B * T * sizeof(int)));
        cudaCheck(cudaMallocHost((void**)&model->cpu_losses, B * T * sizeof(float)));
    } else {
        // validate B, T is consistent with how we've allocated the memory before
        if (B != model->batch_size || T != model->seq_len) {
            printf("Model: B=%d T=%d, Desired: B=%d T=%d\n", model->batch_size, model->seq_len, B, T);
            exit(EXIT_FAILURE);
        }
    }

    // copy inputs/targets to the model
    cudaCheck(cudaMemcpy(model->inputs, inputs, B * T * sizeof(int), cudaMemcpyHostToDevice));
    if (targets != NULL) {
        cudaCheck(cudaMemcpy(model->targets, targets, B * T * sizeof(int), cudaMemcpyHostToDevice));
    }

    // forward pass
    ParameterTensors params = model->params; // for brevity
    ActivationTensors acts = model->acts;
    float* residual;
    encoder_forward(acts.encoded, model->inputs, params.wte, params.wpe, B, T, C); // encoding goes into residual[0]

    for (int l = 0; l < L; l++) {
        residual = l == 0 ? acts.encoded : acts.residual3 + (l-1) * B * T * C;

        // get the pointers of the weights for this layer
        float* l_ln1w = params.ln1w + l * C;
        float* l_ln1b = params.ln1b + l * C;
        float* l_qkvw = params.qkvw + l * 3*C * C;
        float* l_qkvb = params.qkvb + l * 3*C;
        float* l_attprojw = params.attprojw + l * C * C;
        float* l_attprojb = params.attprojb + l * C;
        float* l_ln2w = params.ln2w + l * C;
        float* l_ln2b = params.ln2b + l * C;
        float* l_fcw = params.fcw + l * 4*C * C;
        float* l_fcb = params.fcb + l * 4*C;
        float* l_fcprojw = params.fcprojw + l * C * 4*C;
        float* l_fcprojb = params.fcprojb + l * C;

        // get the pointers of the activations for this layer
        float* l_ln1 = acts.ln1 + l * B * T * C;
        float* l_ln1_mean = acts.ln1_mean + l * B * T;
        float* l_ln1_rstd = acts.ln1_rstd + l * B * T;
        float* l_qkvr = acts.qkvr + l * B * T * 3*C;
        float* l_atty = acts.atty + l * B * T * C;
        float* l_att = acts.att + l * B * NH * T * T;
        float* l_attproj = acts.attproj + l * B * T * C;
        float* l_residual2 = acts.residual2 + l * B * T * C;
        float* l_ln2 = acts.ln2 + l * B * T * C;
        float* l_ln2_mean = acts.ln2_mean + l * B * T;
        float* l_ln2_rstd = acts.ln2_rstd + l * B * T;
        float* l_fch = acts.fch + l * B * T * 4*C;
        float* l_fch_gelu = acts.fch_gelu + l * B * T * 4*C;
        float* l_fcproj = acts.fcproj + l * B * T * C;
        float* l_residual3 = acts.residual3 + l * B * T * C;
        float* scratch = acts.output; // used for some of the intermediate computations

        // now do the forward pass
        layernorm_forward(l_ln1, l_ln1_mean, l_ln1_rstd, residual, l_ln1w, l_ln1b, B, T, C);
        matmul_forward_cublaslt(scratch, l_ln1, l_qkvw, l_qkvb, B, T, C, 3*C);
        attention_forward(l_atty, l_qkvr, l_att, scratch, B, T, C, NH);
        matmul_forward_cublaslt(l_attproj, l_atty, l_attprojw, l_attprojb, B, T, C, C);
        residual_forward(l_residual2, residual, l_attproj, B*T*C);
        layernorm_forward(l_ln2, l_ln2_mean, l_ln2_rstd, l_residual2, l_ln2w, l_ln2b, B, T, C);
        matmul_forward_cublaslt(l_fch, l_ln2, l_fcw, l_fcb, B, T, C, 4*C);
        gelu_forward(l_fch_gelu, l_fch, B*T*4*C);
        matmul_forward_cublaslt(l_fcproj, l_fch_gelu, l_fcprojw, l_fcprojb, B, T, 4*C, C);
        residual_forward(l_residual3, l_residual2, l_fcproj, B*T*C);
    }

    residual = acts.residual3 + (L-1) * B * T * C; // last residual is in residual3
    layernorm_forward(acts.lnf, acts.lnf_mean, acts.lnf_rstd, residual, params.lnfw, params.lnfb, B, T, C);
    matmul_forward_cublaslt(acts.output, acts.lnf, params.wte, NULL, B, T, C, Vp);

    // also forward the cross-entropy loss function if we have the targets
    if (targets != NULL) {
        // fused classifier: does the forward pass and the first part of the backward pass
        fused_classifier3(acts.output, acts.losses, NULL, model->targets, B, T, V, Vp);
        // for convenience also evaluate the mean loss on the CPU
        cudaCheck(cudaMemcpy(model->cpu_losses, acts.losses, B * T * sizeof(float), cudaMemcpyDeviceToHost));
        float mean_loss = 0.0f;
        for (int i=0; i<B*T; i++) { mean_loss += model->cpu_losses[i]; }
        mean_loss /= B*T;
        model->mean_loss = mean_loss;
    } else {
        // if we don't have targets, we don't have a loss
        model->mean_loss = -1.0f;
    }
}

void gpt2_zero_grad(GPT2 *model) {
    if (model->grads_acts_memory != NULL) { cudaCheck(cudaMemset(model->grads_acts_memory, 0, model->num_grad_acts * sizeof(float))); }
    if (model->grads_memory != NULL) { cudaCheck(cudaMemset(model->grads_memory, 0, model->num_parameters * sizeof(float))); }
}

void gpt2_backward(GPT2 *model) {
    // double check we forwarded previously, with targets
    if (model->mean_loss == -1.0f) {
        printf("Error: must forward with targets before backward\n");
        exit(EXIT_FAILURE);
    }

    // lazily allocate the memory for the gradients of the weights and activations, if needed
    if (model->grads_memory == NULL) {
        model->grads_memory = malloc_and_point_parameters(&model->grads, model->param_sizes, 1);
        printf("allocated %zu MiB for parameter gradients\n", (model->num_parameters * sizeof(float)) >> 20);
        // the activation gradients do not mirror the forward activations; only a few
        // buffers are kept and re-used across layers to save memory
        size_t bw_act_sizes[NUM_ACTIVATION_TENSORS];
        GPT2Config cfg = model->config;
        cfg.num_layers = 1; // copy the configuration but override number of layers to 1
        fill_in_grad_act_sizes(bw_act_sizes, model->batch_size, model->seq_len, cfg);
        model->grads_acts_memory = malloc_and_point_backward(&model->grads_acts, bw_act_sizes);
        model->num_grad_acts = 0;
        for (int i = 0; i < NUM_BACKWARD_TENSORS; i++) {
            model->num_grad_acts += bw_act_sizes[i];
        }
        printf("allocated %zu MiB for activation gradients\n", (model->num_grad_acts * sizeof(float)) >> 20);
        gpt2_zero_grad(model);
    }

    // convenience shortcuts
    int B = model->batch_size;
    int T = model->seq_len;
    int Vp = model->config.padded_vocab_size;
    int L = model->config.num_layers;
    int NH = model->config.num_heads;
    int C = model->config.channels;

    // backward pass: go in the reverse order of the forward pass
    ParameterTensors params = model->params; // for brevity
    ParameterTensors grads = model->grads;
    ActivationTensors acts = model->acts;
    GradActTensors grads_acts = model->grads_acts;

    // the chain rule was kicked off by fused_classifier3, which left dlogits in acts.output
    matmul_backward(grads_acts.bt4c, grads.wte, NULL, acts.output, acts.lnf, params.wte, B, T, C, Vp);
    // backward the final layernorm
    float* residual = acts.residual3 + (L-1) * B * T * C; // last residual is in residual3
    float* dresidual = grads_acts.residual3; // the main buffer holding the gradient in the backward pass
    layernorm_backward(dresidual, grads.lnfw, grads.lnfb, grads_acts.bt4c, residual, params.lnfw, acts.lnf_mean, acts.lnf_rstd, B, T, C);

    // now backward all the layers
    for (int l = L-1; l >= 0; l--) {
        residual = l == 0 ? acts.encoded : acts.residual3 + (l-1) * B * T * C;

        // get the pointers of the weights for this layer
        float* l_ln1w = params.ln1w + l * C;
        float* l_qkvw = params.qkvw + l * 3*C * C;
        float* l_attprojw = params.attprojw + l * C * C;
        float* l_ln2w = params.ln2w + l * C;
        float* l_fcw = params.fcw + l * 4*C * C;
        float* l_fcprojw = params.fcprojw + l * C * 4*C;
        // get the pointers of the gradients of the weights for this layer
        float* dl_ln1w = grads.ln1w + l * C;
        float* dl_ln1b = grads.ln1b + l * C;
        float* dl_qkvw = grads.qkvw + l * 3*C * C;
        float* dl_qkvb = grads.qkvb + l * 3*C;
        float* dl_attprojw = grads.attprojw + l * C * C;
        float* dl_attprojb = grads.attprojb + l * C;
        float* dl_ln2w = grads.ln2w + l * C;
        float* dl_ln2b = grads.ln2b + l * C;
        float* dl_fcw = grads.fcw + l * 4*C * C;
        float* dl_fcb = grads.fcb + l * 4*C;
        float* dl_fcprojw = grads.fcprojw + l * C * 4*C;
        float* dl_fcprojb = grads.fcprojb + l * C;
        // get the pointers of the activations for this layer
        float* l_ln1 = acts.ln1 + l * B * T * C;
        float* l_ln1_mean = acts.ln1_mean + l * B * T;
        float* l_ln1_rstd = acts.ln1_rstd + l * B * T;
        float* l_qkvr = acts.qkvr + l * B * T * 3*C;
        float* l_atty = acts.atty + l * B * T * C;
        float* l_att = acts.att + l * B * NH * T * T;
        float* l_residual2 = acts.residual2 + l * B * T * C;
        float* l_ln2 = acts.ln2 + l * B * T * C;
        float* l_ln2_mean = acts.ln2_mean + l * B * T;
        float* l_ln2_rstd = acts.ln2_rstd + l * B * T;
        float* l_fch = acts.fch + l * B * T * 4*C;
        float* l_fch_gelu = acts.fch_gelu + l * B * T * 4*C;
        // gradients of the activations: note there is no "l *" offset, because a
        // single copy of these buffers is re-used in every Transformer block
        float* dl_btc = acts.lnf; // re-use the final layernorm buffer, no longer needed
        float* dl_bt4c = grads_acts.bt4c;
        float* dl_preatt = grads_acts.preatt;

        // re-use the scratch buffer of the forward pass
        float* scratch = acts.output;

        // backprop this layer
        matmul_backward(dl_bt4c, dl_fcprojw, dl_fcprojb, dresidual, l_fch_gelu, l_fcprojw, B, T, 4*C, C);
        gelu_backward(dl_bt4c, l_fch, dl_bt4c, B*T*4*C);
        matmul_backward(dl_btc, dl_fcw, dl_fcb, dl_bt4c, l_ln2, l_fcw, B, T, C, 4 * C);
        layernorm_backward(dresidual, dl_ln2w, dl_ln2b, dl_btc, l_residual2, l_ln2w, l_ln2_mean, l_ln2_rstd, B, T, C);
        matmul_backward(dl_btc, dl_attprojw, dl_attprojb, dresidual, l_atty, l_attprojw, B, T, C, C);
        // l_atty and l_fch are big forward buffers we can re-use as scratch here
        float* buffer_a = l_atty;
        float* buffer_b = l_fch;
        attention_backward(dl_bt4c, buffer_b, dl_preatt, scratch, buffer_a, dl_btc, l_qkvr, l_att, B, T, C, NH);
        matmul_backward(dl_btc, dl_qkvw, dl_qkvb, dl_bt4c, l_ln1, l_qkvw, B, T, C, 3 * C);
        layernorm_backward(dresidual, dl_ln1w, dl_ln1b, dl_btc, residual, l_ln1w, l_ln1_mean, l_ln1_rstd, B, T, C);
    }
    encoder_backward(grads.wte, grads.wpe, dresidual, model->inputs, B, T, C);
}

void gpt2_update(GPT2 *model, float learning_rate, float beta1, float beta2, float eps, float weight_decay, int t) {
    // lazily allocate the memory for m_memory and v_memory
    if (model->m_memory == NULL) {
        cudaCheck(cudaMalloc((void**)&model->m_memory, model->num_parameters * sizeof(float)));
        cudaCheck(cudaMalloc((void**)&model->v_memory, model->num_parameters * sizeof(float)));
        cudaCheck(cudaMemset(model->m_memory, 0, model->num_parameters * sizeof(float)));
        cudaCheck(cudaMemset(model->v_memory, 0, model->num_parameters * sizeof(float)));
        printf("allocated %zu MiB for AdamW optimizer state m\n", (model->num_parameters * sizeof(float)) >> 20);
        printf("allocated %zu MiB for AdamW optimizer state v\n", (model->num_parameters * sizeof(float)) >> 20);
    }

    int block_size = 512;
    int num_blocks = CEIL_DIV(model->num_parameters, block_size);
    float beta1_correction = 1.0f - powf(beta1, t);
    float beta2_correction = 1.0f - powf(beta2, t);
    adamw_kernel2<<<num_blocks, block_size>>>(model->params_memory, model->grads_memory, model->m_memory, model->v_memory,
                                              model->num_parameters,
                                              learning_rate, beta1, beta2, beta1_correction, beta2_correction, eps, weight_decay);
    cudaCheck(cudaGetLastError());
}

void gpt2_free(GPT2 *model) {
    cudaCheck(cudaFree(model->params_memory));
    cudaCheck(cudaFree(model->grads_memory));
    cudaCheck(cudaFree(model->m_memory));
    cudaCheck(cudaFree(model->v_memory));
    cudaCheck(cudaFree(model->acts_memory));
    cudaCheck(cudaFree(model->grads_acts_memory));
    cudaCheck(cudaFree(model->inputs));
    cudaCheck(cudaFree(model->targets));
    cudaFreeHost(model->cpu_losses);
}

#ifndef TESTING
// if we are TESTING, we'll skip the int main below
// ----------------------------------------------------------------------------
// data loader

typedef struct {
    // hyperparameters
    int B;
    int T;
    // input handling and its state
    FILE* tokens_file;
    long file_size;
    long current_position;
    // output memory
    int* batch;
    int* inputs;
    int* targets;
    // convenience variables
    long num_batches;
} DataLoader;

void dataloader_init(DataLoader *loader, const char* filename, int B, int T) {
    loader->B = B;
    loader->T = T;

    // open the input file for reading
    loader->tokens_file = fopenCheck(filename, "rb");

    // determine the file size
    fseekCheck(loader->tokens_file, 0, SEEK_END);
    loader->file_size = ftell(loader->tokens_file);
    fseekCheck(loader->tokens_file, 0, SEEK_SET);
    if (loader->file_size < (B * T + 1) * sizeof(int)) {
        printf("Error: file size is too small for the batch size and sequence length\n");
        exit(EXIT_FAILURE);
    }
    loader->current_position = 0; // start at the beginning

    // allocate pinned memory for B*T + 1 integers (pinned speeds up the copies to the GPU)
    cudaMallocHost((void**)&loader->batch, (B * T + 1) * sizeof(int));
    loader->inputs = loader->batch;
    loader->targets = loader->batch + 1; // targets are shifted by one
    loader->num_batches = loader->file_size / (B * T * sizeof(int));
}

void dataloader_reset(DataLoader *loader) {
    loader->current_position = 0;
}

void dataloader_next_batch(DataLoader *loader) {
    int B = loader->B;
    int T = loader->T;
    // if we are at the end of the file, loop back to the beginning
    if (loader->current_position + (B*T+1) * sizeof(int) > loader->file_size) {
        loader->current_position = 0;
    }
    // read the B*T+1 integers from the file into batch
    fseekCheck(loader->tokens_file, loader->current_position, SEEK_SET);
    freadCheck(loader->batch, sizeof(int), B*T+1, loader->tokens_file);
    // advance the current position by B*T integers
    loader->current_position += B*T * sizeof(int);
}

void dataloader_free(DataLoader *loader) {
    fcloseCheck(loader->tokens_file);
    cudaFreeHost(loader->batch);
}

// ----------------------------------------------------------------------------
// sampler

#define GPT2_EOT 50256

unsigned int random_u32(unsigned long long *state) {
    // xorshift* rng
    *state ^= *state >> 12;
    *state ^= *state << 25;
    *state ^= *state >> 27;
    return (*state * 0x2545F4914F6CDD1Dull) >> 32;
}

float random_f32(unsigned long long *state) { // random float32 in [0,1)
    return (random_u32(state) >> 8) / 16777216.0f;
}

int sample_softmax(const float* logits, int n, float coin) {
    // sample an index from the logits (converted to probabilities via softmax)
    // coin is a random number in [0, 1), usually from random_f32()
    double norm = 0;
    for (int i = 0; i < n; i++) {
        norm += expf(logits[i]);
    }
    // instead of dividing all exp(logits), we can just multiply coin by the norm
    coin *= norm;
    float cdf = 0.0f;
    for (int i = 0; i < n; i++) {
        cdf += expf(logits[i]);
        if (coin < cdf) {
            return i;
        }
    }
    return n - 1; // in case of rounding errors
}

// ----------------------------------------------------------------------------
// lightweight logger

typedef struct {
    FILE *logfile;
    int flush_every; // every how many steps to flush the log
} Logger;

void logger_init(Logger *logger, const char *filename) {
    logger->flush_every = 20;
    logger->logfile = NULL;
    if (filename != NULL) { logger->logfile = fopenCheck(filename, "w"); }
}

void logger_log_val(Logger *logger, int step, float val_loss) {
    if (logger->logfile != NULL) {
        fprintf(logger->logfile, "s:%d tel:%.4f\n", step, val_loss);
    }
}

void logger_log_train(Logger *logger, int step, float train_loss) {
    if (logger->logfile != NULL) {
        fprintf(logger->logfile, "s:%d trl:%.4f\n", step, train_loss);
        if (step % 10 == 0) { fflush(logger->logfile); }
    }
}

void logger_free(Logger *logger) {
    if (logger->logfile != NULL) { fclose(logger->logfile); }
}

// ----------------------------------------------------------------------------
// CLI

void error_usage() {
    fprintf(stderr, "Usage:   ./train_gpt2fp32cu [options]\n");
    fprintf(stderr, "Example: ./train_gpt2fp32cu -i data/TinyStories -v 100 -s 100 -g 144 -o stories.log\n");
    fprintf(stderr, "Options:\n");
    fprintf(stderr, "  -i <string> input dataset prefix (default = data/tiny_shakespeare)\n");
    fprintf(stderr, "  -o <string> output log file (default = NULL)\n");
    fprintf(stderr, "  -b <int>    batch size B (default = 4)\n");
    fprintf(stderr, "  -t <int>    sequence length T (default = 1024)\n");
    fprintf(stderr, "  -l <float>  learning rate (default = 3e-4f)\n");
    fprintf(stderr, "  -v <int>    val_loss_every, how often we evaluate val loss (default = 20)\n");
    fprintf(stderr, "  -m <int>    val_max_batches, up to how many val batches to estimate val loss? (default = 20)\n");
    fprintf(stderr, "  -s <int>    sample_every, how often we inference the model (default = 20)\n");
    fprintf(stderr, "  -g <int>    genT, how many steps of inference we do (default = 64)\n");
    exit(EXIT_FAILURE);
}

// ----------------------------------------------------------------------------
// main training loop

int main(int argc, char *argv[]) {
    // read in the (optional) command line arguments
    const char* input_dataset_prefix = "data/tiny_shakespeare";
    const char* output_log_file = NULL;
    int B = 4;
    int T = 1024;
    float learning_rate = 3e-4f;
    int val_loss_every = 20;
    int val_max_batches = 20;
    int sample_every = 20;
    int genT = 64;
    for (int i = 1; i < argc; i+=2) {
        if (i + 1 >= argc) { error_usage(); } // must have arg after flag
        if (argv[i][0] != '-') { error_usage(); } // must start with dash
        if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
        // read in the args
        if (argv[i][1] == 'i') { input_dataset_prefix = argv[i+1]; }
        else if (argv[i][1] == 'o') { output_log_file = argv[i+1]; }
        else if (argv[i][1] == 'b') { B = atoi(argv[i+1]); }
        else if (argv[i][1] == 't') { T = atoi(argv[i+1]); }
        else if (argv[i][1] == 'l') { learning_rate = atof(argv[i+1]); }
        else if (argv[i][1] == 'v') { val_loss_every = atoi(argv[i+1]); }
        else if (argv[i][1] == 'm') { val_max_batches = atoi(argv[i+1]); }
        else if (argv[i][1] == 's') { sample_every = atoi(argv[i+1]); }
        else if (argv[i][1] == 'g') { genT = atoi(argv[i+1]); }
        else { error_usage(); }
    }
    printf("+-----------------------+----------------------------------------------------+\n");
    printf("| Parameter             | Value                                              |\n");
    printf("+-----------------------+----------------------------------------------------+\n");
    printf("| input dataset prefix  | %-50s |\n", input_dataset_prefix);
    printf("| output log file       | %-50s |\n", output_log_file == NULL ? "NULL" : output_log_file);
    printf("| batch size B          | %-50d |\n", B);
    printf("| sequence length T     | %-50d |\n", T);
    printf("| learning rate         | %-50f |\n", learning_rate);
    printf("| val_loss_every        | %-50d |\n", val_loss_every);
    printf("| val_max_batches       | %-50d |\n", val_max_batches);
    printf("| sample_every          | %-50d |\n", sample_every);
    printf("| genT                  | %-50d |\n", genT);
    printf("+-----------------------+----------------------------------------------------+\n");

    // set up the device
    int deviceIdx = 0;
    cudaCheck(cudaSetDevice(deviceIdx));
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, deviceIdx);
    // set up cuBLAS and cuBLASLt; enable TF32 on Ampere (compute capability >= 8) and newer
    cublasCheck(cublasCreate(&cublas_handle));
    cublasCheck(cublasLtCreate(&cublaslt_handle));
    int enable_tf32 = deviceProp.major >= 8 ? 1 : 0;
    cublas_compute_type = enable_tf32 ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
    cublasMath_t cublas_math_mode = enable_tf32 ? CUBLAS_TF32_TENSOR_OP_MATH : CUBLAS_DEFAULT_MATH;
    cublasCheck(cublasSetMathMode(cublas_handle, cublas_math_mode));
    // set up the (global) cuBLASLt workspace
    cudaCheck(cudaMalloc(&cublaslt_workspace, cublaslt_workspace_size));
    printf("| device                | %-50s |\n", deviceProp.name);
    printf("| TF32                  | %-50s |\n", enable_tf32 ? "enabled" : "disabled");
    printf("+-----------------------+----------------------------------------------------+\n");

    // build the GPT-2 model from a checkpoint
    GPT2 model;
    gpt2_build_from_checkpoint(&model, "gpt2_124M.bin");
    printf("| max_sequence_length T | %-50d |\n", model.config.max_seq_len);
    printf("| vocab_size V          | %-50d |\n", model.config.vocab_size);
    printf("| padded_vocab_size Vp  | %-50d |\n", model.config.padded_vocab_size);
    printf("| num_layers L          | %-50d |\n", model.config.num_layers);
    printf("| num_heads NH          | %-50d |\n", model.config.num_heads);
    printf("| channels C            | %-50d |\n", model.config.channels);
    printf("| num_parameters        | %-50zu |\n", model.num_parameters);
    printf("+-----------------------+----------------------------------------------------+\n");

    // build DataLoaders for both train and val
    char train_tokens_filename[128];
    char val_tokens_filename[128];
    assert(strlen(input_dataset_prefix) < 100); // being a bit lazy here, make sure we don't overflow
    sprintf(train_tokens_filename, "%s_train.bin", input_dataset_prefix);
    sprintf(val_tokens_filename, "%s_val.bin", input_dataset_prefix);
    DataLoader train_loader;
    dataloader_init(&train_loader, train_tokens_filename, B, T);
    DataLoader val_loader;
    dataloader_init(&val_loader, val_tokens_filename, B, T);
    int train_num_batches = train_loader.num_batches; // let's do 1 epoch by default for now
    int val_num_batches = train_loader.num_batches < val_max_batches ? train_loader.num_batches : val_max_batches;
    printf("| train_num_batches     | %-50d |\n", train_num_batches);
    printf("| val_num_batches       | %-50d |\n", val_num_batches);
    printf("+-----------------------+----------------------------------------------------+\n");
    printf("allocated %d MiB for model parameters\n", (int)round(model.num_parameters * sizeof(float) / (1024 * 1024)));

    // set up the Logger
    Logger logger;
    logger_init(&logger, output_log_file);

    // build the Tokenizer
    Tokenizer tokenizer;
    tokenizer_init(&tokenizer, "gpt2_tokenizer.bin");

    // some memory for generating samples from the model
    unsigned long long rng_state = 1337;
    int* gen_tokens = (int*)mallocCheck(B * T * sizeof(int));
    float* cpu_logits = (float*)mallocCheck(model.config.vocab_size * sizeof(float));

    // train
    struct timespec start, end;
    double total_sum_iteration_time_s = 0.0;
    for (int step = 0; step <= train_num_batches; step++) {
        int last_step = step == train_num_batches;

        // once in a while estimate the validation loss
        if (step % val_loss_every == 0 || last_step) {
            float val_loss = 0.0f;
            dataloader_reset(&val_loader);
            for (int i = 0; i < val_num_batches; i++) {
                dataloader_next_batch(&val_loader);
                gpt2_forward(&model, val_loader.inputs, val_loader.targets, B, T);
                val_loss += model.mean_loss;
            }
            val_loss /= val_num_batches;
            printf("val loss %f\n", val_loss);
            logger_log_val(&logger, step, val_loss);
        }

        // once in a while do model inference to print generated text
        if (step > 0 && step % sample_every == 0 || last_step) {
            // fill up gen_tokens with GPT2_EOT, which kicks off the generation
            for(int i = 0; i < B * T; ++i) {
                gen_tokens[i] = GPT2_EOT;
            }
            // now sample from the model autoregressively
            printf("generating:\n---\n");
            for (int t = 1; t < genT; t++) {
                // we re-calculate the forward pass for all of (B,T) positions from scratch,
                // but the inference here is just for sanity checking anyway
                gpt2_forward(&model, gen_tokens, NULL, B, T);
                // move the logits for position t-1 back to the CPU (only the first
                // vocab_size entries, ignoring the padding) and sample from them
                float* logits = model.acts.output + (t - 1) * model.config.padded_vocab_size;
                cudaCheck(cudaMemcpy(cpu_logits, logits, model.config.vocab_size * sizeof(float), cudaMemcpyDeviceToHost));
                float coin = random_f32(&rng_state);
                int next_token = sample_softmax(cpu_logits, model.config.vocab_size, coin);
                gen_tokens[t] = next_token;
                // print the generated token, either using the Tokenizer or as a raw id
                if (tokenizer.init_ok) {
                    const char* token_str = tokenizer_decode(&tokenizer, next_token);
                    safe_printf(token_str);
                } else {
                    printf("%d ", next_token);
                }
                fflush(stdout);
            }
            printf("\n---\n");
        }

        // we loop to step <= train_num_batches so that we also eval/sample after
        // the very last update; then we break right here as we're done
        if (last_step) { break; }

        // do a training step
        clock_gettime(CLOCK_MONOTONIC, &start);
        dataloader_next_batch(&train_loader);
        gpt2_forward(&model, train_loader.inputs, train_loader.targets, B, T);
        gpt2_zero_grad(&model);
        gpt2_backward(&model);
        gpt2_update(&model, learning_rate, 0.9f, 0.999f, 1e-8f, 0.0f, step+1);
        cudaCheck(cudaDeviceSynchronize()); // finish all CUDA work to get precise timings
        clock_gettime(CLOCK_MONOTONIC, &end);
        double time_elapsed_s = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) / 1e9;
        total_sum_iteration_time_s += time_elapsed_s;
        int tokens_per_second = (B * T) / time_elapsed_s;
        printf("step %4d/%d: train loss %f (%f ms, %d tok/s)\n", step + 1, train_num_batches, model.mean_loss, time_elapsed_s * 1000, tokens_per_second);
        logger_log_train(&logger, step, model.mean_loss);
    }
    printf("total average iteration time: %f ms\n", total_sum_iteration_time_s / train_num_batches * 1000);

    // free
    dataloader_free(&train_loader);
    dataloader_free(&val_loader);
    tokenizer_free(&tokenizer);
    gpt2_free(&model);
    free(cpu_logits);
    free(gen_tokens);
    cudaCheck(cudaFree(cublaslt_workspace));
    cublasCheck(cublasDestroy(cublas_handle));
    cublasCheck(cublasLtDestroy(cublaslt_handle));
    logger_free(&logger);

    return 0;
}
#endif

Walkthrough

This is a C program that trains a GPT-2 model, using NVIDIA's CUDA platform for GPU acceleration. GPT-2 is a Transformer-based natural-language model commonly used for text generation. The program covers data loading, model construction, the forward pass, the backward pass, and parameter updates. The main parts are analyzed below:

  1. Header includes: the file starts by including headers for the C standard library, math functions, timing, assertions, floating-point limits, string handling, and UNIX system calls.

  2. CUDA and cuBLAS libraries: the program uses the CUDA runtime API together with cuBLAS and cuBLASLt, NVIDIA's GPU-accelerated linear-algebra libraries; main creates the handles, picks TF32 or FP32 compute, and allocates a 32 MiB cuBLASLt workspace.

  3. Custom macros: CEIL_DIV computes a ceiling division, used to round kernel grid sizes up; cudaCheck and cublasCheck wrap CUDA and cuBLAS calls with error checks that print the failing file and line and abort.
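Every launcher in this file sizes its grid with the same pattern; a minimal sketch (some_kernel and its arguments are illustrative, not part of the file):

// one thread per element; CEIL_DIV rounds up so grid_size * block_size >= N
// even when N is not a multiple of block_size, e.g. N = 1000 -> 4 blocks of 256
const int block_size = 256;
const int grid_size = CEIL_DIV(N, block_size);
some_kernel<<<grid_size, block_size>>>(out, inp, N); // the kernel guards with if (idx < N)
cudaCheck(cudaGetLastError());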

  4. Device functions and kernels: the program defines device functions (run on the GPU, e.g. add_float4) and kernels (launched in parallel across GPU threads), such as encoder_forward_kernel3, encoder_backward_kernel and the GELU kernels, which implement the model's forward and backward computations.
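For instance, gelu_forward_kernel evaluates the tanh approximation of GELU elementwise:

GELU(x) ≈ 0.5 · x · (1 + tanh( sqrt(2/π) · (x + 0.044715 · x³) ))

gelu_backward_kernel differentiates exactly this expression, which is why both share the GELU_SCALING_FACTOR = sqrtf(2.0f / M_PI) constant.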

  5. Forward- and backward-pass launchers: encoder_forward, encoder_backward, layernorm_forward, matmul_forward_cublaslt and friends drive the forward pass; matmul_backward, layernorm_backward and friends drive the backward pass.
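As a concrete example, layernorm_forward_kernel3 computes, for each of the N = B·T positions over the C channels (ε = 1e-5, matching the rsqrtf call in the kernel):

μ  = (1/C) · Σᵢ xᵢ
σ² = (1/C) · Σᵢ (xᵢ − μ)²
yᵢ = (xᵢ − μ) / sqrt(σ² + ε) · wᵢ + bᵢ

The kernel also stores μ (mean) and 1/sqrt(σ² + ε) (rstd), so layernorm_backward_kernel2 can reuse them instead of recomputing.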

  6. Attention: the program implements the forward and backward passes of causal multi-head attention, the core of the Transformer, as a permute, two batched GEMMs and an online softmax.
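Per head, with head size HS = C/NH, attention_forward computes:

preatt = Q·Kᵀ / sqrt(HS)
att    = softmax(preatt)    (causal: position t only attends to positions t2 ≤ t)
out    = att·V

The two matrix products use cublasSgemmStridedBatched across the B·NH heads, and the 1/sqrt(HS) scale and the causal mask are folded into softmax_forward_kernel5 (the scale is passed as inv_temperature).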

  7. Optimizer: the adamw_kernel2 kernel implements the AdamW optimizer used to update the model parameters.
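adamw_kernel2 is the textbook AdamW update; note that lerp(g, m, β) = β·m + (1−β)·g, so the two lerp calls are exactly the moment updates:

m = β₁·m + (1−β₁)·g
v = β₂·v + (1−β₂)·g²
m̂ = m / (1−β₁ᵗ),   v̂ = v / (1−β₂ᵗ)
θ = θ − lr · ( m̂ / (sqrt(v̂) + ε) + λ·θ )

The decoupled weight decay (the λ·θ term) is applied directly to the parameter rather than mixed into the gradient, which is what distinguishes AdamW from Adam with L2 regularization. In this file, main calls gpt2_update with β₁ = 0.9, β₂ = 0.999, ε = 1e-8 and λ = 0.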

  8. Model construction: gpt2_build_from_checkpoint restores the model configuration and parameters from a checkpoint file.
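The checkpoint layout it expects is simple: a 256-int header followed by all parameter tensors as flat float32 data, in the field order of ParameterTensors. The header fields read above:

// header of gpt2_124M.bin, as read by gpt2_build_from_checkpoint
// model_header[0] = 20240326   magic number
// model_header[1] = 3          version (bumped from 1 to 3 for the padded vocab)
// model_header[2] = max_seq_len (maxT)
// model_header[3] = vocab_size (V)
// model_header[4] = num_layers (L)
// model_header[5] = num_heads (NH)
// model_header[6] = channels (C)
// model_header[7] = padded_vocab_size (Vp)
// ...followed by num_parameters float32 values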

  9. Forward pass: gpt2_forward runs the model forward and, when targets are given, computes the mean cross-entropy loss.

  10. Backward pass and parameter update: gpt2_backward backpropagates to compute gradients; gpt2_update applies the AdamW step to the parameters.

  11. Data loader: the DataLoader struct and its functions stream training and validation token batches from the *.bin files.
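A detail worth noting: inputs and targets are two overlapping views into one pinned buffer of B*T+1 tokens, so the target at position i is simply the next token:

// loader->batch holds B*T + 1 consecutive tokens from the file
// inputs  = batch[0 .. B*T-1]
// targets = batch[1 .. B*T]    (i.e. targets[i] == inputs[i+1])
loader->inputs  = loader->batch;
loader->targets = loader->batch + 1; // next-token prediction falls out of the overlap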

  12. Logging: the Logger struct and its functions record validation and training losses during the run.

  13. Main function: main is the entry point. It initializes the model, the data loaders and the logger, then enters the training loop: load a batch, run the forward pass, compute the loss, backpropagate, update the parameters, and periodically log validation loss and generate text samples.
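Stripped of the logging, timing and sampling, one optimization step in main reduces to:

dataloader_next_batch(&train_loader);
gpt2_forward(&model, train_loader.inputs, train_loader.targets, B, T); // loss lands in model.mean_loss
gpt2_zero_grad(&model);
gpt2_backward(&model);
gpt2_update(&model, learning_rate, 0.9f, 0.999f, 1e-8f, 0.0f, step+1);  // AdamW, weight_decay = 0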

  14. Error handling and argument parsing: error_usage and the loop over argv let the user customize the training parameters from the command line.

  15. Random number generation: an xorshift*-based RNG drives the random sampling used during text generation.
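random_f32 keeps the top 24 bits of each 32-bit xorshift* output and divides by 2²⁴ = 16777216 to get a float in [0, 1). sample_softmax then inverts the CDF without ever normalizing: instead of dividing every exp(logit) by the partition sum, it multiplies the coin by that sum and returns the smallest i with

coin · Σⱼ exp(lⱼ)  <  Σ_{j ≤ i} exp(lⱼ)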

  16. Tokenizer: a Tokenizer maps token IDs back to text, so the sampled IDs can be printed as readable output.

Overall, this is a complete training-loop implementation: data loading, model construction, training, validation and text generation. It also times each step with clock_gettime to report tokens per second, and at the end frees every resource it allocated: GPU memory, file handles and the log file. It is a clear, self-contained example of how to train a Transformer-based model on the GPU with CUDA and cuBLAS.
