大家好,我是小悟。
什麼是大文件上傳
大文件上傳通常指上傳超過幾百MB甚至幾個GB的文件。與普通文件上傳相比,大文件上傳面臨以下挑戰:
- 內存限制 - 一次性加載整個文件到內存會導致內存溢出
- 網絡穩定性 - 上傳過程中網絡中斷需要能夠斷點續傳
- 超時問題 - 長時間上傳可能導致連接超時
- 進度監控 - 需要實時顯示上傳進度
- 文件校驗 - 確保文件完整性和安全性
解決方案:分片上傳
大文件上傳的核心思想是將文件分割成多個小塊,分別上傳,最後在服務器端合併。
前端代碼示例 (HTML + JavaScript)
<!DOCTYPE html>
<html>
<head>
<title>大文件上傳</title>
</head>
<body>
<input type="file" id="fileInput" />
<button onclick="uploadFile()">開始上傳</button>
<div id="progress"></div>
<script>
// Size of each upload slice: 2MB. The server reassembles chunks by index,
// so this value only needs to be consistent within one upload session.
const CHUNK_SIZE = 2 * 1024 * 1024; // 2MB
// Entry point: split the selected file into fixed-size chunks, skip chunks the
// server already holds (resumable upload), upload the rest sequentially, then
// ask the server to merge them into the final file.
async function uploadFile() {
const file = document.getElementById('fileInput').files[0];
if (!file) {
alert('請選擇文件');
return;
}
const totalChunks = Math.ceil(file.size / CHUNK_SIZE);
const fileMd5 = await calculateFileMD5(file);
// Ask the server whether the file (or part of it) was uploaded before.
const checkResult = await checkFileExists(file.name, fileMd5, file.size);
if (checkResult.uploaded) {
alert('文件已存在');
return;
}
// Set lookup instead of Array.includes for the already-uploaded indexes.
const alreadyUploaded = new Set(checkResult.uploadedChunks || []);
for (let index = 0; index < totalChunks; index++) {
if (!alreadyUploaded.has(index)) {
const start = index * CHUNK_SIZE;
const body = new FormData();
body.append('file', file.slice(start, start + CHUNK_SIZE));
body.append('chunkIndex', index);
body.append('totalChunks', totalChunks);
body.append('fileName', file.name);
body.append('fileMd5', fileMd5);
try {
await uploadChunk(body);
} catch (error) {
console.error(`分片 ${index} 上傳失敗:`, error);
alert('上傳失敗');
return;
}
}
// Progress advances for both freshly uploaded and skipped chunks.
updateProgress(index + 1, totalChunks);
}
// Every chunk is on the server; request the final merge.
await mergeChunks(file.name, fileMd5, totalChunks);
alert('上傳完成');
}
// POST a single chunk; resolves with the parsed JSON body,
// rejects with an Error on any non-2xx status.
async function uploadChunk(formData) {
const response = await fetch('/upload/chunk', {
method: 'POST',
body: formData
});
if (!response.ok) {
throw new Error('上傳失敗');
}
return response.json();
}
// Ask the server whether this file (or some of its chunks) already exists.
// encodeURIComponent is required here: raw names containing '&', '#', '=',
// spaces or non-ASCII characters would otherwise corrupt the query string.
function checkFileExists(fileName, fileMd5, fileSize) {
const query = `fileName=${encodeURIComponent(fileName)}` +
`&fileMd5=${encodeURIComponent(fileMd5)}` +
`&fileSize=${fileSize}`;
return fetch(`/upload/check?${query}`)
.then(response => response.json());
}
// Ask the server to assemble all uploaded chunks into the final file.
// Resolves with the parsed JSON merge result.
async function mergeChunks(fileName, fileMd5, totalChunks) {
const response = await fetch('/upload/merge', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ fileName, fileMd5, totalChunks })
});
return response.json();
}
// Render overall progress (completed chunks / total chunks) as a percentage.
function updateProgress(current, total) {
const percentage = Math.round((current / total) * 100);
document.getElementById('progress').innerHTML = `上傳進度: ${percentage}%`;
}
// Compute a placeholder fingerprint from file name + size (NOT a real MD5).
// Real projects should hash the file content with a library such as spark-md5.
async function calculateFileMD5(file) {
// encodeURIComponent first: btoa() throws InvalidCharacterError for any
// character outside Latin-1, so non-ASCII filenames (e.g. Chinese) would
// otherwise crash the whole upload before it starts.
return btoa(encodeURIComponent(file.name + file.size)).replace(/[^a-zA-Z0-9]/g, '');
}
</script>
</body>
</html>
後端Java代碼示例 (Spring Boot)
1. 配置文件上傳設置
@Configuration
public class UploadConfig {
/**
 * Raises the servlet multipart limits so multi-GB requests are not rejected.
 *
 * NOTE(review): these limits duplicate the spring.servlet.multipart.* entries
 * in application.properties — prefer keeping a single source of truth.
 */
@Bean
public MultipartConfigElement multipartConfigElement() {
MultipartConfigFactory factory = new MultipartConfigFactory();
// Spring Boot 2.1+ removed the String overloads; the factory takes DataSize.
factory.setMaxFileSize(DataSize.parse("10GB"));
factory.setMaxRequestSize(DataSize.parse("10GB"));
return factory.createMultipartConfig();
}
}
2. 文件上傳控制器
@RestController
@RequestMapping("/upload")
public class FileUploadController {
/** Root directory for completed uploads; chunks live under "<uploadDir>/chunks/<md5>". */
@Value("${file.upload-dir:/tmp/uploads}")
private String uploadDir;
/**
 * Checks whether the file already exists on the server (instant completion)
 * and, if not, which chunk indexes were already uploaded (resumable upload).
 */
@GetMapping("/check")
public ResponseEntity<CheckResult> checkFile(
@RequestParam String fileName,
@RequestParam String fileMd5,
@RequestParam Long fileSize) {
CheckResult result = new CheckResult();
result.setUploaded(false);
result.setUploadedChunks(new ArrayList<>());
// Client-supplied names are used to build filesystem paths below;
// reject traversal attempts ("..", separators) outright.
if (!isSafeName(fileName) || !isSafeName(fileMd5)) {
return ResponseEntity.badRequest().body(result);
}
File file = Paths.get(uploadDir, fileMd5, fileName).toFile();
// Same name, MD5 directory and size: treat as already uploaded.
if (file.exists() && file.length() == fileSize) {
result.setUploaded(true);
return ResponseEntity.ok(result);
}
result.setUploadedChunks(listUploadedChunks(fileMd5));
return ResponseEntity.ok(result);
}
/**
 * Stores one chunk as "<chunkDir>/<chunkIndex>". Chunks may arrive in any
 * order; the merge endpoint reassembles them by index.
 */
@PostMapping("/chunk")
public ResponseEntity<UploadResult> uploadChunk(
@RequestParam("file") MultipartFile file,
@RequestParam Integer chunkIndex,
@RequestParam Integer totalChunks,
@RequestParam String fileName,
@RequestParam String fileMd5) {
UploadResult result = new UploadResult();
// Validate before touching the filesystem: md5 becomes a directory name,
// chunkIndex becomes a file name.
if (!isSafeName(fileMd5) || chunkIndex == null || chunkIndex < 0
|| totalChunks == null || chunkIndex >= totalChunks) {
result.setSuccess(false);
result.setMessage("分片上傳失敗: 非法參數");
return ResponseEntity.badRequest().body(result);
}
try {
File chunkFolder = new File(getChunkDir(fileMd5));
// mkdirs() may race with a concurrent chunk upload; re-check existence.
if (!chunkFolder.exists() && !chunkFolder.mkdirs() && !chunkFolder.exists()) {
throw new IOException("無法創建分片目錄: " + chunkFolder);
}
file.transferTo(new File(chunkFolder, String.valueOf(chunkIndex)));
result.setSuccess(true);
result.setMessage("分片上傳成功");
return ResponseEntity.ok(result);
} catch (Exception e) {
result.setSuccess(false);
result.setMessage("分片上傳失敗: " + e.getMessage());
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(result);
}
}
/**
 * Concatenates chunks 0..totalChunks-1 into the final file, then removes the
 * chunk files. Chunks are only deleted after a fully successful merge, so a
 * failed merge remains resumable.
 */
@PostMapping("/merge")
public ResponseEntity<MergeResult> mergeChunks(@RequestBody MergeRequest request) {
MergeResult result = new MergeResult();
String fileName = request.getFileName();
String fileMd5 = request.getFileMd5();
Integer totalChunks = request.getTotalChunks();
if (!isSafeName(fileName) || !isSafeName(fileMd5)
|| totalChunks == null || totalChunks <= 0) {
result.setSuccess(false);
result.setMessage("文件合併失敗: 非法參數");
return ResponseEntity.badRequest().body(result);
}
try {
String chunkDir = getChunkDir(fileMd5);
// Fail fast if any chunk is missing — BEFORE creating/truncating the
// target file, so we never produce a silently corrupt merge.
for (int i = 0; i < totalChunks; i++) {
if (!new File(chunkDir, String.valueOf(i)).exists()) {
result.setSuccess(false);
result.setMessage("文件合併失敗: 缺少分片 " + i);
return ResponseEntity.badRequest().body(result);
}
}
File targetFile = Paths.get(uploadDir, fileMd5, fileName).toFile();
File parentDir = targetFile.getParentFile();
if (!parentDir.exists() && !parentDir.mkdirs() && !parentDir.exists()) {
throw new IOException("無法創建目錄: " + parentDir);
}
try (FileOutputStream fos = new FileOutputStream(targetFile)) {
for (int i = 0; i < totalChunks; i++) {
// Files.copy streams each chunk without loading it into memory.
Files.copy(new File(chunkDir, String.valueOf(i)).toPath(), fos);
}
}
// Merge succeeded: clean up chunk files and their directory.
for (int i = 0; i < totalChunks; i++) {
new File(chunkDir, String.valueOf(i)).delete();
}
new File(chunkDir).delete();
result.setSuccess(true);
result.setMessage("文件合併成功");
result.setFilePath(targetFile.getPath());
return ResponseEntity.ok(result);
} catch (Exception e) {
result.setSuccess(false);
result.setMessage("文件合併失敗: " + e.getMessage());
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(result);
}
}
/** Collects the numeric chunk-file names already present for this MD5. */
private List<Integer> listUploadedChunks(String fileMd5) {
File[] chunks = new File(getChunkDir(fileMd5)).listFiles();
List<Integer> indexes = new ArrayList<>();
if (chunks == null) {
return indexes;
}
for (File chunk : chunks) {
try {
indexes.add(Integer.parseInt(chunk.getName()));
} catch (NumberFormatException ignored) {
// Stray non-chunk files (editor temp files etc.) are skipped.
}
}
return indexes;
}
/** Builds the per-file chunk directory: <uploadDir>/chunks/<fileMd5>. */
private String getChunkDir(String fileMd5) {
return Paths.get(uploadDir, "chunks", fileMd5).toString();
}
/** Rejects empty values, "..", and path separators so client input cannot escape uploadDir. */
private boolean isSafeName(String name) {
return name != null && !name.isEmpty()
&& !name.contains("..") && !name.contains("/") && !name.contains("\\");
}
}
3. 數據傳輸對象
/** Response body for GET /upload/check: instant-upload status plus resumable chunk indexes. */
@Data
public class CheckResult {
// True when a file with the same name/MD5 directory and size already exists on the server.
private boolean uploaded;
// Indexes of chunks already stored, so the client can skip re-uploading them.
private List<Integer> uploadedChunks;
}
/** Response body for POST /upload/chunk: outcome of storing a single chunk. */
@Data
public class UploadResult {
// True when the chunk was written to the chunk directory successfully.
private boolean success;
// Human-readable status, including the exception message on failure.
private String message;
}
/** Request body for POST /upload/merge: identifies which chunk set to assemble. */
@Data
public class MergeRequest {
// Final file name used for the merged output.
private String fileName;
// Fingerprint that names the chunk directory (chunks/<fileMd5>).
private String fileMd5;
// Number of chunks expected; merge reads indexes 0..totalChunks-1.
private Integer totalChunks;
}
/** Response body for POST /upload/merge: outcome of assembling the final file. */
@Data
public class MergeResult {
// True when all chunks were concatenated and cleaned up successfully.
private boolean success;
// Human-readable status, including the exception message on failure.
private String message;
// Server-side path of the merged file (set only on success).
private String filePath;
}
4. 應用配置
# application.properties
spring.servlet.multipart.max-file-size=10GB
spring.servlet.multipart.max-request-size=10GB
file.upload-dir=/data/uploads
關鍵技術點
- 分片上傳:將大文件分割成小塊,分別上傳
- 斷點續傳:記錄已上傳的分片,網絡中斷後可以從中斷處繼續
- 文件校驗:通過MD5驗證文件完整性(注意:本文前端示例僅以文件名+大小模擬指紋,實際項目應使用 spark-md5 等庫對文件內容計算真實MD5,服務器端合併後再校驗一次)
- 進度監控:實時顯示上傳進度
- 內存優化:流式處理,避免內存溢出
優化建議
- 增加重試機制:網絡異常時自動重試
- 並行上傳:同時上傳多個分片提高速度
- 壓縮傳輸:對分片進行壓縮減少網絡傳輸量
- 安全驗證:添加身份驗證和文件類型檢查
- 分佈式存儲:支持分佈式文件系統存儲
這種方案可以有效解決大文件上傳的各種問題,提供穩定可靠的上傳體驗。
謝謝你看我的文章,既然看到這裏了,如果覺得不錯,隨手點個贊、轉發、在看三連吧,感謝感謝。那我們,下次再見。
您的一鍵三連,是我更新的最大動力,謝謝
山水有相逢,來日皆可期,謝謝閲讀,我們再會
我手中的金箍棒,上能通天,下能探海