Chunked File Upload Based on RuoYi (Vue3 Version)

As the title says, the framework is the front/back-end separated version of RuoYi: the frontend is Element Plus (Vue 3) and the backend is Spring Boot.
Source code: https://github.com/yangzongzhuan/RuoYi-Vue3

1. Frontend

The frontend is extracted into a single component; just import it wherever it is needed.
The frontend code is adapted from: https://v3u.cn/a_id_175
The spark-md5 dependency needs to be installed as well:

npm install spark-md5

index.vue

<template>
	<div class="container">
		<el-button type="primary" @click="() => { state.uploadVisible = true }">
			<el-icon>
				<UploadFilled />
			</el-icon>&nbsp;&nbsp;Click to upload
		</el-button>
		<el-dialog class="dialog" v-model="state.uploadVisible" title="文件上传" width="24%" center>
			<el-upload class="upload" drag :http-request="upload" :ref="upload" :action="uploadUrl"
				:disabled="state.disabled" :data="state.uploadData" :headers="headers" :on-error="onError"
				:before-remove="beforeRemove" name="file">
				<el-icon class="el-icon--upload">
					<upload-filled />
				</el-icon>
				<div class="el-upload__text">
					拖动或 <em>点击上传</em>
				</div>
				<template #tip>
					<div class="el-upload__tip">
						<el-icon><InfoFilled /></el-icon>支持上传大文件
					</div>
				</template>
			</el-upload>
		</el-dialog>
	</div>
</template>

<script setup name="ChunkUpload">
import { reactive, ref } from "vue";
import { ElMessage } from "element-plus";
import { getToken } from "@/utils/auth";
import { defineStore } from 'pinia'
import SparkMD5 from "spark-md5";
import axios from "axios";

// Pinia store holding the in-flight XHR instances so they can be aborted
const chunkStore = defineStore('chunk', {
	state: () => ({
		chunkUploadXhr: []
	})
});

const state = reactive({
	uploadVisible: false,
	disabled: false,
	uploadData: {},
})

const headers = ref({ Authorization: "Bearer " + getToken() });

const props = defineProps({
	uploadUrl: {
		type: String,
		default: '',
	},
	mergeUrl: {
		type: String,
		default: '',
	},
});

const onError = (err, file, fileList) => {
	// Abort any chunk requests that are still in flight
	chunkStore().chunkUploadXhr.forEach(item => {
		item.abort()
	})
	ElMessage.error('File upload failed, please retry');
	state.disabled = false;
}
const beforeRemove = (file) => {
	// If chunks are still uploading, abort them before removing the file
	if (file.percentage !== 100) {
		chunkStore().chunkUploadXhr.forEach(item => {
			item.abort()
		})
	}
}

// Build an Error from a failed XHR
const getError = (action, option, xhr) => {
	let msg;
	if (xhr.response) {
		msg = `${xhr.response.error || xhr.response}`;
	} else if (xhr.responseText) {
		msg = `${xhr.responseText}`;
	} else {
		msg = `fail to post ${action} ${xhr.status}`;
	}
	const err = new Error(msg);
	err.status = xhr.status;
	err.method = "post";
	err.url = action;
	return err;
}
// After the merge completes, parse the JSON returned by the server
const getBody = (xhr) => {
	const text = xhr.responseText || xhr.response;
	if (!text) {
		return text;
	}
	try {
		return JSON.parse(text);
	} catch (e) {
		return text;
	}
}

// Custom request for chunked upload; it overrides el-upload's default upload behavior
const upload = (option) => {
	if (!option) {
		return;
	}
	if (typeof XMLHttpRequest === "undefined") {
		return;
	}
	if (!option.file) {
		return;
	}
	const spark = new SparkMD5.ArrayBuffer(); // incremental MD5 over ArrayBuffers
	const fileReader = new FileReader(); // used to read the first chunk for hashing
	const action = option.action; // upload endpoint
	const chunkSize = 1024 * 1024 * 1; // chunk size; 1 MB here for testing
	let md5 = ""; // unique identifier for this upload
	const optionFile = option.file; // the file to be chunked
	let fileChunkedList = []; // the list of chunks
	const percentage = []; // per-chunk uploaded byte counts, used for the progress bar

	// Slice the file into chunks and push them to fileChunkedList;
	// the first chunk is also read to compute the file's MD5
	for (let i = 0; i < optionFile.size; i = i + chunkSize) {
		const tmp = optionFile.slice(i, Math.min(i + chunkSize, optionFile.size));
		if (i === 0) {
			fileReader.readAsArrayBuffer(tmp);
		}
		fileChunkedList.push(tmp);
	}

	// Once the first chunk has been read, compute the MD5 used as the upload identifier
	fileReader.onload = async (e) => {
		state.disabled = true;
		spark.append(e.target.result);
		// Append a timestamp so repeated uploads of the same file get distinct identifiers
		md5 = spark.end() + new Date().getTime();
		// Turn fileChunkedList into FormData objects carrying the data the upload needs
		fileChunkedList = fileChunkedList.map((item, index) => {
			const formData = new FormData();
			if (option.data) {
				// Merge in any extra data passed from outside
				Object.keys(option.data).forEach((key) => {
					formData.append(key, option.data[key]);
				});
			}
			// Send whichever of these fields the backend needs; extra parameters can be appended here
			formData.append(option.filename, item, option.file.name); // the chunk itself
			formData.append("chunkNumber", index + 1); // current chunk number (1-based)
			formData.append("chunkSize", chunkSize); // nominal chunk size
			formData.append("currentChunkSize", item.size); // actual size of this chunk
			formData.append("totalSize", optionFile.size); // total file size
			formData.append("identifier", md5); // upload identifier
			formData.append("filename", option.file.name); // file name
			formData.append("totalChunks", fileChunkedList.length); // total number of chunks
			return { formData: formData, index: index };
		});

		// Update the overall progress bar percentage
		function updatePercentage(e) {
			let loaded = 0; // total bytes uploaded so far
			percentage.forEach((item) => {
				loaded += item;
			});
			e.percent = Number(((loaded / optionFile.size) * 100).toFixed(1));
			option.onProgress(e);
		};

		// Upload queue; limit is the number of concurrent requests (2 by default)
		function sendRequest(chunks, limit = 2) {
			return new Promise((resolve, reject) => {
				const len = chunks.length;
				let counter = 0;
				let isStop = false;
				const start = async () => {
					if (isStop) {
						return;
					}
					const item = chunks.shift();
					if (item) {
						const xhr = new XMLHttpRequest();
						// Keep a reference to the request so onError/beforeRemove can abort it
						chunkStore().chunkUploadXhr.push(xhr);
						const index = item.index;
						// Chunk upload failure callback
						xhr.onerror = function error(e) {
							console.log("->", e);
							isStop = true;
							reject(e);
						};
						// Chunk upload success callback
						xhr.onload = function onload() {
							if (xhr.status < 200 || xhr.status >= 300) {
								isStop = true;
								reject(getError(action, option, xhr));
								return;
							}
							if (counter === len - 1) {
								// The last chunk has finished uploading
								resolve();
							} else {
								counter++;
								start();
							}
						};
						// Chunk upload progress callback
						if (xhr.upload) {
							xhr.upload.onprogress = function progress(e) {
								if (e.total > 0) {
									e.percent = (e.loaded / e.total) * 100;
								}
								percentage[index] = e.loaded;
								updatePercentage(e);
							};
						}
						xhr.open("post", action, true);
						if (option.withCredentials && "withCredentials" in xhr) {
							xhr.withCredentials = true;
						}
						const headers = option.headers || {};
						for (const key in headers) {
							if (headers.hasOwnProperty(key) && headers[key] !== null) {
								xhr.setRequestHeader(key, headers[key]);
							}
						}
						// Send this chunk
						xhr.send(item.formData);
					}
				};
				while (limit > 0) {
					setTimeout(() => {
						start();
					}, Math.random() * 1000);
					limit -= 1;
				}
			});
		}
		try {
			// Start the upload queue and wait for all chunks to finish
			await sendRequest(fileChunkedList, 2);
			// Adjust these parameters to match your backend
			const data = {
				identifier: md5,
				fileName: option.file.name,
				totalSize: optionFile.size,
			};
			// Set the request headers, including the token
			axios.defaults.headers = {
				"Content-Type": "application/json",
				...option.headers,
			};
			// Ask the backend to merge the chunks
			const fileInfo = await axios(
				{
					method: "post",
					url: props.mergeUrl,
					data: data,
				}
			).catch((error) => {
				console.log("ERRRR:: ", error.response.data);
			});
			if (fileInfo && fileInfo.data.code === 200) {
				const success = getBody(fileInfo.request);
				option.onSuccess(success);
				state.disabled = false;
				return;
			}
		} catch (error) {
			console.log(error);
			option.onError(error);
		}
	};
}

</script>

<style scoped lang="scss">
</style>

Usage example:
test.vue

<template>
<div>
	<h1>Chunked Upload</h1>
	<ChunkUpload :uploadUrl="'http://localhost:8080/common/chunk/upload/'"
		:mergeUrl="'http://localhost:8080/common/chunk/merge/'"></ChunkUpload>
</div>
</template>
<script setup name="test">
import ChunkUpload from "@/components/ChunkUpload/index.vue";
</script>
<style scoped lang="scss">
</style>

Note that you only need to pass in the chunk upload URL and the file merge URL.

2. Backend

controller

/**
 * Chunked file upload
 */
@PostMapping("/chunk/upload")
public AjaxResult chunkUpload(ChunkUploadDTO dto, MultipartFile file) throws Exception {
    // Save the chunk to disk and get back the folder it was written to
    String filePath = FileUploadUtils.chunkUpload(file, dto.getChunkNumber(), dto.getIdentifier());
    // Record the chunk's metadata so the merge step can locate it later
    chunkUploadService.chunkUpload(dto.getChunkNumber(), dto.getChunkSize(), dto.getCurrentChunkSize(),
            dto.getTotalSize(), dto.getIdentifier(), file.getOriginalFilename(), file.getOriginalFilename(),
            dto.getTotalChunks(), filePath);
    return AjaxResult.success();
}

/**
 * Merge the uploaded chunks
 */
@PostMapping("/chunk/merge")
public AjaxResult chunkMerge(@RequestBody ChunkMergeDTO dto) throws Exception {
    chunkUploadService.merge(dto.getIdentifier());
    return AjaxResult.success();
}
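
The controller relies on FileUploadUtils.chunkUpload, which isn't shown above. Based on the storage scheme described in the "core idea" section below (MD5 identifier as the folder name, chunk number as the file name), a minimal sketch could look like the following; the upload root via RuoYiConfig.getProfile() and the "chunk" subdirectory name are my assumptions, not necessarily the project's actual code:

import java.io.File;
import java.io.IOException;
import org.springframework.web.multipart.MultipartFile;
import com.ruoyi.common.config.RuoYiConfig;

public class FileUploadUtils {
    /**
     * Hypothetical sketch: store one chunk under {profile}/chunk/{identifier}/{chunkNumber}
     * and return the folder path, which is recorded per chunk and read back by merge().
     */
    public static String chunkUpload(MultipartFile file, long chunkNumber, String identifier) throws IOException {
        // One folder per upload, named by the MD5 identifier (assumed layout)
        File dir = new File(RuoYiConfig.getProfile() + File.separator + "chunk" + File.separator + identifier);
        if (!dir.exists() && !dir.mkdirs()) {
            throw new IOException("cannot create chunk directory: " + dir);
        }
        // The chunk number is the file name, so merge() can sort chunks numerically
        file.transferTo(new File(dir, String.valueOf(chunkNumber)));
        return dir.getAbsolutePath();
    }
}

ChunkUploadDTO and ChunkMergeDTO are plain holders for the form fields appended on the frontend (chunkNumber, chunkSize, currentChunkSize, totalSize, identifier, totalChunks) with matching getters and setters.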

service

/**
 * Record one uploaded chunk
 *
 * @param chunkNumber      chunk number (1-based)
 * @param chunkSize        nominal chunk size
 * @param currentChunkSize actual size of this chunk
 * @param totalSize        total file size
 * @param identifier       upload identifier (MD5 + timestamp)
 * @param fileName         file name
 * @param originFileName   original file name
 * @param totalChunks      total number of chunks
 * @param filePath         folder the chunk was written to
 */
public void chunkUpload(long chunkNumber, long chunkSize, long currentChunkSize, long totalSize,
                        String identifier, String fileName, String originFileName, long totalChunks, String filePath) {
    SegmentFile segment = new SegmentFile();
    segment.setFilePath(filePath);
    segment.setFileName(fileName);
    segment.setOrginFileName(originFileName);
    segment.setFileSize(totalSize);
    segment.setSegmentIndex(chunkNumber);
    segment.setSegmentSize(chunkSize);
    segment.setSegmentTotal(totalChunks);
    segment.setMd5Key(identifier);
    service.insertSegmentFile(segment);
}
    
/**
 * Merge the chunks of one upload into the final file
 *
 * @param identifier upload identifier
 */
public void merge(String identifier) {
    SegmentFile segment = new SegmentFile();
    segment.setMd5Key(identifier);
    List<SegmentFile> segments = service.selectSegmentFileList(segment);
    if (segments.isEmpty()) {
        throw new RuntimeException("segments is empty!");
    }
    String baseDir = segments.get(0).getFilePath();
    // The folder holding this upload's chunks
    File chunkFileFolder = new File(baseDir);
    // List the chunk files; each one is named by its chunk number
    File[] chunks = chunkFileFolder.listFiles();
    if (chunks == null || chunks.length == 0) {
        throw new RuntimeException("no chunk files found in " + baseDir);
    }
    // Sort numerically by chunk number so the pieces are concatenated in order
    chunks = Arrays.stream(chunks).sorted(Comparator.comparingInt(o -> Integer.valueOf(o.getName()))).toArray(File[]::new);
    // The merged file, named with the original file name
    File mergeFile = new File(baseDir + File.separator + segments.get(0).getOrginFileName());
    RandomAccessFile raf = null;
    SequenceInputStream sis = null;
    try {
        raf = new RandomAccessFile(mergeFile, "rw");
        // Chain the chunk streams together in order
        Vector<InputStream> vt = new Vector<>();
        for (File chunkFile : chunks) {
            vt.addElement(new FileInputStream(chunkFile));
        }
        sis = new SequenceInputStream(vt.elements());
        int len;
        byte[] buff = new byte[1024];
        while ((len = sis.read(buff)) != -1) {
            raf.write(buff, 0, len);
        }
        // Delete the chunk files once merged
        for (File chunk : chunks) {
            chunk.delete();
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            // Closing the SequenceInputStream also closes the underlying chunk streams
            if (sis != null) {
                sis.close();
            }
            if (raf != null) {
                raf.close();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
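
For reference, the SegmentFile entity used above isn't shown in the post; its fields mirror the segment_file table defined next. A minimal sketch follows (Lombok's @Data is my shorthand here, not necessarily what the project uses):

import lombok.Data;

@Data
public class SegmentFile {
    private Long id;              // primary key
    private String filePath;      // folder caching this upload's chunks
    private String fileName;      // file name
    private String orginFileName; // original file name (spelling matches the column)
    private Long fileSize;        // total file size
    private Long segmentIndex;    // chunk number
    private Long segmentSize;     // chunk size
    private Long segmentTotal;    // total number of chunks
    private String md5Key;        // MD5 identifier
}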

In my service the chunk records are persisted to the database. That isn't ideal; caching them in Redis would be a better fit (see the sketch after the DDL below). The table's SQL is as follows:

CREATE TABLE `segment_file` (
  `id` bigint NOT NULL AUTO_INCREMENT COMMENT 'auto-increment primary key',
  `file_path` varchar(200) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL COMMENT 'folder caching the chunks',
  `file_name` varchar(200) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL COMMENT 'file name',
  `orgin_file_name` varchar(200) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL COMMENT 'original file name',
  `file_size` double DEFAULT NULL COMMENT 'file size',
  `segment_index` int DEFAULT NULL COMMENT 'chunk number',
  `segment_size` int DEFAULT NULL COMMENT 'chunk size',
  `segment_total` int DEFAULT NULL COMMENT 'total number of chunks',
  `md5_key` varchar(200) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL COMMENT 'md5 identifier',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=149 DEFAULT CHARSET=utf8mb3 COMMENT='file chunk upload';
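
The database rows are really transient bookkeeping: they mainly let merge() find the chunk folder for an identifier. A minimal sketch of the Redis alternative using Spring's StringRedisTemplate follows; the key naming scheme and the 24-hour expiry are my assumptions:

import java.time.Duration;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Service;

@Service
public class ChunkCacheService {
    @Autowired
    private StringRedisTemplate redisTemplate;

    // Record the chunk folder and mark this chunk as received, in one hash per upload
    public void recordChunk(String identifier, long chunkNumber, String filePath) {
        String key = "chunk:upload:" + identifier;
        redisTemplate.opsForHash().put(key, "filePath", filePath);
        redisTemplate.opsForHash().put(key, String.valueOf(chunkNumber), "1");
        // Expire stale uploads so abandoned chunks don't leave bookkeeping behind forever
        redisTemplate.expire(key, Duration.ofHours(24));
    }

    // Look up the chunk folder when merging
    public String getFilePath(String identifier) {
        Object path = redisTemplate.opsForHash().get("chunk:upload:" + identifier, "filePath");
        return path == null ? null : path.toString();
    }

    // Drop the bookkeeping after a successful merge
    public void clear(String identifier) {
        redisTemplate.delete("chunk:upload:" + identifier);
    }
}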

Core idea on the backend

The frontend slices the file into chunks. If a file is 12 MB and the chunk size is set to 1 MB, there will be 12 numbered chunks. Each chunk is sent to the backend with its sequential chunk number and the MD5 identifier (the number is needed later so the file can be reassembled in order; the MD5 identifier distinguishes this particular upload, and within one upload every chunk carries the same identifier, no matter how many chunks there are).
Before merging, the backend has to cache these chunk files and remember their locations and order. Here I use the chunk number as the file name and the MD5 identifier as the folder name.
When merging, the chunks must be sorted in order and read one by one into a single file; the write order has to match the slice order for the pieces to reassemble into a complete file. It's like slicing a block of tofu: cut it up, then put the slices back in order and you have a whole block again 😂
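
As a concrete illustration of that layout (the base path here is illustrative, not the project's actual path): for the 12 MB example, the chunk folder looks roughly like this before the merge, and merge() sorts the numeric file names to restore the order:

{profile}/chunk/{md5-identifier}/
    1
    2
    3
    ...
    12

After the merge, the reassembled file (named with the original file name) sits in the same folder and the numbered chunk files are deleted.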
