| const fs = require('fs'); |
| const path = require('path'); |
| const mime = require('mime'); |
| const { v4 } = require('uuid'); |
| const { |
| isUUID, |
| megabyte, |
| FileContext, |
| FileSources, |
| imageExtRegex, |
| EModelEndpoint, |
| EToolResources, |
| mergeFileConfig, |
| AgentCapabilities, |
| checkOpenAIStorage, |
| removeNullishValues, |
| isAssistantsEndpoint, |
| getEndpointFileConfig, |
| } = require('librechat-data-provider'); |
| const { EnvVar } = require('@librechat/agents'); |
| const { logger } = require('@librechat/data-schemas'); |
| const { sanitizeFilename, parseText, processAudioFile } = require('@librechat/api'); |
| const { |
| convertImage, |
| resizeAndConvert, |
| resizeImageBuffer, |
| } = require('~/server/services/Files/images'); |
| const { addResourceFileId, deleteResourceFileId } = require('~/server/controllers/assistants/v2'); |
| const { addAgentResourceFile, removeAgentResourceFiles } = require('~/models/Agent'); |
| const { getOpenAIClient } = require('~/server/controllers/assistants/helpers'); |
| const { createFile, updateFileUsage, deleteFiles } = require('~/models/File'); |
| const { loadAuthValues } = require('~/server/services/Tools/credentials'); |
| const { getFileStrategy } = require('~/server/utils/getFileStrategy'); |
| const { checkCapability } = require('~/server/services/Config'); |
| const { LB_QueueAsyncCall } = require('~/server/utils/queue'); |
| const { getStrategyFunctions } = require('./strategies'); |
| const { determineFileType } = require('~/server/utils'); |
| const { STTService } = require('./Audio/STTService'); |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Wraps an upload-strategy function so the incoming file's `originalname`
 * is sanitized before the underlying upload runs. All other parameters are
 * forwarded unchanged.
 *
 * @param {Function} uploadFunction - Strategy upload function to wrap.
 * @returns {Function} Async wrapper with the same call signature.
 */
const createSanitizedUploadWrapper = (uploadFunction) => {
  return async (params) => {
    const { file, ...rest } = params;
    // Copy the file object rather than mutating the caller's multer file.
    const safeFile = {
      ...file,
      originalname: sanitizeFilename(file.originalname),
    };
    return uploadFunction({ ...rest, file: safeFile });
  };
};
|
|
| |
| |
| |
| |
| |
| |
/**
 * Updates usage for a set of file objects plus an optional list of extra
 * file ids, de-duplicating ids across both inputs.
 *
 * Previously the `Promise.all` + nullish-filter logic was duplicated in two
 * code paths (early return when `fileIds` was absent); this version queues
 * everything through one helper and awaits once.
 *
 * @param {Array<{ file_id: string }>} files - File objects whose usage should be updated.
 * @param {string[]} [fileIds] - Additional file ids to update usage for.
 * @returns {Promise<object[]>} Updated file documents; nullish results
 *   (e.g. files deleted out from under us) are filtered out.
 */
const processFiles = async (files, fileIds) => {
  const promises = [];
  const seen = new Set();

  // Queue a usage update at most once per unique file_id.
  const enqueue = (file_id) => {
    if (seen.has(file_id)) {
      return;
    }
    seen.add(file_id);
    promises.push(updateFileUsage({ file_id }));
  };

  for (const file of files) {
    enqueue(file.file_id);
  }

  for (const file_id of fileIds ?? []) {
    enqueue(file_id);
  }

  /** Filter out nullish values — occurs when a file lookup finds nothing */
  const results = await Promise.all(promises);
  return results.filter((result) => result != null);
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Queues a single file-deletion promise onto the shared `promises` array,
 * recording the file_id in `resolvedFileIds` only after deletion succeeds.
 *
 * @param {object} params
 * @param {object} params.req - Express request, forwarded to the strategy's deleteFile.
 * @param {object} params.file - File document; `file.source` selects the code path.
 * @param {Function} params.deleteFile - Strategy deletion function for this source.
 * @param {Promise[]} params.promises - Shared array the deletion promise is pushed onto.
 * @param {string[]} params.resolvedFileIds - Shared array of successfully deleted file ids.
 * @param {object} [params.openai] - OpenAI client; used for OpenAI-stored files.
 */
function enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai }) {
  if (checkOpenAIStorage(file.source)) {
    // OpenAI-stored files go through the async queue; the callback-style
    // LB_QueueAsyncCall is adapted to a Promise here.
    promises.push(
      new Promise((resolve, reject) => {
        LB_QueueAsyncCall(
          () => deleteFile(req, file, openai),
          [],
          (err, result) => {
            if (err) {
              logger.error('Error deleting file from OpenAI source', err);
              reject(err);
            } else {
              // Record the id only after the remote deletion succeeded.
              resolvedFileIds.push(file.file_id);
              resolve(result);
            }
          },
        );
      }),
    );
  } else {
    // Other sources delete directly; failures are logged and re-rejected so
    // the caller's Promise.allSettled can observe them.
    promises.push(
      deleteFile(req, file)
        .then(() => resolvedFileIds.push(file.file_id))
        .catch((err) => {
          logger.error('Error deleting file', err);
          return Promise.reject(err);
        }),
    );
  }
}
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Deletes a batch of files across their respective storage sources, then
 * removes the DB records of the files that were successfully deleted.
 * When the request body carries `assistant_id`/`agent_id` (+ `tool_resource`),
 * files are also detached from the assistant or agent.
 *
 * @param {object} params
 * @param {object} params.req - Express request; reads `req.config` and `req.body`
 *   (`assistant_id`, `agent_id`, `tool_resource`).
 * @param {object[]} params.files - File documents to delete (`file_id`, `source`).
 * @returns {Promise<void>}
 */
const processDeleteRequest = async ({ req, files }) => {
  const appConfig = req.config;
  const resolvedFileIds = [];
  // Cache of per-source deleteFile strategy functions.
  const deletionMethods = {};
  const promises = [];

  // Lazily-initialized OpenAI/Azure clients, created only when needed.
  const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
  const initializeClients = async () => {
    if (appConfig.endpoints?.[EModelEndpoint.assistants]) {
      const openAIClient = await getOpenAIClient({
        req,
        overrideEndpoint: EModelEndpoint.assistants,
      });
      client[FileSources.openai] = openAIClient.openai;
    }

    if (!appConfig.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
      return;
    }

    const azureClient = await getOpenAIClient({
      req,
      overrideEndpoint: EModelEndpoint.azureAssistants,
    });
    client[FileSources.azure] = azureClient.openai;
  };

  // Assistant-scoped deletions always need a client up front.
  if (req.body.assistant_id !== undefined) {
    await initializeClients();
  }

  // Files to detach from the agent's tool resource (batched below).
  const agentFiles = [];

  for (const file of files) {
    const source = file.source ?? FileSources.local;
    if (req.body.agent_id && req.body.tool_resource) {
      agentFiles.push({
        tool_resource: req.body.tool_resource,
        file_id: file.file_id,
      });
    }

    // Text-extracted files have no stored artifact; only the DB record goes.
    if (source === FileSources.text) {
      resolvedFileIds.push(file.file_id);
      continue;
    }

    // Initialize clients on demand for OpenAI-stored files.
    if (checkOpenAIStorage(source) && !client[source]) {
      await initializeClients();
    }

    const openai = client[source];

    if (req.body.assistant_id && req.body.tool_resource) {
      promises.push(
        deleteResourceFileId({
          req,
          openai,
          file_id: file.file_id,
          assistant_id: req.body.assistant_id,
          tool_resource: req.body.tool_resource,
        }),
      );
    } else if (req.body.assistant_id) {
      // Detach the file directly from the assistant.
      promises.push(openai.beta.assistants.files.del(req.body.assistant_id, file.file_id));
    }

    // Reuse a previously-resolved strategy function for this source.
    if (deletionMethods[source]) {
      enqueueDeleteOperation({
        req,
        file,
        deleteFile: deletionMethods[source],
        promises,
        resolvedFileIds,
        openai,
      });
      continue;
    }

    const { deleteFile } = getStrategyFunctions(source);
    if (!deleteFile) {
      throw new Error(`Delete function not implemented for ${source}`);
    }

    deletionMethods[source] = deleteFile;
    enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai });
  }

  if (agentFiles.length > 0) {
    promises.push(
      removeAgentResourceFiles({
        agent_id: req.body.agent_id,
        files: agentFiles,
      }),
    );
  }

  // Best-effort: failed deletions are settled (not thrown); only ids that
  // actually deleted are removed from the database.
  await Promise.allSettled(promises);
  await deleteFiles(resolvedFileIds);
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Saves a file fetched from a URL via the given storage strategy and records
 * it in the database.
 *
 * @param {object} params
 * @param {string} params.fileStrategy - Storage strategy to use.
 * @param {string} params.userId - Owner's user id.
 * @param {string} params.URL - Source URL to fetch.
 * @param {string} params.fileName - Name to store the file under.
 * @param {string} params.basePath - Storage base path.
 * @param {string} params.context - FileContext value recorded on the file.
 * @returns {Promise<object>} The created file document.
 * @throws {Error} When the strategy fails to save or record the file.
 */
const processFileURL = async ({ fileStrategy, userId, URL, fileName, basePath, context }) => {
  const { saveURL, getFileURL } = getStrategyFunctions(fileStrategy);
  try {
    // saveURL may return nothing; fall back to defaults for each field.
    const saveResult = (await saveURL({ userId, URL, fileName, basePath })) || {};
    const { bytes = 0, type = '', dimensions = {} } = saveResult;
    const filepath = await getFileURL({ fileName: `${userId}/${fileName}`, basePath });
    const fileRecord = {
      user: userId,
      file_id: v4(),
      bytes,
      filepath,
      filename: fileName,
      source: fileStrategy,
      type,
      context,
      width: dimensions.width,
      height: dimensions.height,
    };
    return await createFile(fileRecord, true);
  } catch (error) {
    logger.error(`Error while processing the image with ${fileStrategy}:`, error);
    throw new Error(`Failed to process the image with ${fileStrategy}. ${error.message}`);
  }
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Uploads an image from the request through the configured image strategy,
 * records it in the database, and either returns the record or sends it as
 * the HTTP response.
 *
 * @param {object} params
 * @param {object} params.req - Express request carrying `req.file` and `req.config`.
 * @param {object} params.res - Express response (used when `returnFile` is false).
 * @param {object} params.metadata - `file_id`, `temp_file_id`, `endpoint`.
 * @param {boolean} [params.returnFile=false] - Return the record instead of responding.
 * @returns {Promise<object|void>} The file record when `returnFile` is true.
 */
const processImageFile = async ({ req, res, metadata, returnFile = false }) => {
  const appConfig = req.config;
  const { file } = req;
  const { file_id, temp_file_id, endpoint } = metadata;

  const strategy = getFileStrategy(appConfig, { isImage: true });
  const { handleImageUpload } = getStrategyFunctions(strategy);

  const uploadResult = await handleImageUpload({ req, file, file_id, endpoint });
  const { filepath, bytes, width, height } = uploadResult;

  const fileRecord = {
    user: req.user.id,
    file_id,
    temp_file_id,
    bytes,
    filepath,
    filename: file.originalname,
    context: FileContext.message_attachment,
    source: strategy,
    // Images are normalized to the configured output format on upload.
    type: `image/${appConfig.imageOutputType}`,
    width,
    height,
  };
  const result = await createFile(fileRecord, true);

  if (returnFile) {
    return result;
  }
  res.status(200).json({ message: 'File uploaded and processed successfully', ...result });
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Saves an in-memory image buffer via the configured image storage strategy
 * and records it in the database. When `resize` is true the buffer is first
 * resized/converted to the app's configured output format and a fresh
 * file_id/filename are generated.
 *
 * @param {object} params
 * @param {object} params.req - Express request carrying `req.file`, `req.user`, `req.config`.
 * @param {string} params.context - FileContext value recorded on the file.
 * @param {object} [params.metadata={}] - `buffer`, `width`, `height`, `bytes`,
 *   `filename`, `file_id`, `type` (all may be overwritten when resizing).
 * @param {boolean} [params.resize=true] - Resize/convert before saving.
 * @returns {Promise<object>} The created file document.
 */
const uploadImageBuffer = async ({ req, context, metadata = {}, resize = true }) => {
  const appConfig = req.config;
  const source = getFileStrategy(appConfig, { isImage: true });
  const { saveBuffer } = getStrategyFunctions(source);
  let { buffer, width, height, bytes, filename, file_id, type } = metadata;
  if (resize) {
    file_id = v4();
    type = `image/${appConfig.imageOutputType}`;
    ({ buffer, width, height, bytes } = await resizeAndConvert({
      inputBuffer: buffer,
      desiredFormat: appConfig.imageOutputType,
    }));
    // Keep the original basename but swap the extension for the output format.
    filename = `${path.basename(req.file.originalname, path.extname(req.file.originalname))}.${
      appConfig.imageOutputType
    }`;
  }
  // Fix: interpolate the filename into the storage key. Previously the
  // literal text `$(unknown)` was embedded because `$(...)` is not
  // template-literal syntax, so every upload collapsed to the same suffix.
  const fileName = `${file_id}-${filename}`;
  const filepath = await saveBuffer({ userId: req.user.id, fileName, buffer });
  return await createFile(
    {
      user: req.user.id,
      file_id,
      bytes,
      filepath,
      filename,
      context,
      source,
      type,
      width,
      height,
    },
    true,
  );
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Handles a generic (non-agent) file upload. For assistants endpoints the
 * file is stored with OpenAI/Azure and attached to the assistant; otherwise
 * it goes through the app-wide file strategy. Responds with the created
 * database record.
 *
 * @param {object} params
 * @param {object} params.req - Express request carrying `req.file` and `req.config`.
 * @param {object} params.res - Express response.
 * @param {object} params.metadata - `file_id`, `temp_file_id`, `endpoint`,
 *   `assistant_id`, `message_file`, `tool_resource`.
 * @returns {Promise<void>}
 */
const processFileUpload = async ({ req, res, metadata }) => {
  const appConfig = req.config;
  const isAssistantUpload = isAssistantsEndpoint(metadata.endpoint);
  const assistantSource =
    metadata.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
  const source = isAssistantUpload ? assistantSource : appConfig.fileStrategy;
  const { handleFileUpload } = getStrategyFunctions(source);
  const { file_id, temp_file_id = null } = metadata;

  // OpenAI-stored uploads need an API client for the upload call itself.
  let openai;
  if (checkOpenAIStorage(source)) {
    ({ openai } = await getOpenAIClient({ req }));
  }

  const { file } = req;
  const sanitizedUploadFn = createSanitizedUploadWrapper(handleFileUpload);
  const {
    id,
    bytes,
    filename,
    filepath: _filepath,
    embedded,
    height,
    width,
  } = await sanitizedUploadFn({
    req,
    file,
    file_id,
    openai,
  });

  if (isAssistantUpload && !metadata.message_file && !metadata.tool_resource) {
    // Attach the uploaded file directly to the assistant.
    await openai.beta.assistants.files.create(metadata.assistant_id, {
      file_id: id,
    });
  } else if (isAssistantUpload && !metadata.message_file) {
    // Attach via the assistant's tool resource instead.
    await addResourceFileId({
      req,
      openai,
      file_id: id,
      assistant_id: metadata.assistant_id,
      tool_resource: metadata.tool_resource,
    });
  }

  let filepath = isAssistantUpload ? `${openai.baseURL}/files/${id}` : _filepath;
  // Assistant image uploads also get a locally-processed copy (for previews);
  // its filepath replaces the OpenAI file URL on the record.
  if (isAssistantUpload && file.mimetype.startsWith('image')) {
    const result = await processImageFile({
      req,
      file,
      metadata: { file_id: v4() },
      returnFile: true,
    });
    filepath = result.filepath;
  }

  const result = await createFile(
    {
      user: req.user.id,
      file_id: id ?? file_id,
      temp_file_id,
      bytes,
      filepath,
      filename: filename ?? sanitizeFilename(file.originalname),
      context: isAssistantUpload ? FileContext.assistants : FileContext.message_attachment,
      model: isAssistantUpload ? req.body.model : undefined,
      type: file.mimetype,
      embedded,
      source,
      height,
      width,
    },
    true,
  );
  res.status(200).json({ message: 'File uploaded and processed successfully', ...result });
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Handles file uploads for Agents, dispatching on `tool_resource`:
 * - `execute_code`: streams the file into the code-execution environment.
 * - `file_search`: stores the file AND uploads embeddings to the vector DB.
 * - `context`: extracts text (OCR, STT, or text parsing) and saves a text record.
 * - otherwise (message attachment): plain storage upload.
 * Responds with the created file record.
 *
 * @param {object} params
 * @param {object} params.req - Express request carrying `req.file`, `req.config`, `req.user`.
 * @param {object} params.res - Express response.
 * @param {object} params.metadata - `agent_id`, `tool_resource`, `file_id`,
 *   `temp_file_id`, `message_file`.
 * @returns {Promise<void>}
 */
const processAgentFileUpload = async ({ req, res, metadata }) => {
  const { file } = req;
  const appConfig = req.config;
  const { agent_id, tool_resource, file_id, temp_file_id = null } = metadata;

  // Message attachments are chat-scoped; they are not attached to an agent resource.
  let messageAttachment = !!metadata.message_file;

  if (agent_id && !tool_resource && !messageAttachment) {
    throw new Error('No tool resource provided for agent file upload');
  }

  if (tool_resource === EToolResources.file_search && file.mimetype.startsWith('image')) {
    throw new Error('Image uploads are not supported for file search tool resources');
  }

  if (!messageAttachment && !agent_id) {
    throw new Error('No agent ID provided for agent file upload');
  }

  const isImage = file.mimetype.startsWith('image');
  let fileInfoMetadata;
  // Agent-owned files are namespaced by agent id; attachments are not.
  const entity_id = messageAttachment === true ? undefined : agent_id;
  const basePath = mime.getType(file.originalname)?.startsWith('image') ? 'images' : 'uploads';
  if (tool_resource === EToolResources.execute_code) {
    const isCodeEnabled = await checkCapability(req, AgentCapabilities.execute_code);
    if (!isCodeEnabled) {
      throw new Error('Code execution is not enabled for Agents');
    }
    const { handleFileUpload: uploadCodeEnvFile } = getStrategyFunctions(FileSources.execute_code);
    const result = await loadAuthValues({ userId: req.user.id, authFields: [EnvVar.CODE_API_KEY] });
    const stream = fs.createReadStream(file.path);
    const fileIdentifier = await uploadCodeEnvFile({
      req,
      stream,
      filename: file.originalname,
      apiKey: result[EnvVar.CODE_API_KEY],
      entity_id,
    });
    // The identifier lets the code environment reference this file later.
    fileInfoMetadata = { fileIdentifier };
  } else if (tool_resource === EToolResources.file_search) {
    const isFileSearchEnabled = await checkCapability(req, AgentCapabilities.file_search);
    if (!isFileSearchEnabled) {
      throw new Error('File search is not enabled for Agents');
    }
    // Storage + embedding uploads for file_search happen further below.
  } else if (tool_resource === EToolResources.context) {
    const { file_id, temp_file_id = null } = metadata;

    /**
     * Persists extracted text as a `FileSources.text` record, attaches it to
     * the agent resource when applicable, and sends the HTTP response.
     *
     * @param {object} params
     * @param {string} params.text - Extracted text content.
     * @param {number} params.bytes - Size of the extracted content.
     * @param {string} [params.filepath] - Optional override; defaults to the upload path.
     * @param {string} [params.type='text/plain'] - MIME type to record.
     */
    const createTextFile = async ({ text, bytes, filepath, type = 'text/plain' }) => {
      const fileInfo = removeNullishValues({
        text,
        bytes,
        file_id,
        temp_file_id,
        user: req.user.id,
        type,
        filepath: filepath ?? file.path,
        source: FileSources.text,
        filename: file.originalname,
        model: messageAttachment ? undefined : req.body.model,
        context: messageAttachment ? FileContext.message_attachment : FileContext.agents,
      });

      if (!messageAttachment && tool_resource) {
        await addAgentResourceFile({
          req,
          file_id,
          agent_id,
          tool_resource,
        });
      }
      const result = await createFile(fileInfo, true);
      return res
        .status(200)
        .json({ message: 'Agent file uploaded and processed successfully', ...result });
    };

    const fileConfig = mergeFileConfig(appConfig.fileConfig);

    // Prefer OCR when configured and the MIME type is supported.
    const shouldUseOCR =
      appConfig?.ocr != null &&
      fileConfig.checkType(file.mimetype, fileConfig.ocr?.supportedMimeTypes || []);

    if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) {
      throw new Error('OCR capability is not enabled for Agents');
    } else if (shouldUseOCR) {
      try {
        const { handleFileUpload: uploadOCR } = getStrategyFunctions(
          appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
        );
        const {
          text,
          bytes,
          filepath: ocrFileURL,
        } = await uploadOCR({ req, file, loadAuthValues });
        return await createTextFile({ text, bytes, filepath: ocrFileURL });
      } catch (ocrError) {
        // OCR failures fall through to STT / plain-text extraction below.
        logger.error(
          `[processAgentFileUpload] OCR processing failed for file "${file.originalname}", falling back to text extraction:`,
          ocrError,
        );
      }
    }

    // Audio files are transcribed via the STT service.
    const shouldUseSTT = fileConfig.checkType(
      file.mimetype,
      fileConfig.stt?.supportedMimeTypes || [],
    );

    if (shouldUseSTT) {
      const sttService = await STTService.getInstance();
      const { text, bytes } = await processAudioFile({ req, file, sttService });
      return await createTextFile({ text, bytes });
    }

    // Fallback: plain text parsing for supported types.
    const shouldUseText = fileConfig.checkType(
      file.mimetype,
      fileConfig.text?.supportedMimeTypes || [],
    );

    if (!shouldUseText) {
      throw new Error(`File type ${file.mimetype} is not supported for text parsing.`);
    }

    const { text, bytes } = await parseText({ req, file, file_id });
    return await createTextFile({ text, bytes, type: file.mimetype });
  }

  let storageResult, embeddingResult;
  const isImageFile = file.mimetype.startsWith('image');
  const source = getFileStrategy(appConfig, { isImage: isImageFile });

  if (tool_resource === EToolResources.file_search) {
    // Dual upload: persist the original file in storage...
    const { handleFileUpload } = getStrategyFunctions(source);
    const sanitizedUploadFn = createSanitizedUploadWrapper(handleFileUpload);
    storageResult = await sanitizedUploadFn({
      req,
      file,
      file_id,
      basePath,
      entity_id,
    });

    // ...and index its content in the vector DB for retrieval.
    const { uploadVectors } = require('./VectorDB/crud');

    embeddingResult = await uploadVectors({
      req,
      file,
      file_id,
      entity_id,
    });

    fileInfoMetadata = {};
  } else {
    // Single storage upload for all other tool resources / attachments.
    const { handleFileUpload } = getStrategyFunctions(source);
    const sanitizedUploadFn = createSanitizedUploadWrapper(handleFileUpload);
    storageResult = await sanitizedUploadFn({
      req,
      file,
      file_id,
      basePath,
      entity_id,
    });
  }

  let { bytes, filename, filepath: _filepath, height, width } = storageResult;
  // For file_search, the embedding result is authoritative for these fields.
  let embedded = storageResult.embedded;
  if (tool_resource === EToolResources.file_search) {
    embedded = embeddingResult?.embedded;
    filename = embeddingResult?.filename || filename;
  }

  let filepath = _filepath;

  if (!messageAttachment && tool_resource) {
    await addAgentResourceFile({
      req,
      file_id,
      agent_id,
      tool_resource,
    });
  }

  if (isImage) {
    // Re-process images through the image pipeline; its filepath wins.
    const result = await processImageFile({
      req,
      file,
      metadata: { file_id: v4() },
      returnFile: true,
    });
    filepath = result.filepath;
  }

  const fileInfo = removeNullishValues({
    user: req.user.id,
    file_id,
    temp_file_id,
    bytes,
    filepath,
    filename: filename ?? sanitizeFilename(file.originalname),
    context: messageAttachment ? FileContext.message_attachment : FileContext.agents,
    model: messageAttachment ? undefined : req.body.model,
    metadata: fileInfoMetadata,
    type: file.mimetype,
    embedded,
    source,
    height,
    width,
  });

  const result = await createFile(fileInfo, true);

  res.status(200).json({ message: 'Agent file uploaded and processed successfully', ...result });
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Builds a local file record for a file stored with OpenAI/Azure,
 * optionally persisting it or bumping its usage count.
 *
 * @param {object} params
 * @param {object} params.openai - OpenAI client (carries `req` and `baseURL`).
 * @param {string} params.file_id - OpenAI file id.
 * @param {string} params.userId - Owner's user id.
 * @param {string} [params.filename] - Preferred filename; falls back to the
 *   basename of the retrieved file's name.
 * @param {boolean} [params.saveFile=false] - Persist the record to the DB.
 * @param {boolean} [params.updateUsage=false] - Bump usage instead of saving.
 * @returns {Promise<object>} The assembled file record.
 */
const processOpenAIFile = async ({
  openai,
  file_id,
  userId,
  filename,
  saveFile = false,
  updateUsage = false,
}) => {
  const retrieved = await openai.files.retrieve(file_id);

  let originalName = filename;
  if (originalName == null) {
    originalName = retrieved.filename ? path.basename(retrieved.filename) : undefined;
  }

  const suffix = originalName ? `/${originalName}` : '';
  const filepath = `${openai.baseURL}/files/${userId}/${file_id}${suffix}`;
  const type = mime.getType(originalName ?? file_id);
  const isAzure = openai.req.body.endpoint === EModelEndpoint.azureAssistants;
  const source = isAzure ? FileSources.azure : FileSources.openai;

  const file = {
    ...retrieved,
    type,
    file_id,
    filepath,
    usage: 1,
    user: userId,
    context: retrieved.purpose,
    source,
    model: openai.req.body.model,
    filename: originalName ?? file_id,
  };

  if (saveFile) {
    await createFile(file, true);
  } else if (updateUsage) {
    // Usage bumps are best-effort; a failure should not block the caller.
    try {
      await updateFileUsage({ file_id });
    } catch (error) {
      logger.error('Error updating file usage', error);
    }
  }

  return file;
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Converts an OpenAI image-output buffer to the app's image format, records
 * it in the database, and returns the assembled file record.
 *
 * @param {object} params
 * @param {object} params.req - Express request carrying `req.user` and `req.config`.
 * @param {Buffer} params.buffer - Raw image bytes downloaded from OpenAI.
 * @param {string} params.file_id - File id to record.
 * @param {string} params.filename - Display filename to record.
 * @param {string} params.fileExt - File extension (with or without detection).
 * @returns {Promise<object>} The file record.
 */
const processOpenAIImageOutput = async ({ req, buffer, file_id, filename, fileExt }) => {
  const currentDate = new Date();
  const formattedDate = currentDate.toISOString();
  const appConfig = req.config;
  const _file = await convertImage(req, buffer, undefined, `${file_id}${fileExt}`);

  const file = {
    ..._file,
    usage: 1,
    user: req.user.id,
    type: mime.getType(fileExt),
    createdAt: formattedDate,
    updatedAt: formattedDate,
    source: getFileStrategy(appConfig, { isImage: true }),
    context: FileContext.assistants_output,
    file_id,
    filename,
  };
  // Fix: this promise was previously left floating — a rejection was
  // unhandled and callers could observe a record that was never persisted.
  await createFile(file, true);
  return file;
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Retrieves a file referenced by an OpenAI run and processes it by type:
 * image outputs are downloaded and converted locally; everything else is
 * recorded by reference only.
 *
 * @param {object} params
 * @param {object} params.openai - OpenAI client.
 * @param {object} params.client - Calling client; provides `req`,
 *   `attachedFileIds`, `processedFileIds`.
 * @param {string} params.file_id - OpenAI file id (nullish returns null).
 * @param {string} [params.basename] - Known filename, if any.
 * @param {boolean} [params.unknownType] - True when the type must be sniffed.
 * @returns {Promise<object|null>} The processed file record, or null.
 */
async function retrieveAndProcessFile({
  openai,
  client,
  file_id,
  basename: _basename,
  unknownType,
}) {
  if (!file_id) {
    return null;
  }

  let basename = _basename;
  const processArgs = { openai, file_id, filename: basename, userId: client.req.user.id };

  // No basename: nothing to sniff, just save the metadata record.
  if (!basename) {
    return await processOpenAIFile({ ...processArgs, saveFile: true });
  }

  const fileExt = path.extname(basename);
  // Already-known files only get a usage bump.
  if (client.attachedFileIds?.has(file_id) || client.processedFileIds?.has(file_id)) {
    return processOpenAIFile({ ...processArgs, updateUsage: true });
  }

  // Downloads the raw file content from OpenAI into a Buffer.
  const getDataBuffer = async () => {
    const response = await openai.files.content(file_id);
    const arrayBuffer = await response.arrayBuffer();
    return Buffer.from(arrayBuffer);
  };

  let dataBuffer;
  // Only download when the content might be an image (or the type is unknown);
  // a failed download falls back to a metadata-only record.
  if (unknownType || !fileExt || imageExtRegex.test(basename)) {
    try {
      dataBuffer = await getDataBuffer();
    } catch (error) {
      logger.error('Error downloading file from OpenAI:', error);
      dataBuffer = null;
    }
  }

  if (!dataBuffer) {
    return await processOpenAIFile({ ...processArgs, saveFile: true });
  }

  // Sniff the real type when the extension is missing or unreliable.
  if (dataBuffer && (unknownType || !fileExt)) {
    const detectedExt = await determineFileType(dataBuffer);
    const isImageOutput = detectedExt && imageExtRegex.test('.' + detectedExt);

    if (!isImageOutput) {
      return await processOpenAIFile({ ...processArgs, saveFile: true });
    }

    return await processOpenAIImageOutput({
      file_id,
      req: client.req,
      buffer: dataBuffer,
      filename: basename,
      fileExt: detectedExt,
    });
  } else if (dataBuffer && imageExtRegex.test(basename)) {
    return await processOpenAIImageOutput({
      file_id,
      req: client.req,
      buffer: dataBuffer,
      filename: basename,
      fileExt,
    });
  } else {
    logger.debug(`[retrieveAndProcessFile] Non-image file type detected: ${basename}`);
    return await processOpenAIFile({ ...processArgs, saveFile: true });
  }
}
|
|
| |
| |
| |
| |
| |
/**
 * Decodes a base64 data URL (or a bare base64 string) into a Buffer plus its
 * declared MIME type.
 *
 * Fix: the previous MIME regex `[A-Za-z-+/]+` did not match digits or dots,
 * so data URLs like `data:image/jp2;base64,...` or
 * `data:application/vnd.ms-excel;base64,...` failed to match — the type came
 * back empty AND the `data:` prefix was left inside the payload, corrupting
 * the decoded buffer. Any MIME token up to the `;` is now accepted, and the
 * prefix is stripped by length from the single match.
 *
 * @param {string} base64String - Data URL or raw base64 payload.
 * @returns {{ buffer: Buffer, type: string }} Decoded bytes and MIME type
 *   ('' when no data-URL prefix was present).
 * @throws {Error} When the payload is empty or decoding fails.
 */
function base64ToBuffer(base64String) {
  try {
    const match = base64String.match(/^data:([^;,]+);base64,/);
    const type = match ? match[1] : '';
    // Strip the matched prefix; without a prefix, treat the whole string as data.
    const base64Data = match ? base64String.slice(match[0].length) : base64String;

    if (!base64Data) {
      throw new Error('Invalid base64 string');
    }

    return {
      buffer: Buffer.from(base64Data, 'base64'),
      type,
    };
  } catch (error) {
    throw new Error(`Failed to convert base64 to buffer: ${error.message}`);
  }
}
|
|
/**
 * Decodes a base64 image, resizes it for the requested resolution, stores it
 * via the configured image strategy, and records it in the database.
 *
 * @param {string} url - Base64 data URL of the image.
 * @param {object} options
 * @param {object} options.req - Express request carrying `req.user` and `req.config`.
 * @param {string} [options.file_id] - File id; generated when absent.
 * @param {string} options.filename - Base filename (extension appended if missing).
 * @param {string} options.endpoint - Endpoint hint for resizing.
 * @param {string} options.context - FileContext value recorded on the file.
 * @param {string} [options.resolution] - Overrides the configured image resolution.
 * @returns {Promise<object>} The created file document.
 * @throws {Error} When no extension can be derived from the MIME type.
 */
async function saveBase64Image(
  url,
  { req, file_id: _file_id, filename: _filename, endpoint, context, resolution },
) {
  const appConfig = req.config;
  const effectiveResolution = resolution ?? appConfig.fileConfig?.imageGeneration ?? 'high';
  const file_id = _file_id ?? v4();
  const { buffer: inputBuffer, type } = base64ToBuffer(url);

  let filename = `${file_id}-${_filename}`;
  // Ensure the stored name carries an extension derived from the MIME type.
  if (!path.extname(_filename)) {
    const extension = mime.getExtension(type);
    if (!extension) {
      throw new Error(`Could not determine file extension from MIME type: ${type}`);
    }
    filename = `${filename}.${extension}`;
  }

  const image = await resizeImageBuffer(inputBuffer, effectiveResolution, endpoint);
  const source = getFileStrategy(appConfig, { isImage: true });
  const { saveBuffer } = getStrategyFunctions(source);
  const filepath = await saveBuffer({
    userId: req.user.id,
    fileName: filename,
    buffer: image.buffer,
  });

  const fileRecord = {
    type,
    source,
    context,
    file_id,
    filepath,
    filename,
    user: req.user.id,
    bytes: image.bytes,
    width: image.width,
    height: image.height,
  };
  return await createFile(fileRecord, true);
}
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
/**
 * Validates an incoming upload against id, endpoint, size, MIME-type, and
 * (for non-avatar images) dimension requirements. Throws on the first
 * violation; returns undefined when the file is acceptable.
 *
 * @param {object} params
 * @param {object} params.req - Express request carrying `req.file`, `req.body`, `req.config`.
 * @param {boolean} [params.image] - Whether the upload is an image (enables width/height checks).
 * @param {boolean} [params.isAvatar] - Avatar uploads skip id/endpoint checks
 *   and use the avatar size limit.
 * @throws {Error} On any validation failure.
 */
function filterFile({ req, image, isAvatar }) {
  const { file } = req;
  const { endpoint, endpointType, file_id, width, height } = req.body;

  if (!file_id && !isAvatar) {
    throw new Error('No file_id provided');
  }

  if (file.size === 0) {
    throw new Error('Empty file uploaded');
  }

  // Non-avatar uploads must carry a valid UUID (throws via zod parse).
  if (!isAvatar) {
    isUUID.parse(file_id);
  }

  if (!endpoint && !isAvatar) {
    throw new Error('No endpoint provided');
  }

  const appConfig = req.config;
  const fileConfig = mergeFileConfig(appConfig.fileConfig);
  const endpointFileConfig = getEndpointFileConfig({
    endpoint,
    fileConfig,
    endpointType,
  });
  const fileSizeLimit =
    isAvatar === true ? fileConfig.avatarSizeLimit : endpointFileConfig.fileSizeLimit;

  if (file.size > fileSizeLimit) {
    const target = isAvatar ? 'avatar upload' : `${endpoint} endpoint`;
    throw new Error(`File size limit of ${fileSizeLimit / megabyte} MB exceeded for ${target}`);
  }

  const typeAllowed = fileConfig.checkType(file.mimetype, endpointFileConfig.supportedMimeTypes);
  if (!typeAllowed) {
    throw new Error('Unsupported file type');
  }

  // Dimension checks only apply to non-avatar image uploads.
  if (!image || isAvatar === true) {
    return;
  }

  if (!width) {
    throw new Error('No width provided');
  }

  if (!height) {
    throw new Error('No height provided');
  }
}
|
|
// Public interface of the file-processing service layer.
module.exports = {
  filterFile,
  processFiles,
  processFileURL,
  saveBase64Image,
  processImageFile,
  uploadImageBuffer,
  processFileUpload,
  processDeleteRequest,
  processAgentFileUpload,
  retrieveAndProcessFile,
};
|
|