Make the plugin compatible with Hexo v7

This commit is contained in:
tianjincai
2025-07-19 13:48:43 +08:00
parent 9065826e43
commit 8550024659
4 changed files with 3121 additions and 112 deletions

View File

@@ -1 +1,3 @@
// Register the S3 deployer with Hexo. The `hexo` object is injected into the
// plugin's scope by Hexo's plugin loader when this file is executed.
// (A leftover load-debugging console.log was removed from this entry point.)
hexo.extend.deployer.register('s3', require('./lib/deployer'));

View File

@@ -1,85 +1,186 @@
var fs = require('fs');
var ini = require('ini');
var path = require('path');
var s3 = require('s3');
var chalk = require('chalk');
var xtend = require('xtend');
// deployer.js
module.exports = function(args) {
const { S3Client, ListObjectsV2Command, DeleteObjectsCommand } = require('@aws-sdk/client-s3');
const { Upload } = require('@aws-sdk/lib-storage');
const fs = require('fs');
const path = require('path');
const klawSync = require('klaw-sync');
const mime = require('mime-types');
const chalk = require('chalk').default;
const pLimit = require('p-limit').default;
var config = {
maxAsyncS3: args.concurrency,
s3Options: {
accessKeyId: args.aws_key || process.env.AWS_ACCESS_KEY_ID || process.env.AWS_KEY,
secretAccessKey: args.aws_secret || process.env.AWS_SECRET_ACCESS_KEY || process.env.AWS_SECRET,
region: args.region
}
};
if (args.endpoint) {
config.endpoint = args.endpoint;
}
if (!config.s3Options.accessKeyId && !config.s3Options.secretAccessKey && args.aws_cli_profile) {
/* User configured their access and secret keys in ~/.aws/credentials, check there */
var iniFile = path.join(process.env.HOME, '.aws');
var iniCredentials = ini.parse(fs.readFileSync(path.join(iniFile, 'credentials'), 'utf-8'));
config.s3Options.accessKeyId = (iniCredentials[args.aws_cli_profile] || {}).aws_access_key_id;
config.s3Options.secretAccessKey = (iniCredentials[args.aws_cli_profile] || {}).aws_secret_access_key;
if (!config.s3Options.region) {
var iniConfig = ini.parse(fs.readFileSync(path.join(iniFile, 'config'), 'utf-8'));
var profilePath = (args.aws_cli_profile === 'default') ? args.aws_cli_profile : "profile " + args.aws_cli_profile;
config.s3Options.region = (iniConfig[profilePath] || {}).region;
}
}
var client = s3.createClient(config);
module.exports = async function(args) {
const log = this.log;
const publicDir = this.config.public_dir;
var publicDir = this.config.public_dir;
var log = this.log;
// --- 1. 配置检查 ---
const {
bucket,
region,
concurrency = 20,
prefix,
aws_cli_profile,
headers,
delete_removed,
endpoint,
access_key_id,
secret_access_key,
aws_key,
aws_secret
} = args;
var customHeaders = args.headers || {};
var deleteRemoved = args.hasOwnProperty('delete_removed')
? Boolean(args.delete_removed)
: true;
if (!args.bucket || !config.s3Options.accessKeyId || !config.s3Options.secretAccessKey) {
var help = '';
help += 'You should configure deployment settings in _config.yml first!\n\n';
help += 'Example:\n';
help += ' deploy:\n';
help += ' type: s3\n';
help += ' bucket: <bucket>\n';
help += ' [aws_key]: <aws_key> # Optional, if provided as environment variable\n';
help += ' [aws_secret]: <aws_secret> # Optional, if provided as environment variable\n';
help += ' [concurrency]: <concurrency>\n';
help += ' [region]: <region> # See https://github.com/LearnBoost/knox#region\n',
help += ' [headers]: <JSON headers> # Optional, see README.md file\n';
help += ' [prefix]: <prefix> # Optional, prefix ending in /\n';
help += ' [delete_removed]: <delete> # Optional, if true will delete removed files from S3 /\n\n';
help += ' [endpoint]: <endpoint> # Optional, for S3 compatiable services\n'
help += 'For more help, you can check the docs: ' + chalk.underline('https://github.com/nt3rp/hexo-deployer-s3');
console.log(help);
if (!bucket || !endpoint) {
log.error('Bucket and Endpoint must be configured in _config.yml');
log.info(chalk.bold('--- Generic S3-Compatible Service Example (like Teby, MinIO, Cloudflare R2) ---'));
log.info(' deploy:');
log.info(' type: s3');
log.info(' bucket: <your-bucket-name>');
log.info(' endpoint: <your-s3-endpoint>');
log.info(' access_key_id: <your-access-key>');
log.info(' secret_access_key: <your-secret-key>');
log.info(' region: <any-string-is-ok-e.g.-us-east-1>');
log.info(' [prefix]: <prefix>');
log.info(' [concurrency]: 20');
log.info(' [delete_removed]: true');
log.info('');
log.info(chalk.bold('--- AWS S3 Example ---'));
log.info(' deploy:');
log.info(' type: s3');
log.info(' bucket: <your-aws-bucket-name>');
log.info(' region: <your-aws-region>');
log.info(' endpoint: <s3.your-aws-region.amazonaws.com>');
log.info(' # Credentials can be from env vars, ~/.aws/credentials, or here:');
log.info(' # access_key_id: <your-aws-key>');
log.info(' # secret_access_key: <your-aws-secret>');
return;
}
var params = {
localDir: publicDir,
deleteRemoved: deleteRemoved,
s3Params: xtend({
Bucket: args.bucket,
Prefix: args.prefix
},customHeaders)
// --- 2. 创建 S3 客户端 ---
const s3Config = {
region: region || 'us-east-1',
endpoint: endpoint,
};
const keyId = access_key_id || aws_key;
const secret = secret_access_key || aws_secret;
if (keyId && secret) {
s3Config.credentials = {
accessKeyId: keyId,
secretAccessKey: secret
};
log.info('Using credentials from _config.yml.');
} else if (aws_cli_profile) {
process.env.AWS_PROFILE = aws_cli_profile;
log.info(`Using AWS profile: ${aws_cli_profile}`);
} else {
log.info('Using credentials from environment variables or IAM role.');
}
var uploader = client.uploadDir(params);
log.info('Uploading...');
const client = new S3Client(s3Config);
return uploader
.on('progress', function() {
// log.info(uploader.progressAmount + ' / ' + uploader.progressTotal);
}).on('end', function() {
log.info('Done!');
}).on('error', function(err) {
log.error(err)
// --- 3. 准备文件列表 ---
const filesToUpload = klawSync(publicDir, { nodir: true });
const remotePrefix = prefix || '';
const shouldDeleteRemoved = delete_removed !== false;
if (!fs.existsSync(publicDir)) {
log.error(`Public folder not found: ${publicDir}. Run 'hexo generate' first.`);
return;
}
log.info(`Found ${filesToUpload.length} files in ${publicDir}`);
// --- 4. 实现 delete_removed (可选) ---
if (shouldDeleteRemoved) {
log.info('Checking for files to delete on S3...');
try {
const s3Objects = await listAllObjects(client, bucket, remotePrefix);
const localFilesSet = new Set(
filesToUpload.map(file => path.join(remotePrefix, path.relative(publicDir, file.path)).replace(/\\/g, '/'))
);
const objectsToDelete = s3Objects
.filter(obj => !localFilesSet.has(obj.Key))
.map(obj => ({ Key: obj.Key }));
if (objectsToDelete.length > 0) {
log.info(`Deleting ${objectsToDelete.length} removed files from S3...`);
for (let i = 0; i < objectsToDelete.length; i += 1000) {
const chunk = objectsToDelete.slice(i, i + 1000);
await client.send(new DeleteObjectsCommand({
Bucket: bucket,
Delete: { Objects: chunk },
}));
}
} else {
log.info('No files to delete.');
}
} catch (err) {
log.error('Failed to check/delete removed files. Please check your permissions.');
log.error(err);
}
}
// --- 5. 执行上传 ---
const limit = pLimit(concurrency);
log.info(`Uploading to bucket: ${chalk.cyan(bucket)} via endpoint: ${chalk.cyan(endpoint)}`);
const uploadPromises = filesToUpload.map(file => {
return limit(() => {
const key = path.join(remotePrefix, path.relative(publicDir, file.path)).replace(/\\/g, '/');
const body = fs.createReadStream(file.path);
const contentType = mime.lookup(file.path) || 'application/octet-stream';
const upload = new Upload({
client,
params: {
Bucket: bucket,
Key: key,
Body: body,
ContentType: contentType,
...headers
},
});
return upload.done().then(() => {
log.info(`Uploaded: ${key}`);
});
});
});
try {
await Promise.all(uploadPromises);
log.info('All files uploaded successfully!');
} catch (err) {
log.error('An error occurred during upload:');
log.error(err);
throw new Error('S3 deployment failed.');
}
};
/**
 * List every object under `prefix` in `bucket`, following ListObjectsV2
 * continuation tokens until the listing is no longer truncated.
 *
 * @param {Object} client - S3 client exposing an async `send(command)` method
 * @param {string} bucket - bucket name
 * @param {string} prefix - key prefix to list under ('' lists everything)
 * @returns {Promise<Array>} all object records accumulated across pages
 */
async function listAllObjects(client, bucket, prefix) {
  const objects = [];
  let continuationToken;
  let morePages;
  do {
    // Each iteration fetches one page; the token links it to the previous one
    // (undefined on the first request).
    const page = await client.send(new ListObjectsV2Command({
      Bucket: bucket,
      Prefix: prefix,
      ContinuationToken: continuationToken,
    }));
    objects.push(...(page.Contents ?? []));
    morePages = page.IsTruncated;
    continuationToken = page.NextContinuationToken;
  } while (morePages);
  return objects;
}

2904
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,43 +1,45 @@
{
  "name": "hexo-deployer-s3",
  "version": "1.0.0",
  "description": "Amazon S3 deployer plugin for Hexo",
  "main": "index",
  "keywords": [
    "hexo",
    "s3",
    "aws",
    "deployer"
  ],
  "author": "Nicholas Terwoord <nicholas.terwoord+code@gmail.com>",
  "contributors": [
    {
      "name": "Josh Strange",
      "email": "josh@joshstrange.com"
    },
    {
      "name": "Jack Guy",
      "email": "jack@thatguyjackguy.com"
    },
    {
      "name": "Josenivaldo Benito Jr.",
      "email": "jrbenito@benito.qsl.br"
    }
  ],
  "repository": {
    "type": "git",
    "url": "http://github.com/nt3rp/hexo-deployer-s3.git"
  },
  "license": {
    "type": "MIT"
  },
  "peerDependencies": {
    "hexo": "^7.0.0"
  },
  "dependencies": {
    "@aws-sdk/client-s3": "^3.509.0",
    "@aws-sdk/lib-storage": "^3.509.0",
    "chalk": "^5.3.0",
    "klaw-sync": "^6.0.0",
    "mime-types": "^2.1.35",
    "p-limit": "^4.0.0"
  }
}