Here is the Lambda code I am using to read a table and then upload the results to S3:
'use strict';
const pg = require('pg');
const aws = require('aws-sdk');
const awsParamStore = require('aws-param-store');

exports.handler = async function (context)
{
    function putObjectToS3(bucket, key, data) {
        var s3 = new aws.S3();
        var params = {
            Bucket: bucket,
            Key: key,
            Body: data
        };
        s3.putObject(params, function (err, data) {
            if (err) console.log(err, err.stack); // an error occurred
            else console.log(data);               // successful response
        });
    }

    context.callbackWaitsForEmptyEventLoop = false;

    var dbname = 'pg_db';
    var milliseconds = (new Date).getTime();
    var fileName = 'process_log_' + milliseconds.toString() + '.json';

    let parameter = awsParamStore.getParameterSync(process.env.ParameterPath);

    var client = new pg.Client(parameter.Value + '/' + dbname);
    await client.connect();
    const { rows } = await client.query('SELECT * FROM public.mytable LIMIT ' + process.env.LimitRes.toString() + ';');
    client.end();

    var json = JSON.stringify(rows);
    putObjectToS3('mybucket', 'thefile.json', json);

    //return JSON.stringify(rows); << verified that there are actually 10,000 rows of data
    //return json; << verified that the json is valid.
    //return rows.length; << 10,000 count
};
I have verified that the data is valid and that all of the expected rows are actually in the result.
However, the behavior is inconsistent. Sometimes the upload happens, but most times it doesn't. I have to run the code several times to get it to upload, and the larger the result set, the less likely it is to happen.
I'm wondering if the query call doesn't always return in time?
I'm new to Node and JS, so I'm not sure whether I need to make the call somehow wait until it is fully complete (and I'm not sure how to do that).
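Do I need to do something like this for the S3 upload, i.e. switch to the promise form of putObject and await it? This is just a rough sketch of what I think it might look like (reusing the same aws require and bucket/key/data values from above); I haven't confirmed it's the right approach:

// Sketch only: would awaiting the upload like this make the handler wait for S3?
// Uses the promise form of putObject from aws-sdk v2 (request.promise()).
async function putObjectToS3(bucket, key, data) {
    const s3 = new aws.S3();
    const params = {
        Bucket: bucket,
        Key: key,
        Body: data
    };
    // Return the promise so the caller can await the upload finishing
    return s3.putObject(params).promise();
}

// ...and then in the handler body:
// await putObjectToS3('mybucket', 'thefile.json', json);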
By the way, the Lambda memory setting is 1664 MB, so there is plenty to handle the data. The code never throws an error when it runs; it's always green, and it finishes well within the Lambda timeout of 5 minutes.
Advice appreciated, thanks!