Multiple statements based on video conditions using AWS MediaConvert and AWS Lambda

Published 2019-11-05 08:53


My question may well get edited; I have tried to be as specific as possible.

Introduction:
I followed this guide on kicking off video transcoding (with AWS MediaConvert and AWS Lambda) after a video is uploaded to an AWS S3 bucket.

Problem:
I need two case statements (probably in Python) that, based on conditions detected from the video, start conversion type A or conversion type B.

Example:
If the video resolution is < 720p, leave it as it is
If the video resolution is > 720p, convert it to 1080

Question:
How can I do that?
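To be clear, I expect the branch itself to be simple once the resolution is known; roughly something like the sketch below, where the two settings file names are made up and source_height would have to come from probing the uploaded file somehow (see the probing idea after convert.py further down).

import json

source_height = 1080  # placeholder - in the real handler this would come from probing the input

# The two settings file names are hypothetical; each would hold one of the two job configurations.
if source_height <= 720:
    settings_file = 'job-passthrough.json'  # case A: leave the video as it is
else:
    settings_file = 'job-1080.json'         # case B: convert it to 1080

with open(settings_file) as f:
    job_settings = json.load(f)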

Some code:
I have this code in my Lambda function:

This is my convert.py:

#!/usr/bin/env python
import glob
import json
import os
import uuid
import boto3
import datetime
import random
from urlparse import urlparse
import logging

from botocore.client import ClientError

logger = logging.getLogger()
logger.setLevel(logging.INFO)

S3 = boto3.resource('s3')


def handler(event, context):
    '''
    Watchfolder handler - this lambda is triggered when video objects are uploaded to the
    SourceS3Bucket/inputs folder.

    It will look for two sets of file inputs:

        SourceS3Bucket/inputs/SourceS3Key: the input video to be converted

        SourceS3Bucket/jobs/*.json: job settings for MediaConvert jobs to be run against
        the input video. If there are no settings files in the jobs folder, then the
        Default job will be run from the job.json file in the lambda environment.

    Output paths stored in outputGroup['OutputGroupSettings']['DashIsoGroupSettings']['Destination']
    are constructed from the name of the job settings files as follows:

        s3://<MediaBucket>/<basename(job settings filename)>/<basename(input)>/<Destination value from job settings file>
    '''
    assetID = str(uuid.uuid4())
    sourceS3Bucket = event['Records'][0]['s3']['bucket']['name']
    sourceS3Key = event['Records'][0]['s3']['object']['key']
    sourceS3 = 's3://' + sourceS3Bucket + '/' + sourceS3Key
    destinationS3 = 's3://' + os.environ['DestinationBucket']
    mediaConvertRole = os.environ['MediaConvertRole']
    application = os.environ['Application']
    region = os.environ['AWS_DEFAULT_REGION']
    statusCode = 200
    jobs = []
    job = {}

    # Use MediaConvert SDK UserMetadata to tag jobs with the assetID
    # Events from MediaConvert will have the assetID in UserMetadata
    jobMetadata = {}
    jobMetadata['assetID'] = assetID
    jobMetadata['application'] = application
    jobMetadata['input'] = sourceS3
    jobMetadata['originalName'] = os.path.splitext(sourceS3Key.split("/")[-1])[0]

    try:
        # Build a list of jobs to run against the input. Use the settings files in
        # WatchFolder/jobs if any exist. Otherwise, use the default job.
        jobInput = {}

        # Iterates through all the objects in the jobs folder of the WatchFolder bucket,
        # doing the pagination for you. Each obj contains a jobSettings JSON.
        bucket = S3.Bucket(sourceS3Bucket)
        for obj in bucket.objects.filter(Prefix='jobs/'):
            if obj.key != "jobs/":
                jobInput['filename'] = obj.key
                logger.info('jobInput: %s', jobInput['filename'])

                jobInput['settings'] = json.loads(obj.get()['Body'].read())
                logger.info(json.dumps(jobInput['settings']))

                jobs.append(jobInput)

        # Use Default job settings in the lambda zip file in the current working directory
        if not jobs:
            with open('job.json') as json_data:
                jobInput['filename'] = 'Default'
                logger.info('jobInput: %s', jobInput['filename'])

                jobInput['settings'] = json.load(json_data)
                logger.info(json.dumps(jobInput['settings']))

                jobs.append(jobInput)

        # get the account-specific mediaconvert endpoint for this region
        mediaconvert_client = boto3.client('mediaconvert', region_name=region)
        endpoints = mediaconvert_client.describe_endpoints()

        # add the account-specific endpoint to the client session
        client = boto3.client('mediaconvert', region_name=region,
                              endpoint_url=endpoints['Endpoints'][0]['Url'], verify=False)

        for j in jobs:
            jobSettings = j['settings']
            jobFilename = j['filename']

            # Save the name of the settings file in the job userMetadata
            jobMetadata['settings'] = jobFilename

            # Update the job settings with the source video from the S3 event
            jobSettings['Inputs'][0]['FileInput'] = sourceS3

            # Update the job settings with the destination paths for converted videos.
            # We want to replace the destination bucket of the output paths in the job
            # settings, but keep the rest of the path.
            destinationS3 = 's3://' + os.environ['DestinationBucket'] + '/' \
                + 'output' + '/' \
                + os.path.splitext(os.path.basename(sourceS3Key))[0]
                # + '/' \
                # + os.path.splitext(os.path.basename(jobFilename))[0]

            basePath = destinationS3.replace("s3://", "https://s3-" + region + ".amazonaws.com/")
            jobMetadata['fileName'] = jobFilename

            for outputGroup in jobSettings['OutputGroups']:

                logger.info("outputGroup['OutputGroupSettings']['Type'] == %s",
                            outputGroup['OutputGroupSettings']['Type'])

                if outputGroup['OutputGroupSettings']['Type'] == 'FILE_GROUP_SETTINGS':
                    templateDestination = outputGroup['OutputGroupSettings']['FileGroupSettings']['Destination']
                    templateDestinationKey = urlparse(templateDestination).path
                    logger.info("templateDestinationKey == %s", templateDestinationKey)
                    jobMetadata[templateDestinationKey.replace("/", "").lower()] = basePath + templateDestinationKey
                    outputGroup['OutputGroupSettings']['FileGroupSettings']['Destination'] = destinationS3 + templateDestinationKey

                elif outputGroup['OutputGroupSettings']['Type'] == 'HLS_GROUP_SETTINGS':
                    templateDestination = outputGroup['OutputGroupSettings']['HlsGroupSettings']['Destination']
                    templateDestinationKey = urlparse(templateDestination).path
                    logger.info("templateDestinationKey == %s", templateDestinationKey)
                    jobMetadata[templateDestinationKey.replace("/", "").lower()] = basePath + templateDestinationKey
                    outputGroup['OutputGroupSettings']['HlsGroupSettings']['Destination'] = destinationS3 + templateDestinationKey

                elif outputGroup['OutputGroupSettings']['Type'] == 'DASH_ISO_GROUP_SETTINGS':
                    templateDestination = outputGroup['OutputGroupSettings']['DashIsoGroupSettings']['Destination']
                    templateDestinationKey = urlparse(templateDestination).path
                    logger.info("templateDestinationKey == %s", templateDestinationKey)
                    jobMetadata[templateDestinationKey.replace("/", "").lower()] = basePath + templateDestinationKey
                    outputGroup['OutputGroupSettings']['DashIsoGroupSettings']['Destination'] = destinationS3 + templateDestinationKey

                elif outputGroup['OutputGroupSettings']['Type'] == 'MS_SMOOTH_GROUP_SETTINGS':
                    templateDestination = outputGroup['OutputGroupSettings']['MsSmoothGroupSettings']['Destination']
                    templateDestinationKey = urlparse(templateDestination).path
                    logger.info("templateDestinationKey == %s", templateDestinationKey)
                    jobMetadata[templateDestinationKey.replace("/", "").lower()] = basePath + templateDestinationKey
                    outputGroup['OutputGroupSettings']['MsSmoothGroupSettings']['Destination'] = destinationS3 + templateDestinationKey

                else:
                    logger.error("Exception: Unknown Output Group Type %s",
                                 outputGroup['OutputGroupSettings']['Type'])
                    statusCode = 500

            logger.info(json.dumps(jobSettings))

            # Convert the video using AWS Elemental MediaConvert
            job = client.create_job(Role=mediaConvertRole, UserMetadata=jobMetadata, Settings=jobSettings)

    except Exception as e:
        logger.error('Exception: %s', e)
        statusCode = 500
        raise

    finally:
        return {
            'statusCode': statusCode,
            'body': json.dumps(job, indent=4, sort_keys=True, default=str),
            'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'}
        }
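What I am missing is the detection step. One idea (not from the guide, just something I am considering) is to probe the uploaded object with ffprobe shipped in a Lambda layer, using a presigned URL so the file does not have to be downloaded first. A rough sketch, assuming the ffprobe binary sits at /opt/bin/ffprobe:

import json
import subprocess

import boto3


def get_source_height(bucket, key, ffprobe_path='/opt/bin/ffprobe'):
    """Return the pixel height of the first video stream of an S3 object.

    Assumes an ffprobe binary is bundled in a Lambda layer at ffprobe_path.
    """
    # A presigned URL lets ffprobe read the object over HTTPS without downloading it to /tmp
    url = boto3.client('s3').generate_presigned_url(
        'get_object', Params={'Bucket': bucket, 'Key': key}, ExpiresIn=300)
    out = subprocess.check_output([
        ffprobe_path, '-v', 'error',
        '-select_streams', 'v:0',
        '-show_entries', 'stream=width,height',
        '-of', 'json',
        url])
    return json.loads(out)['streams'][0]['height']

The handler could then call get_source_height(sourceS3Bucket, sourceS3Key) right after it reads the S3 event, and pick the settings to use based on the result.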

This is my job.json:

{
  "OutputGroups": [
    {
      "CustomName": "MP4",
      "Name": "File Group",
      "Outputs": [
        {
          "ContainerSettings": {
            "Container": "MP4",
            "Mp4Settings": {
              "CslgAtom": "INCLUDE",
              "FreeSpaceBox": "EXCLUDE",
              "MoovPlacement": "PROGRESSIVE_DOWNLOAD"
            }
          },
          "VideoDescription": {
            "Width": 1280,
            "ScalingBehavior": "DEFAULT",
            "Height": 720,
            "TimecodeInsertion": "DISABLED",
            "AntiAlias": "ENABLED",
            "Sharpness": 50,
            "CodecSettings": {
              "Codec": "H_264",
              "H264Settings": {
                "InterlaceMode": "PROGRESSIVE",
                "NumberReferenceFrames": 3,
                "Syntax": "DEFAULT",
                "Softness": 0,
                "GopClosedCadence": 1,
                "GopSize": 90,
                "Slices": 1,
                "GopBReference": "DISABLED",
                "SlowPal": "DISABLED",
                "SpatialAdaptiveQuantization": "ENABLED",
                "TemporalAdaptiveQuantization": "ENABLED",
                "FlickerAdaptiveQuantization": "DISABLED",
                "EntropyEncoding": "CABAC",
                "Bitrate": 3000000,
                "FramerateControl": "INITIALIZE_FROM_SOURCE",
                "RateControlMode": "CBR",
                "CodecProfile": "MAIN",
                "Telecine": "NONE",
                "MinIInterval": 0,
                "AdaptiveQuantization": "HIGH",
                "CodecLevel": "AUTO",
                "FieldEncoding": "PAFF",
                "SceneChangeDetect": "ENABLED",
                "QualityTuningLevel": "SINGLE_PASS",
                "FramerateConversionAlgorithm": "DUPLICATE_DROP",
                "UnregisteredSeiTimecode": "DISABLED",
                "GopSizeUnits": "FRAMES",
                "ParControl": "INITIALIZE_FROM_SOURCE",
                "NumberBFramesBetweenReferenceFrames": 2,
                "RepeatPps": "DISABLED"
              }
            },
            "AfdSignaling": "NONE",
            "DropFrameTimecode": "ENABLED",
            "RespondToAfd": "NONE",
            "ColorMetadata": "INSERT"
          },
          "AudioDescriptions": [
            {
              "AudioTypeControl": "FOLLOW_INPUT",
              "CodecSettings": {
                "Codec": "AAC",
                "AacSettings": {
                  "AudioDescriptionBroadcasterMix": "NORMAL",
                  "Bitrate": 96000,
                  "RateControlMode": "CBR",
                  "CodecProfile": "LC",
                  "CodingMode": "CODING_MODE_2_0",
                  "RawFormat": "NONE",
                  "SampleRate": 48000,
                  "Specification": "MPEG4"
                }
              },
              "LanguageCodeControl": "FOLLOW_INPUT"
            }
          ]
        }
      ],
      "OutputGroupSettings": {
        "Type": "FILE_GROUP_SETTINGS",
        "FileGroupSettings": {
          "Destination": "s3://<MEDIABUCKET>/MP4/"
        }
      }
    },
    {
      "CustomName": "Thumbnails",
      "Name": "File Group",
      "Outputs": [
        {
          "ContainerSettings": {
            "Container": "RAW"
          },
          "VideoDescription": {
            "Width": 1280,
            "ScalingBehavior": "DEFAULT",
            "Height": 720,
            "TimecodeInsertion": "DISABLED",
            "AntiAlias": "ENABLED",
            "Sharpness": 50,
            "CodecSettings": {
              "Codec": "FRAME_CAPTURE",
              "FrameCaptureSettings": {
                "FramerateNumerator": 1,
                "FramerateDenominator": 5,
                "MaxCaptures": 5,
                "Quality": 100
              }
            },
            "AfdSignaling": "NONE",
            "DropFrameTimecode": "ENABLED",
            "RespondToAfd": "NONE",
            "ColorMetadata": "INSERT"
          }
        }
      ],
      "OutputGroupSettings": {
        "Type": "FILE_GROUP_SETTINGS",
        "FileGroupSettings": {
          "Destination": "s3://<MEDIABUCKET>/Thumbnails/"
        }
      }
    }
  ],
  "AdAvailOffset": 0,
  "Inputs": [
    {
      "AudioSelectors": {
        "Audio Selector 1": {
          "Offset": 0,
          "DefaultSelection": "DEFAULT",
          "ProgramSelection": 1
        }
      },
      "VideoSelector": {
        "ColorSpace": "FOLLOW"
      },
      "FilterEnable": "AUTO",
      "PsiControl": "USE_PSI",
      "FilterStrength": 0,
      "DeblockFilter": "DISABLED",
      "DenoiseFilter": "DISABLED",
      "TimecodeSource": "EMBEDDED",
      "FileInput": "s3://rodeolabz-us-west-2/vodconsole/VANLIFE.m2ts"
    }
  ]
}
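Alternatively, since the two cases only differ in the output resolution, maybe I could keep this single job.json and patch the VideoDescription in the handler before create_job. A minimal sketch (jobSettings is the dict loaded from job.json, source_height comes from whatever detection ends up being used):

def adjust_resolution(jobSettings, source_height):
    # Sketch only - indices follow the job.json above (OutputGroups[0] is the MP4 group).
    video = jobSettings['OutputGroups'][0]['Outputs'][0]['VideoDescription']
    if source_height > 720:
        # case B: convert it to 1080
        video['Width'] = 1920
        video['Height'] = 1080
    else:
        # case A: "leave it as it is" - dropping Width/Height lets MediaConvert
        # follow the source resolution instead of forcing the template's 1280x720
        video.pop('Width', None)
        video.pop('Height', None)
    return jobSettings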

Source: Multiple statements based on video conditions using AWS MediaConvert and AWS Lambda
Tags: python lambda