stage 1 done, exported old lambda as YAML

Kaushik Narayan R 2024-04-25 12:07:48 -07:00
parent b582f8ffb7
commit 11cbf121b6
8 changed files with 274 additions and 0 deletions

View File

@@ -0,0 +1,77 @@
# This AWS SAM template has been generated from your function's configuration. If
# your function has one or more triggers, note that the AWS resources associated
# with these triggers aren't fully specified in this template and include
# placeholder values. Open this template in AWS Application Composer or your
# favorite IDE and modify it to specify a serverless application with other AWS
# resources.
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: An AWS Serverless Application Model template describing your function.
Resources:
p1videosplitting:
Type: AWS::Serverless::Function
Properties:
CodeUri: .
Description: ''
MemorySize: 512
Timeout: 60
Architectures:
- x86_64
EphemeralStorage:
Size: 512
EventInvokeConfig:
MaximumEventAgeInSeconds: 21600
MaximumRetryAttempts: 2
ImageUri: >-
146064153251.dkr.ecr.us-east-1.amazonaws.com/546-proj2-p1@sha256:f3e8b79ffa429bfeb24b7b3a1786bb206dd89c2c7f01f3ff9fbb0d1379c9e238
PackageType: Image
Policies:
- Statement:
- Effect: Allow
Action:
- logs:PutLogEvents
- logs:CreateLogGroup
- logs:CreateLogStream
Resource: arn:aws:logs:*:*:*
- Effect: Allow
Action:
- s3:GetObject
Resource: arn:aws:s3:::*/*
            - Effect: Allow
              Action:
                - s3:PutObject
              Resource: arn:aws:s3:::*/*
            # handler.py invokes the face-recognition function, so the role
            # also needs invoke rights (mirrors the standalone policy JSON
            # later in this commit)
            - Effect: Allow
              Action:
                - lambda:InvokeFunction
              Resource: '*'
SnapStart:
ApplyOn: None
Events:
BucketEvent1:
Type: S3
Properties:
Bucket:
Ref: Bucket1
Events:
- s3:ObjectCreated:*
Bucket1:
Type: AWS::S3::Bucket
Properties:
VersioningConfiguration:
Status: Enabled
BucketEncryption:
ServerSideEncryptionConfiguration:
- ServerSideEncryptionByDefault:
SSEAlgorithm: AES256
  BucketPolicy1:
    Type: AWS::S3::BucketPolicy
    Properties:
      Bucket:
        Ref: Bucket1
      PolicyDocument:
        Statement:
          - Action: s3:*
            Effect: Deny
            Principal: '*'
            Resource:
              - Fn::Sub: arn:aws:s3:::${Bucket1}/*
              - Fn::Sub: arn:aws:s3:::${Bucket1}
            Condition:
              Bool:
                aws:SecureTransport: false
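
Once the stack is up, the trigger wiring can be smoke-tested end to end with a short script. A minimal sketch, assuming boto3 credentials are configured; the bucket names come from handler.py, and the sample file and 60-second timeout are illustrative assumptions:

# smoke_test.py - upload a clip to the input bucket, then poll the stage-1
# bucket for the extracted frame (bucket names as used in handler.py)
import time
import boto3

s3 = boto3.client("s3", region_name="us-east-1")
s3.upload_file("jellyfish_jam.mp4", "1229569564-input", "jellyfish_jam.mp4")

deadline = time.time() + 60  # assumed timeout
while time.time() < deadline:
    listing = s3.list_objects_v2(Bucket="1229569564-stage-1", Prefix="jellyfish_jam")
    if listing.get("KeyCount", 0) > 0:
        print("frame produced:", listing["Contents"][0]["Key"])
        break
    time.sleep(5)
else:
    print("timed out waiting for stage-1 output")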

Dockerfile

@@ -0,0 +1,53 @@
#__copyright__ = "Copyright 2024, VISA Lab"
#__license__ = "MIT"
# Define global args
ARG FUNCTION_DIR="/home/app/"
ARG RUNTIME_VERSION="3.8"
# Stage 1 - base image
# NB: python:${RUNTIME_VERSION} is Debian-based, so apt-get is available below
FROM python:${RUNTIME_VERSION} AS python-base
RUN python${RUNTIME_VERSION} -m pip install --upgrade pip
# Stage 2 - build image: install the runtime interface client
FROM python-base AS build-image
# Include global args in this stage of the build
ARG FUNCTION_DIR
ARG RUNTIME_VERSION
# Create function directory
RUN mkdir -p ${FUNCTION_DIR}
# Install Lambda Runtime Interface Client for Python
RUN python${RUNTIME_VERSION} -m pip install awslambdaric --target ${FUNCTION_DIR}
# Stage 3 - final runtime image
# Grab a fresh copy of the Python image
FROM python-base
# Include global arg in this stage of the build
ARG FUNCTION_DIR
# Set working directory to function root directory
WORKDIR ${FUNCTION_DIR}
# Copy in the built dependencies
COPY --from=build-image ${FUNCTION_DIR} ${FUNCTION_DIR}
# (Optional) Add Lambda Runtime Interface Emulator and use a script in the ENTRYPOINT for simpler local runs
ADD https://github.com/aws/aws-lambda-runtime-interface-emulator/releases/latest/download/aws-lambda-rie /usr/bin/aws-lambda-rie
RUN chmod 755 /usr/bin/aws-lambda-rie
# Install ffmpeg
RUN apt-get update && apt-get install -y ffmpeg
# Install other dependencies
COPY requirements.txt ${FUNCTION_DIR}
RUN python${RUNTIME_VERSION} -m pip install -r requirements.txt --target ${FUNCTION_DIR}
# Copy function code
COPY entry.sh /
COPY handler.py video_splitting_cmdline.py dummy_s3_trigger_event.json ${FUNCTION_DIR}
RUN chmod 755 /entry.sh
# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
ENTRYPOINT [ "/entry.sh" ]
CMD [ "handler.handler" ]

dummy_s3_trigger_event.json

@@ -0,0 +1,38 @@
{
"Records":[
{
"eventVersion":"2.0",
"eventSource":"aws:s3",
"awsRegion":"us-east-1",
"eventTime":"1970-01-01T00:00:00.000Z",
"eventName":"ObjectCreated:Put",
"userIdentity":{
"principalId":"EXAMPLE"
},
"requestParameters":{
"sourceIPAddress":"127.0.0.1"
},
"responseElements":{
"x-amz-request-id":"EXAMPLE123456789",
"x-amz-id-2":"EXAMPLE123/5678abcdefghijklambdaisawesome/mnopqrstuvwxyzABCDEFGH"
},
"s3":{
"s3SchemaVersion":"1.0",
"configurationId":"testConfigRule",
"bucket":{
"name":"1229569564-input",
"ownerIdentity":{
"principalId":"EXAMPLE"
},
"arn":"arn:aws:s3:::1229569564-input"
},
"object":{
"key":"jellyfish_jam.mp4",
"size":1024,
"eTag":"0123456789abcdef0123456789abcdef",
"sequencer":"0A1B2C3D4E5F678901"
}
}
}
]
}

entry.sh

@@ -0,0 +1,6 @@
#!/bin/sh
# when AWS_LAMBDA_RUNTIME_API is unset we are running outside Lambda,
# so wrap the runtime client in the Runtime Interface Emulator
if [ -z "${AWS_LAMBDA_RUNTIME_API}" ]; then
    exec /usr/bin/aws-lambda-rie /usr/local/bin/python -m awslambdaric "$1"
else
    exec /usr/local/bin/python -m awslambdaric "$1"
fi
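
With the Runtime Interface Emulator baked into the image (see the Dockerfile above), the function can be exercised locally before deploying. A minimal sketch, assuming the container was started with docker run -p 9000:8080 <image>; the URL below is the emulator's standard invocation endpoint:

# invoke_local.py - POST the recorded S3 trigger event to the local emulator
import urllib.request

with open("dummy_s3_trigger_event.json", "rb") as f:
    payload = f.read()

req = urllib.request.Request(
    "http://localhost:9000/2015-03-31/functions/function/invocations",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())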

handler.py

@@ -0,0 +1,56 @@
import json
import urllib.parse
import boto3
from video_splitting_cmdline import video_splitting_cmdline
print("Loading function")
# attach execution policies and IAM roles in deployment lambda
sesh = boto3.Session()
s3_client = sesh.client("s3", region_name="us-east-1")
lambda_client = sesh.client("lambda", region_name="us-east-1")
def handler(event, context):
for record in event["Records"]:
# get uploaded object
in_bucket = record["s3"]["bucket"]["name"]
if in_bucket != "1229569564-input":
continue
key = urllib.parse.unquote_plus(record["s3"]["object"]["key"], encoding="utf-8")
tmpkey = key.replace("/", "")
download_path = "/tmp/{}".format(tmpkey)
s3_client.download_file(in_bucket, key, download_path)
# process it
out_file = video_splitting_cmdline(download_path)
# upload output object
s3_client.upload_file(
"/tmp/" + out_file,
"1229569564-stage-1",
out_file,
)
        # invoke the face-recognition lambda asynchronously
        # https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html
        # (note: SNS or another dedicated eventing mechanism may be a better
        # fit than direct invocation; see the sketch after this file)
invocation_params = {
"bucket_name": "1229569564-stage-1",
"image_file_name": out_file,
}
invoke_response = lambda_client.invoke(
FunctionName="face-recognition",
InvocationType="Event",
Payload=json.dumps(invocation_params),
)
print(invoke_response)
return


if __name__ == "__main__":
    # local smoke test using the recorded S3 trigger event
    with open("dummy_s3_trigger_event.json", "r") as dummy_event:
        event = json.loads(dummy_event.read())
    handler(event, None)
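
For reference, the SNS alternative raised in the comment above would look roughly like the sketch below; the topic ARN is a hypothetical placeholder, and the face-recognition function would subscribe to the topic instead of being invoked directly:

# sketch: publish the stage-1 result to an SNS topic rather than calling
# lambda_client.invoke(); TOPIC_ARN is a placeholder, not a real resource
import json
import boto3

sns_client = boto3.Session().client("sns", region_name="us-east-1")
TOPIC_ARN = "arn:aws:sns:us-east-1:123456789012:stage-1-output"  # placeholder

def notify_stage1(out_file):
    # every subscriber (e.g. the face-recognition lambda) receives this message
    sns_client.publish(
        TopicArn=TOPIC_ARN,
        Message=json.dumps({
            "bucket_name": "1229569564-stage-1",
            "image_file_name": out_file,
        }),
    )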

requirements.txt

@@ -0,0 +1 @@
boto3

View File

@@ -0,0 +1,27 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:PutLogEvents",
"logs:CreateLogGroup",
"logs:CreateLogStream"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject"
],
"Resource": "arn:aws:s3:::*/*"
},
{
"Effect": "Allow",
"Action": "lambda:InvokeFunction",
"Resource": "*"
}
]
}
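
This document can be attached to the function's execution role as an inline policy via boto3. A minimal sketch; the role name, policy name, and policy file name are assumptions for illustration:

# attach_policy.py - attach the JSON above as an inline role policy
import boto3

iam = boto3.client("iam")

with open("policy.json") as f:  # hypothetical file name for the JSON above
    policy_document = f.read()

iam.put_role_policy(
    RoleName="p1-video-splitting-role",     # placeholder role name
    PolicyName="p1-video-splitting-perms",  # placeholder policy name
    PolicyDocument=policy_document,
)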

video_splitting_cmdline.py

@@ -0,0 +1,16 @@
import os
import subprocess


def video_splitting_cmdline(video_filename):
    # grab the first frame of the video and write it to /tmp as a JPEG
    filename = os.path.basename(video_filename)
    outfile = os.path.splitext(filename)[0] + ".jpg"
    # argument list instead of a shell string: no quoting or injection issues
    split_cmd = ["ffmpeg", "-y", "-i", video_filename, "-vframes", "1", "/tmp/" + outfile]
    try:
        subprocess.check_call(split_cmd)
    except subprocess.CalledProcessError as e:
        print(e.returncode)
        print(e.output)
    return outfile
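
A quick local check, assuming ffmpeg is on PATH; the input file name is hypothetical:

if __name__ == "__main__":
    # writes /tmp/sample.jpg and prints "sample.jpg"
    print(video_splitting_cmdline("sample.mp4"))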