From a61ef9b8e5662e8cee3afc810541c506267cc07f Mon Sep 17 00:00:00 2001 From: metaskills Date: Sun, 23 Jun 2024 23:00:23 +0000 Subject: [PATCH] deploy: 95a01a0821b517907e995c9c4b65b10094de5698 --- 404.html | 4 ++-- assets/js/{85255e3d.b55ae0f8.js => 85255e3d.c5fee749.js} | 2 +- .../{runtime~main.58179dd5.js => runtime~main.bf6fb0cb.js} | 2 +- blog.html | 4 ++-- .../goodbye-cold-starts-hello-proactive-initilizations.html | 4 ++-- blog/archive.html | 4 ++-- blog/tags.html | 4 ++-- blog/tags/cold-starts.html | 4 ++-- blog/tags/console.html | 4 ++-- blog/tags/container.html | 4 ++-- blog/tags/containers.html | 4 ++-- blog/tags/extension.html | 4 ++-- blog/tags/initialization.html | 4 ++-- blog/tags/interaction.html | 4 ++-- blog/tags/lambda.html | 4 ++-- blog/tags/rails.html | 4 ++-- blog/tags/runner.html | 4 ++-- blog/tags/specification.html | 4 ++-- blog/tags/tailscale.html | 4 ++-- blog/tags/tasks.html | 4 ++-- blog/tags/websockets.html | 4 ++-- blog/tailscale-extension-for-lambda-containers.html | 4 ++-- ...the-elusive-lambda-console-a-specification-proposal.html | 4 ++-- blog/welcome-to-lamby-v4.html | 4 ++-- docs/activejob.html | 4 ++-- docs/anatomy.html | 4 ++-- docs/assets.html | 4 ++-- docs/cold-starts.html | 6 +++--- docs/cpu.html | 4 ++-- docs/custom-domain.html | 4 ++-- docs/database.html | 4 ++-- docs/environment.html | 4 ++-- docs/observability.html | 4 ++-- docs/quick-start.html | 4 ++-- docs/running-tasks.html | 4 ++-- docs/webservers.html | 4 ++-- index.html | 4 ++-- markdown-page.html | 4 ++-- 38 files changed, 75 insertions(+), 75 deletions(-) rename assets/js/{85255e3d.b55ae0f8.js => 85255e3d.c5fee749.js} (98%) rename assets/js/{runtime~main.58179dd5.js => runtime~main.bf6fb0cb.js} (99%) diff --git a/404.html b/404.html index 5eebf23..132424a 100644 --- a/404.html +++ b/404.html @@ -5,13 +5,13 @@ Page Not Found | Lamby - Simple Rails & AWS Lambda Integration using Rack - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/85255e3d.b55ae0f8.js b/assets/js/85255e3d.c5fee749.js similarity index 98% rename from assets/js/85255e3d.b55ae0f8.js rename to assets/js/85255e3d.c5fee749.js index 0a0ebfc..b589bc1 100644 --- a/assets/js/85255e3d.b55ae0f8.js +++ b/assets/js/85255e3d.c5fee749.js @@ -1 +1 @@ -"use strict";(self.webpackChunklamby=self.webpackChunklamby||[]).push([[3278],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>h});var n=a(7294);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function r(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var l=n.createContext({}),c=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):r(r({},t),e)),a},d=function(e){var t=c(e.components);return n.createElement(l.Provider,{value:t},e.children)},p="mdxType",u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),p=c(a),m=o,h=p["".concat(l,".").concat(m)]||p[m]||u[m]||i;return a?n.createElement(h,r(r({ref:t},d),{},{components:a})):n.createElement(h,r({ref:t},d))}));function h(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var i=a.length,r=new Array(i);r[0]=m;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[p]="string"==typeof e?e:o,r[1]=s;for(var c=2;c{a.d(t,{Z:()=>i});var n=a(7294);const o={anatomy:"How Lamby Works",cpu:"CPU Architecture",environment:"ENV 
Variables & Secrets",assets:"JavaScript & Assets",deploy:"Build & Deploy","custom-domain":"Custom Domain Names",activejob:"ActiveJob & Background Processing",observability:"Logging & Observability",database:"Database & VPCs",webservers:"Web Proxy Integrations"};function i(e){let{id:t,name:a,anchor:i}=e;const r=a||o[t]||t.replace(/(_|-)/g," ").split(" ").map((e=>e.charAt(0).toUpperCase()+e.toLowerCase().slice(1))).join(" ");return i?n.createElement("a",{href:`/docs/${t}#${i}`},r):n.createElement("a",{href:`/docs/${t}`},r)}},3442:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>c,default:()=>h,frontMatter:()=>l,metadata:()=>d,toc:()=>u});var n=a(7462),o=(a(7294),a(3905)),i=a(304),r=a(941),s=a(4996);const l={id:"cold-starts",toc_max_heading_level:2},c="Cold Starts",d={unversionedId:"cold-starts",id:"cold-starts",title:"Cold Starts",description:"Cold starts (or init times) are an incredibly addictive topic. In many cases they can be ignored as an optimization to perform when the time and data suggests action. In practice, the more traffic your function handles the less likely cold starts are an issue since they statistically disappear under the 99th percentile. However in rare cases, you may want to optimize for them. This guide can help you make decisions on how to go about it. 
It also descibes how AWS may be doing this for you already with Proactive Initialization.",source:"@site/docs/cold-starts.mdx",sourceDirName:".",slug:"/cold-starts",permalink:"/docs/cold-starts",draft:!1,editUrl:"https://github.com/rails-lambda/lamby-site/tree/master/docs/cold-starts.mdx",tags:[],version:"current",frontMatter:{id:"cold-starts",toc_max_heading_level:2},sidebar:"docsSidebar",previous:{title:"Web Proxy Integrations",permalink:"/docs/webservers"}},p={},u=[{value:"Monitoring with CloudWatch",id:"monitoring-with-cloudwatch",level:2},{value:"Proactive Initialization",id:"proactive-initialization",level:2},{value:"Bootsnap by Shopify",id:"bootsnap-by-shopify",level:2},{value:"Other Cold Start Factors",id:"other-cold-start-factors",level:2},{value:"Provisioned Concurrency",id:"provisioned-concurrency",level:2},{value:"Requirements",id:"requirements",level:3},{value:"Auto Scaling",id:"auto-scaling",level:3},{value:"Using a Schedule",id:"using-a-schedule",level:3},{value:"Concurrency CloudWatch Metrics",id:"concurrency-cloudwatch-metrics",level:3},{value:"Gradual Deployments",id:"gradual-deployments",level:2}],m={toc:u};function h(e){let{components:t,...a}=e;return(0,o.kt)("wrapper",(0,n.Z)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"cold-starts"},"Cold Starts"),(0,o.kt)("p",null,"Cold starts (or init times) are an ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html#runtimes-lifecycle"},"incredibly addictive")," topic. In many cases they can be ignored as an optimization to perform when the time and data suggests action. In practice, the more traffic your function handles the less likely cold starts are an issue since they statistically disappear under the ",(0,o.kt)("a",{parentName:"p",href:"https://aws.amazon.com/blogs/aws/amazon-cloudwatch-update-percentile-statistics-and-new-dashboard-widgets/"},"99th percentile"),". However in rare cases, you may want to optimize for them. 
This guide can help you make decisions on how to go about it. It also descibes how AWS may be doing this for you already with ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization"),"."),(0,o.kt)("admonition",{type:"info"},(0,o.kt)("p",{parentName:"admonition"},"Modest sized Rails applications generally boot within 3 to 5 seconds. This happens exactly once for the duration of the function's lifecycle which could last for 30 minutes or more and service a huge amount of traffic with no latency.")),(0,o.kt)("h2",{id:"monitoring-with-cloudwatch"},"Monitoring with CloudWatch"),(0,o.kt)("p",null,"You can not optimize what you do not measure. Thankfully, AWS Lambda logs initialization time of your function to CloudWatch logs which you can query using ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html"},"CloudWatch Insights"),"."),(0,o.kt)("p",null,"This query below will give you a nice percentile breakdown for your application's init duration which is the code outside the handler method. Feel free to change the bin bucket from 1 hour to whatever time helps you. For example, using ",(0,o.kt)("inlineCode",{parentName:"p"},"1d")," (1 day) over a longer duration (weeks) allows you to see statistical trends. In general, your ",(0,o.kt)("inlineCode",{parentName:"p"},"p50")," should be under 5 seconds."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-coffee"},"fields @initDuration\n| filter ispresent(@initDuration)\n| stats pct(@initDuration, 5) as p5,\n pct(@initDuration, 50) as p50,\n pct(@initDuration, 95) as p95,\n pct(@initDuration, 99) as p99\n by bin(1h)\n")),(0,o.kt)(r.Z,{alt:"Rails cold start data from CloudWatch Insights. 
Shows percentiles for p5, p50, p95, and p99.",sources:{light:(0,s.Z)("/img/docs/cold-start-cloudwatch-insights-percentiles.png"),dark:(0,s.Z)("/img/docs/cold-start-cloudwatch-insights-percentiles-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("admonition",{type:"info"},(0,o.kt)("p",{parentName:"admonition"},"See the ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization")," section for more details on how to use Lamby's new CloudWatch Metrics to measure both cold starts and proactive initialization.")),(0,o.kt)("h2",{id:"proactive-initialization"},"Proactive Initialization"),(0,o.kt)("p",null,"As described in ",(0,o.kt)("a",{parentName:"p",href:"https://twitter.com/astuyve"},"AJ Stuyvenberg's")," post on the topic ",(0,o.kt)("a",{parentName:"p",href:"https://aaronstuyvenberg.com/posts/understanding-proactive-initialization"},"Understanding AWS Lambda Proactive Initialization"),", AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt ",(0,o.kt)("a",{parentName:"p",href:"https://aaronstuyvenberg.com/posts/understanding-proactive-initialization"},"from AWS' docs"),":"),(0,o.kt)("blockquote",null,(0,o.kt)("p",{parentName:"blockquote"},"For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.")),(0,o.kt)("p",null,"This means the ",(0,o.kt)("a",{parentName:"p",href:"#monitoring-with-cloudwatch"},"Monitoring with CloudWatch")," is just half the picture. But how much is your application potentially benefiting from proactive inits? 
Since ",(0,o.kt)("a",{parentName:"p",href:"https://github.com/rails-lambda/lamby/pull/169"},"Lamby v5.1.0"),", you can now find out easily using CloudWatch Metrics. To turn metrics on, enable the config like so:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-rails",metastring:'title="config/environments/production.rb"',title:'"config/environments/production.rb"'},"config.lamby.cold_start_metrics = true\n")),(0,o.kt)("p",null,"Lamby will now publish ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format.html"},"CloudWatch Embedded Metrics")," in the ",(0,o.kt)("inlineCode",{parentName:"p"},"Lamby")," namespace with a custom dimension for each application's name. Captured metrics include counts for Cold Starts vs. Proactive Initializations. Here is an example running sum of 3 days of data for a large Rails application in the ",(0,o.kt)("inlineCode",{parentName:"p"},"us-east-1")," region."),(0,o.kt)(r.Z,{alt:"Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes",sources:{light:(0,s.Z)("/img/docs/lamby-cloud-watch-metrics-cold-start-v-proactive-init-light.png"),dark:(0,s.Z)("/img/docs/lamby-cloud-watch-metrics-cold-start-v-proactive-init-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("p",null,"This data shows the vast majority of your initialized Lambda Contaienrs are proactively initialized. Hence, no cold starts are felt by end users or consumers of your function. 
If you need to customize the name of your Rails application in the CloudWatch Metrics dimension, you can do so using this config."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-rails",metastring:'title="config/environments/production.rb"',title:'"config/environments/production.rb"'},"config.lamby.metrics_app_name = 'MyServiceName'\n")),(0,o.kt)("h2",{id:"bootsnap-by-shopify"},"Bootsnap by Shopify"),(0,o.kt)("p",null,"Reducing your Rails applications boot time should be your first optimization option against true cold starts. ",(0,o.kt)("a",{parentName:"p",href:"https://github.com/Shopify/bootsnap"},"Bootsnap")," has been developed by Shopify to speed up Rails boot time for production environments using a mix of compile and load path caches. When complete, your deployed container will have everything it needs to boot faster!"),(0,o.kt)("p",null,"How much faster? Generally 1 to 3 seconds depending on your Lambda application. Adding Bootsnap to your Rails Lambda application is straightforward. First, add the gem to your production group in your ",(0,o.kt)("inlineCode",{parentName:"p"},"Gemfile"),"."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-ruby",metastring:'title="Gemfile"',title:'"Gemfile"'},"group :production do\n gem 'bootsnap'\nend\n")),(0,o.kt)("p",null,"Next, we need to add the Bootsnap caches with your deployed container. Add these lines to your project's ",(0,o.kt)("inlineCode",{parentName:"p"},"Dockerfile")," after your ",(0,o.kt)("inlineCode",{parentName:"p"},"COPY . .")," declaration. It will run two commands. The first is the standard Bootsnap precompile which builds both the Ruby ISeq & YAML caches. 
The second line loads your application into memory and thus automatically creates the ",(0,o.kt)("inlineCode",{parentName:"p"},"$LOAD_PATH")," cache."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-dockerfile",metastring:'title="Dockerfile"',title:'"Dockerfile"'},"ENV BOOTSNAP_CACHE_DIR=/var/task/tmp/cache\nRUN bundle exec bootsnap precompile --gemfile . \\\n && bundle exec ruby config/environment.rb\n")),(0,o.kt)("p",null,"Afterward you should be able to verify that Bootsnap's caches are working. Measure your cold starts using a 1 day stats duration for better long term visibility."),(0,o.kt)("h2",{id:"other-cold-start-factors"},"Other Cold Start Factors"),(0,o.kt)("p",null,"Most of these should be considered before using ",(0,o.kt)("a",{parentName:"p",href:"#provisioned-concurrency"},"Provisioned Concurrency"),". Also note, that ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization")," may be masking some of these optimizations for you already. That said, consider the following:"),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Client Connect Timeouts")," - Your Lambda application may be used by clients who have a low ",(0,o.kt)("a",{parentName:"p",href:"https://ruby-doc.org/stdlib/libdoc/net/http/rdoc/Net/HTTP.html#open_timeout-attribute-method"},"http open timeout"),". If this is the case, you may have to increase client timeouts, leverage provisioned concurrency, and/or reduce initialization time."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Update Ruby")," - New versions of Ruby typically boot and run faster. Since our ",(0,o.kt)(i.Z,{id:"quick-start",name:"cookiecutter",mdxType:"DocLink"})," project uses custom Ruby Ubuntu with Lambda containers, updating Ruby should be as easy as changing a few lines of code."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Memory & vCPU")," - It has been proposed that increased Memory/vCPU could reduce cold starts. 
We have not seen any evidence of this. For example, we recommend that Rails functions use ",(0,o.kt)("inlineCode",{parentName:"p"},"1792")," for its ",(0,o.kt)("inlineCode",{parentName:"p"},"MemorySize")," equal to 1 vCPU. Any lower would sacrifice response times. Tests showed that increasing this to ",(0,o.kt)("inlineCode",{parentName:"p"},"3008")," equal to 2 vCPUs did nothing for a basic Rails application but cost more. However, if your function does concurrent work doing initialization, consider testing different values here."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Lazy DB/Resource Connections"),' - Rails is really good at lazy loading database connections. This is important to keep the "Init" phase of the ',(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html#runtimes-lifecycle"},"Lambda execution lifecycle"),' quick and under 10s. This allows the first "Invoke" to connect to other resources. To keep init duration low, make sure your application does not eagerly connect to resources. Both ActiveRecord and Memcached w/Dalli are lazy loaded by default.'),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"ActiveRecord Schema Cache")," - Commonly called Rails' best kept performance feature, the ",(0,o.kt)("a",{parentName:"p",href:"https://kirshatrov.com/2016/12/13/schema-cache/"},"schema cache")," can help reduce first request response time after Rails is initialized. So it should not help the init time but it could very easily help the first invoke times."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Reduce Image Size")," - Sort of related to your Ruby version, always make sure that your ECR image is as small as possible. Lambda Containers supports up to 10GB for your image. There is no data on how much this could effect cold starts. 
So please ",(0,o.kt)("a",{parentName:"p",href:"https://github.com/rails-lambda/lamby/discussions"},"share your stories"),"."),(0,o.kt)("h2",{id:"provisioned-concurrency"},"Provisioned Concurrency"),(0,o.kt)("admonition",{type:"caution"},(0,o.kt)("p",{parentName:"admonition"},"Provisioned concurrency comes with additional execution costs. Now that we have ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization")," it may never be needed.")),(0,o.kt)("p",null,"AWS provides an option called ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/lambda/latest/dg/configuration-concurrency.html"},"Provisioned Concurrency")," (PC) which allows you to warm instances prior to receiving requests. This lets you execute Lambda functions with super low latency and no cold starts. Besides setting a static PC value, there are two fundamental methods for scaling with Provisioned Concurrency. Please use the ",(0,o.kt)("a",{parentName:"p",href:"#concurrency-cloudwatch-metrics"},"Concurrency CloudWatch Metrics")," section to help you make a determination on what method is right for you."),(0,o.kt)("h3",{id:"requirements"},"Requirements"),(0,o.kt)("p",null,"Our ",(0,o.kt)(i.Z,{id:"quick-start",mdxType:"DocLink"})," cookiecutter includes both an ",(0,o.kt)("inlineCode",{parentName:"p"},"AutoPublishAlias")," and an all at once ",(0,o.kt)("inlineCode",{parentName:"p"},"DeploymentPreference"),'. The publish alias is needed for provisioned concurrency. You can read about both in AWS "',(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/automating-updates-to-serverless-apps.html"},"Deploying serverless applications gradually"),"\" guide. 
The code snippets below assume your function's logical resource is ",(0,o.kt)("inlineCode",{parentName:"p"},"RailsLambda")," and you have an alias named ",(0,o.kt)("inlineCode",{parentName:"p"},"live"),"."),(0,o.kt)("h3",{id:"auto-scaling"},"Auto Scaling"),(0,o.kt)("p",null,"Here we are creating an ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html"},(0,o.kt)("inlineCode",{parentName:"a"},"AWS::AutoScaling::ScalingPolicy"))," and a ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-applicationautoscaling-scalabletarget.html"},(0,o.kt)("inlineCode",{parentName:"a"},"AWS::ApplicationAutoScaling::ScalableTarget"))," which effectively creates a managed CloudWatch Rule that monitors your application to scale it up and down as needed. In this example we set a maximum of ",(0,o.kt)("inlineCode",{parentName:"p"},"40")," and minimal of ",(0,o.kt)("inlineCode",{parentName:"p"},"5")," provisioned instances. We have a ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"0.4")," which is a percentage of provisioned concurrency to trigger the CloudWatch Rules via the ",(0,o.kt)("inlineCode",{parentName:"p"},"ProvisionedConcurrencyUtilization")," metric. 
In this case, lower equals a more aggressive scaling strategy."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-yaml",metastring:'title="template.yaml"',title:'"template.yaml"'},"Resources:\n RailsLambda:\n # ...\n Properties:\n ProvisionedConcurrencyConfig:\n ProvisionedConcurrentExecutions: 5\n\n RailsScalableTarget:\n Type: AWS::ApplicationAutoScaling::ScalableTarget\n Properties:\n MaxCapacity: 40\n MinCapacity: 5\n ResourceId: !Sub function:${RailsLambda}:live\n RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambda.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency\n ScalableDimension: lambda:function:ProvisionedConcurrency\n ServiceNamespace: lambda\n DependsOn: RailsLambdaAliaslive\n\n RailsScalingPolicy:\n Type: AWS::ApplicationAutoScaling::ScalingPolicy\n Properties:\n PolicyName: utilization\n PolicyType: TargetTrackingScaling\n ScalingTargetId: !Ref RailsScalableTarget\n TargetTrackingScalingPolicyConfiguration:\n TargetValue: 0.4\n PredefinedMetricSpecification:\n PredefinedMetricType: LambdaProvisionedConcurrencyUtilization\n")),(0,o.kt)("p",null,"Please read this related article. ",(0,o.kt)("a",{parentName:"p",href:"https://georgemao.medium.com/understanding-lambda-provisioned-concurrency-autoscaling-735eb14040cf"},"Lambda Provisioned Concurrency AutoScaling is Awesome. Make sure you understand how it works!")," It goes into great detail on how short traffic bursts (common for most of us) can be missed by the standard CloudWatch Alarms and possible remediation to scale up."),(0,o.kt)("h3",{id:"using-a-schedule"},"Using a Schedule"),(0,o.kt)("p",null,"In this example we have measured via CloudWatch Metrics (image above) that our concurrent executions never really goes past ",(0,o.kt)("inlineCode",{parentName:"p"},"40")," instances during daytime peak usage. 
In this case to totally remove cold starts from a small percentage of requests we can draw a big virtual box around the curves above to always keep ",(0,o.kt)("inlineCode",{parentName:"p"},"40")," instances warm during our peak times starting at 6am EST and going back down to ",(0,o.kt)("inlineCode",{parentName:"p"},"0")," Provisioned Concurrency at 11PM EST. Here is how we would do that with a Provisioned Concurrency schedule."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-yaml",metastring:'title="template.yaml"',title:'"template.yaml"'},'Resources:\n RailsScalableTarget:\n Type: AWS::ApplicationAutoScaling::ScalableTarget\n Properties:\n MaxCapacity: 0\n MinCapacity: 0\n ResourceId: !Sub function:${RailsLambda}:live\n RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambdaapplication-autoscaling. amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency\n ScalableDimension: lambda:function:ProvisionedConcurrency\n ServiceNamespace: lambda\n ScheduledActions:\n - ScalableTargetAction:\n MaxCapacity: 0\n MinCapacity: 0\n ScheduledActionName: ScaleDown\n Schedule: "cron(0 3 * * ? *)"\n - ScalableTargetAction:\n MaxCapacity: 40\n MinCapacity: 40\n ScheduledActionName: ScaleUp\n Schedule: "cron(0 10 * * ? *)"\n DependsOn: RailsLambdaAliaslive\n')),(0,o.kt)("h3",{id:"concurrency-cloudwatch-metrics"},"Concurrency CloudWatch Metrics"),(0,o.kt)("p",null,"The graphs below were made using the following managed AWS Lambda CloudWatch Metrics. 
Please make sure to use your deploy alias of ",(0,o.kt)("inlineCode",{parentName:"p"},":live")," when targeting your functions resource in these reports."),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("inlineCode",{parentName:"li"},"ConcurrentExecutions")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("inlineCode",{parentName:"li"},"ProvisionedConcurrentExecutions")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("inlineCode",{parentName:"li"},"ProvisionedConcurrencySpilloverInvocations"))),(0,o.kt)("p",null,"This chart shows that a static ",(0,o.kt)("inlineCode",{parentName:"p"},"ProvisionedConcurrentExecutions")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"5")," can handle most invocations for the first 3 days. Later, for the remaining 4 days, auto scaling was added with a ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"0.4"),". Because of the workload's spiky nature, the Invocations look almost 100% provisioned. However, the concurrent executions show otherwise."),(0,o.kt)(r.Z,{alt:"Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes",sources:{light:(0,s.Z)("/img/docs/cold-start-concurrency.png"),dark:(0,s.Z)("/img/docs/cold-start-concurrency-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("p",null,"Here is a 7 day view from the 4 day mark above. The ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," is still set to ",(0,o.kt)("inlineCode",{parentName:"p"},"0.4"),". It illustrates how the default CloudWatch Rule for ",(0,o.kt)("inlineCode",{parentName:"p"},"ProvisionedConcurrencyUtilization")," metrics over a 3 minute span are not quick enough to scale PC. It is possible to use a ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"0.1")," to force the PC lines to meet the blue. 
But your cost at this point would be unrealistically high."),(0,o.kt)(r.Z,{alt:"Rails in Lambda Concurrent Executions and Provisioned Executions",sources:{light:(0,s.Z)("/img/docs/cold-start-concurrency-vs-spilled.png"),dark:(0,s.Z)("/img/docs/cold-start-concurrency-vs-spilled-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("h2",{id:"gradual-deployments"},"Gradual Deployments"),(0,o.kt)("p",null,"As mentioned in the ",(0,o.kt)("a",{parentName:"p",href:"#provisioned-concurrency"},"Provisioned Concurrency")," section we use a simple ",(0,o.kt)("inlineCode",{parentName:"p"},"DeploymentPreference")," value called ",(0,o.kt)("inlineCode",{parentName:"p"},"AllAtOnce"),". When a deploy happens, Lambda will need to download your new ECR image before your application is initialized. In certain high traffic scenarios along with a potentially slow loading application, deploys can be a thundering herd effect causing your concurrency to spike and a small percentage of users having longer response times."),(0,o.kt)("p",null,"Please see AWS' \"",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/automating-updates-to-serverless-apps.html"},"Deploying serverless applications gradually"),'" guide for full details but one way to soften this would be to roll out your new code in 10 minutes total via the ',(0,o.kt)("inlineCode",{parentName:"p"},"Linear10PercentEvery1Minute")," deployment preference. This will automatically create a ",(0,o.kt)("a",{parentName:"p",href:"https://aws.amazon.com/codedeploy/"},"AWS CodeDeploy")," application and deployments for you. 
So cool!"))}h.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunklamby=self.webpackChunklamby||[]).push([[3278],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>h});var n=a(7294);function o(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function r(e){for(var t=1;t=0||(o[a]=e[a]);return o}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(o[a]=e[a])}return o}var l=n.createContext({}),c=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):r(r({},t),e)),a},d=function(e){var t=c(e.components);return n.createElement(l.Provider,{value:t},e.children)},p="mdxType",u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,o=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),p=c(a),m=o,h=p["".concat(l,".").concat(m)]||p[m]||u[m]||i;return a?n.createElement(h,r(r({ref:t},d),{},{components:a})):n.createElement(h,r({ref:t},d))}));function h(e,t){var a=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var i=a.length,r=new Array(i);r[0]=m;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[p]="string"==typeof e?e:o,r[1]=s;for(var c=2;c{a.d(t,{Z:()=>i});var n=a(7294);const o={anatomy:"How Lamby Works",cpu:"CPU Architecture",environment:"ENV Variables & Secrets",assets:"JavaScript & Assets",deploy:"Build & Deploy","custom-domain":"Custom Domain Names",activejob:"ActiveJob & Background Processing",observability:"Logging & Observability",database:"Database & VPCs",webservers:"Web Proxy 
Integrations"};function i(e){let{id:t,name:a,anchor:i}=e;const r=a||o[t]||t.replace(/(_|-)/g," ").split(" ").map((e=>e.charAt(0).toUpperCase()+e.toLowerCase().slice(1))).join(" ");return i?n.createElement("a",{href:`/docs/${t}#${i}`},r):n.createElement("a",{href:`/docs/${t}`},r)}},3442:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>c,default:()=>h,frontMatter:()=>l,metadata:()=>d,toc:()=>u});var n=a(7462),o=(a(7294),a(3905)),i=a(304),r=a(941),s=a(4996);const l={id:"cold-starts",toc_max_heading_level:2},c="Cold Starts",d={unversionedId:"cold-starts",id:"cold-starts",title:"Cold Starts",description:"Cold starts (or init times) are an incredibly addictive topic. In many cases they can be ignored as an optimization to perform when the time and data suggests action. In practice, the more traffic your function handles the less likely cold starts are an issue since they statistically disappear under the 99th percentile. However in rare cases, you may want to optimize for them. This guide can help you make decisions on how to go about it. 
It also descibes how AWS may be doing this for you already with Proactive Initialization.",source:"@site/docs/cold-starts.mdx",sourceDirName:".",slug:"/cold-starts",permalink:"/docs/cold-starts",draft:!1,editUrl:"https://github.com/rails-lambda/lamby-site/tree/master/docs/cold-starts.mdx",tags:[],version:"current",frontMatter:{id:"cold-starts",toc_max_heading_level:2},sidebar:"docsSidebar",previous:{title:"Web Proxy Integrations",permalink:"/docs/webservers"}},p={},u=[{value:"Monitoring with CloudWatch",id:"monitoring-with-cloudwatch",level:2},{value:"Proactive Initialization",id:"proactive-initialization",level:2},{value:"Bootsnap by Shopify",id:"bootsnap-by-shopify",level:2},{value:"Other Cold Start Factors",id:"other-cold-start-factors",level:2},{value:"Provisioned Concurrency",id:"provisioned-concurrency",level:2},{value:"Requirements",id:"requirements",level:3},{value:"Auto Scaling",id:"auto-scaling",level:3},{value:"Using a Schedule",id:"using-a-schedule",level:3},{value:"Concurrency CloudWatch Metrics",id:"concurrency-cloudwatch-metrics",level:3},{value:"Gradual Deployments",id:"gradual-deployments",level:2}],m={toc:u};function h(e){let{components:t,...a}=e;return(0,o.kt)("wrapper",(0,n.Z)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"cold-starts"},"Cold Starts"),(0,o.kt)("p",null,"Cold starts (or init times) are an ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html#runtimes-lifecycle"},"incredibly addictive")," topic. In many cases they can be ignored as an optimization to perform when the time and data suggests action. In practice, the more traffic your function handles the less likely cold starts are an issue since they statistically disappear under the ",(0,o.kt)("a",{parentName:"p",href:"https://aws.amazon.com/blogs/aws/amazon-cloudwatch-update-percentile-statistics-and-new-dashboard-widgets/"},"99th percentile"),". However in rare cases, you may want to optimize for them. 
This guide can help you make decisions on how to go about it. It also descibes how AWS may be doing this for you already with ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization"),"."),(0,o.kt)("admonition",{type:"info"},(0,o.kt)("p",{parentName:"admonition"},"Modest sized Rails applications generally boot within 3 to 5 seconds. This happens exactly once for the duration of the function's lifecycle which could last for 30 minutes or more and service a huge amount of traffic with no latency.")),(0,o.kt)("h2",{id:"monitoring-with-cloudwatch"},"Monitoring with CloudWatch"),(0,o.kt)("p",null,"You can not optimize what you do not measure. Thankfully, AWS Lambda logs initialization time of your function to CloudWatch logs which you can query using ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html"},"CloudWatch Insights"),"."),(0,o.kt)("p",null,"This query below will give you a nice percentile breakdown for your application's init duration which is the code outside the handler method. Feel free to change the bin bucket from 1 hour to whatever time helps you. For example, using ",(0,o.kt)("inlineCode",{parentName:"p"},"1d")," (1 day) over a longer duration (weeks) allows you to see statistical trends. In general, your ",(0,o.kt)("inlineCode",{parentName:"p"},"p50")," should be under 5 seconds."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-coffee"},"fields @initDuration\n| filter ispresent(@initDuration)\n| stats pct(@initDuration, 5) as p5,\n pct(@initDuration, 50) as p50,\n pct(@initDuration, 95) as p95,\n pct(@initDuration, 99) as p99\n by bin(1h)\n")),(0,o.kt)(r.Z,{alt:"Rails cold start data from CloudWatch Insights. 
Shows percentiles for p5, p50, p95, and p99.",sources:{light:(0,s.Z)("/img/docs/cold-start-cloudwatch-insights-percentiles.png"),dark:(0,s.Z)("/img/docs/cold-start-cloudwatch-insights-percentiles-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("admonition",{type:"info"},(0,o.kt)("p",{parentName:"admonition"},"See the ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization")," section for more details on how to use Lamby's new CloudWatch Metrics to measure both cold starts and proactive initialization.")),(0,o.kt)("h2",{id:"proactive-initialization"},"Proactive Initialization"),(0,o.kt)("p",null,"As described in ",(0,o.kt)("a",{parentName:"p",href:"https://twitter.com/astuyve"},"AJ Stuyvenberg's")," post on the topic ",(0,o.kt)("a",{parentName:"p",href:"https://aaronstuyvenberg.com/posts/understanding-proactive-initialization"},"Understanding AWS Lambda Proactive Initialization"),", AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt ",(0,o.kt)("a",{parentName:"p",href:"https://aaronstuyvenberg.com/posts/understanding-proactive-initialization"},"from AWS' docs"),":"),(0,o.kt)("blockquote",null,(0,o.kt)("p",{parentName:"blockquote"},"For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.")),(0,o.kt)("p",null,"This means the ",(0,o.kt)("a",{parentName:"p",href:"#monitoring-with-cloudwatch"},"Monitoring with CloudWatch")," is just half the picture. But how much is your application potentially benefiting from proactive inits? 
Since ",(0,o.kt)("a",{parentName:"p",href:"https://github.com/rails-lambda/lamby/pull/169"},"Lamby v5.1.0"),", you can now find out easily using CloudWatch Metrics. To turn metrics on, enable the config like so:"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-rails",metastring:'title="config/environments/production.rb"',title:'"config/environments/production.rb"'},"config.lamby.cold_start_metrics = true\n")),(0,o.kt)("p",null,"Lamby will now publish ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Embedded_Metric_Format.html"},"CloudWatch Embedded Metrics")," in the ",(0,o.kt)("inlineCode",{parentName:"p"},"Lamby")," namespace with a custom dimension for each application's name. Captured metrics include counts for Cold Starts vs. Proactive Initializations. Here is an example running sum of 3 days of data for a large Rails application in the ",(0,o.kt)("inlineCode",{parentName:"p"},"us-east-1")," region."),(0,o.kt)(r.Z,{alt:"Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes",sources:{light:(0,s.Z)("/img/docs/lamby-cloud-watch-metrics-cold-start-v-proactive-init-light.png"),dark:(0,s.Z)("/img/docs/lamby-cloud-watch-metrics-cold-start-v-proactive-init-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("p",null,"This data shows the vast majority of your initialized Lambda Containers are proactively initialized. Hence, no cold starts are felt by end users or consumers of your function. 
If you need to customize the name of your Rails application in the CloudWatch Metrics dimension, you can do so using this config."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-rails",metastring:'title="config/environments/production.rb"',title:'"config/environments/production.rb"'},"config.lamby.metrics_app_name = 'MyServiceName'\n")),(0,o.kt)("h2",{id:"bootsnap-by-shopify"},"Bootsnap by Shopify"),(0,o.kt)("p",null,"Reducing your Rails applications boot time should be your first optimization option against true cold starts. ",(0,o.kt)("a",{parentName:"p",href:"https://github.com/Shopify/bootsnap"},"Bootsnap")," has been developed by Shopify to speed up Rails boot time for production environments using a mix of compile and load path caches. When complete, your deployed container will have everything it needs to boot faster!"),(0,o.kt)("p",null,"How much faster? Generally 1 to 3 seconds depending on your Lambda application. Adding Bootsnap to your Rails Lambda application is straightforward. First, add the gem to your production group in your ",(0,o.kt)("inlineCode",{parentName:"p"},"Gemfile"),"."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-ruby",metastring:'title="Gemfile"',title:'"Gemfile"'},"group :production do\n gem 'bootsnap'\nend\n")),(0,o.kt)("p",null,"Next, we need to add the Bootsnap caches with your deployed container. Add these lines to your project's ",(0,o.kt)("inlineCode",{parentName:"p"},"Dockerfile")," after your ",(0,o.kt)("inlineCode",{parentName:"p"},"COPY . .")," declaration. It will run two commands. The first is the standard Bootsnap precompile which builds both the Ruby ISeq & YAML caches. 
The second line loads your application into memory and thus automatically creates the ",(0,o.kt)("inlineCode",{parentName:"p"},"$LOAD_PATH")," cache."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-dockerfile",metastring:'title="Dockerfile"',title:'"Dockerfile"'},"ENV BOOTSNAP_CACHE_DIR=/var/task/tmp/cache\nRUN bundle exec bootsnap precompile --gemfile . \\\n && bundle exec ruby config/environment.rb\n")),(0,o.kt)("p",null,"Afterward you should be able to verify that Bootsnap's caches are working. Measure your cold starts using a 1 day stats duration for better long term visibility."),(0,o.kt)("h2",{id:"other-cold-start-factors"},"Other Cold Start Factors"),(0,o.kt)("p",null,"Most of these should be considered before using ",(0,o.kt)("a",{parentName:"p",href:"#provisioned-concurrency"},"Provisioned Concurrency"),". Also note, that ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization")," may be masking some of these optimizations for you already. That said, consider the following:"),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Client Connect Timeouts")," - Your Lambda application may be used by clients who have a low ",(0,o.kt)("a",{parentName:"p",href:"https://ruby-doc.org/stdlib/libdoc/net/http/rdoc/Net/HTTP.html#open_timeout-attribute-method"},"http open timeout"),". If this is the case, you may have to increase client timeouts, leverage provisioned concurrency, and/or reduce initialization time."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Update Ruby")," - New versions of Ruby typically boot and run faster. Since our ",(0,o.kt)(i.Z,{id:"quick-start",name:"cookiecutter",mdxType:"DocLink"})," project uses custom Ruby Ubuntu with Lambda containers, updating Ruby should be as easy as changing a few lines of code."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Memory & vCPU")," - It has been proposed that increased Memory/vCPU could reduce cold starts. 
We have not seen any evidence of this. For example, we recommend that Rails functions use ",(0,o.kt)("inlineCode",{parentName:"p"},"1792")," for its ",(0,o.kt)("inlineCode",{parentName:"p"},"MemorySize")," equal to 1 vCPU. Any lower would sacrifice response times. Tests showed that increasing this to ",(0,o.kt)("inlineCode",{parentName:"p"},"3008")," equal to 2 vCPUs did nothing for a basic Rails application but cost more. However, if your function does concurrent work doing initialization, consider testing different values here."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Lazy DB/Resource Connections"),' - Rails is really good at lazy loading database connections. This is important to keep the "Init" phase of the ',(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html#runtimes-lifecycle"},"Lambda execution lifecycle"),' quick and under 10s. This allows the first "Invoke" to connect to other resources. To keep init duration low, make sure your application does not eagerly connect to resources. Both ActiveRecord and Memcached w/Dalli are lazy loaded by default.'),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"ActiveRecord Schema Cache")," - Commonly called Rails' best kept performance feature, the ",(0,o.kt)("a",{parentName:"p",href:"https://kirshatrov.com/2016/12/13/schema-cache/"},"schema cache")," can help reduce first request response time after Rails is initialized. So it should not help the init time but it could very easily help the first invoke times."),(0,o.kt)("p",null,(0,o.kt)("strong",{parentName:"p"},"Reduce Image Size")," - Sort of related to your Ruby version, always make sure that your ECR image is as small as possible. Lambda Containers supports up to 10GB for your image. There is no data on how much this could effect cold starts. 
So please ",(0,o.kt)("a",{parentName:"p",href:"https://github.com/rails-lambda/lamby/discussions"},"share your stories"),"."),(0,o.kt)("h2",{id:"provisioned-concurrency"},"Provisioned Concurrency"),(0,o.kt)("admonition",{type:"caution"},(0,o.kt)("p",{parentName:"admonition"},"Provisioned concurrency comes with additional execution costs. Now that we have ",(0,o.kt)("a",{parentName:"p",href:"#proactive-initialization"},"Proactive Initialization")," it may never be needed.")),(0,o.kt)("p",null,"AWS provides an option called ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/lambda/latest/dg/configuration-concurrency.html"},"Provisioned Concurrency")," (PC) which allows you to warm instances prior to receiving requests. This lets you execute Lambda functions with super low latency and no cold starts. Besides setting a static PC value, there are two fundamental methods for scaling with Provisioned Concurrency. Please use the ",(0,o.kt)("a",{parentName:"p",href:"#concurrency-cloudwatch-metrics"},"Concurrency CloudWatch Metrics")," section to help you make a determination on what method is right for you."),(0,o.kt)("h3",{id:"requirements"},"Requirements"),(0,o.kt)("p",null,"Our ",(0,o.kt)(i.Z,{id:"quick-start",mdxType:"DocLink"})," cookiecutter includes both an ",(0,o.kt)("inlineCode",{parentName:"p"},"AutoPublishAlias")," and an all at once ",(0,o.kt)("inlineCode",{parentName:"p"},"DeploymentPreference"),'. The publish alias is needed for provisioned concurrency. You can read about both in AWS "',(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/automating-updates-to-serverless-apps.html"},"Deploying serverless applications gradually"),"\" guide. 
The code snippets below assume your function's logical resource is ",(0,o.kt)("inlineCode",{parentName:"p"},"RailsLambda")," and you have an alias named ",(0,o.kt)("inlineCode",{parentName:"p"},"live"),"."),(0,o.kt)("h3",{id:"auto-scaling"},"Auto Scaling"),(0,o.kt)("p",null,"Here we are creating an ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-as-policy.html"},(0,o.kt)("inlineCode",{parentName:"a"},"AWS::AutoScaling::ScalingPolicy"))," and a ",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-applicationautoscaling-scalabletarget.html"},(0,o.kt)("inlineCode",{parentName:"a"},"AWS::ApplicationAutoScaling::ScalableTarget"))," which effectively creates a managed CloudWatch Rule that monitors your application to scale it up and down as needed. In this example we set a maximum of ",(0,o.kt)("inlineCode",{parentName:"p"},"40")," and minimal of ",(0,o.kt)("inlineCode",{parentName:"p"},"5")," provisioned instances. We have a ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"0.4")," which is a percentage of provisioned concurrency to trigger the CloudWatch Rules via the ",(0,o.kt)("inlineCode",{parentName:"p"},"ProvisionedConcurrencyUtilization")," metric. 
In this case, lower equals a more aggressive scaling strategy."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-yaml",metastring:'title="template.yaml"',title:'"template.yaml"'},"Resources:\n RailsLambda:\n # ...\n Properties:\n ProvisionedConcurrencyConfig:\n ProvisionedConcurrentExecutions: 5\n\n RailsScalableTarget:\n Type: AWS::ApplicationAutoScaling::ScalableTarget\n Properties:\n MaxCapacity: 40\n MinCapacity: 5\n ResourceId: !Sub function:${RailsLambda}:live\n RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambda.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency\n ScalableDimension: lambda:function:ProvisionedConcurrency\n ServiceNamespace: lambda\n DependsOn: RailsLambdaAliaslive\n\n RailsScalingPolicy:\n Type: AWS::ApplicationAutoScaling::ScalingPolicy\n Properties:\n PolicyName: utilization\n PolicyType: TargetTrackingScaling\n ScalingTargetId: !Ref RailsScalableTarget\n TargetTrackingScalingPolicyConfiguration:\n TargetValue: 0.4\n PredefinedMetricSpecification:\n PredefinedMetricType: LambdaProvisionedConcurrencyUtilization\n")),(0,o.kt)("p",null,"Please read this related article. ",(0,o.kt)("a",{parentName:"p",href:"https://georgemao.medium.com/understanding-lambda-provisioned-concurrency-autoscaling-735eb14040cf"},"Lambda Provisioned Concurrency AutoScaling is Awesome. Make sure you understand how it works!")," It goes into great detail on how short traffic bursts (common for most of us) can be missed by the standard CloudWatch Alarms and possible remediation to scale up."),(0,o.kt)("h3",{id:"using-a-schedule"},"Using a Schedule"),(0,o.kt)("p",null,"In this example we have measured via CloudWatch Metrics (image above) that our concurrent executions never really goes past ",(0,o.kt)("inlineCode",{parentName:"p"},"40")," instances during daytime peak usage. 
In this case to totally remove cold starts from a small percentage of requests we can draw a big virtual box around the curves above to always keep ",(0,o.kt)("inlineCode",{parentName:"p"},"40")," instances warm during our peak times starting at 6am EST and going back down to ",(0,o.kt)("inlineCode",{parentName:"p"},"0")," Provisioned Concurrency at 11PM EST. Here is how we would do that with a Provisioned Concurrency schedule."),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-yaml",metastring:'title="template.yaml"',title:'"template.yaml"'},'Resources:\n RailsScalableTarget:\n Type: AWS::ApplicationAutoScaling::ScalableTarget\n Properties:\n MaxCapacity: 0\n MinCapacity: 0\n ResourceId: !Sub function:${RailsLambda}:live\n RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambdaapplication-autoscaling. amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency\n ScalableDimension: lambda:function:ProvisionedConcurrency\n ServiceNamespace: lambda\n ScheduledActions:\n - ScalableTargetAction:\n MaxCapacity: 0\n MinCapacity: 0\n ScheduledActionName: ScaleDown\n Schedule: "cron(0 3 * * ? *)"\n - ScalableTargetAction:\n MaxCapacity: 40\n MinCapacity: 40\n ScheduledActionName: ScaleUp\n Schedule: "cron(0 10 * * ? *)"\n DependsOn: RailsLambdaAliaslive\n')),(0,o.kt)("h3",{id:"concurrency-cloudwatch-metrics"},"Concurrency CloudWatch Metrics"),(0,o.kt)("p",null,"The graphs below were made using the following managed AWS Lambda CloudWatch Metrics. 
Please make sure to use your deploy alias of ",(0,o.kt)("inlineCode",{parentName:"p"},":live")," when targeting your functions resource in these reports."),(0,o.kt)("ul",null,(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("inlineCode",{parentName:"li"},"ConcurrentExecutions")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("inlineCode",{parentName:"li"},"ProvisionedConcurrentExecutions")),(0,o.kt)("li",{parentName:"ul"},(0,o.kt)("inlineCode",{parentName:"li"},"ProvisionedConcurrencySpilloverInvocations"))),(0,o.kt)("p",null,"This chart shows that a static ",(0,o.kt)("inlineCode",{parentName:"p"},"ProvisionedConcurrentExecutions")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"5")," can handle most invocations for the first 3 days. Later, for the remaining 4 days, auto scaling was added with a ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"0.4"),". Because of the workload's spiky nature, the Invocations look almost 100% provisioned. However, the concurrent executions show otherwise."),(0,o.kt)(r.Z,{alt:"Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes",sources:{light:(0,s.Z)("/img/docs/cold-start-concurrency.png"),dark:(0,s.Z)("/img/docs/cold-start-concurrency-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("p",null,"Here is a 7 day view from the 4 day mark above. The ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," is still set to ",(0,o.kt)("inlineCode",{parentName:"p"},"0.4"),". It illustrates how the default CloudWatch Rule for ",(0,o.kt)("inlineCode",{parentName:"p"},"ProvisionedConcurrencyUtilization")," metrics over a 3 minute span are not quick enough to scale PC. It is possible to use a ",(0,o.kt)("inlineCode",{parentName:"p"},"TargetValue")," of ",(0,o.kt)("inlineCode",{parentName:"p"},"0.1")," to force the PC lines to meet the blue. 
But your cost at this point would be unrealistically high."),(0,o.kt)(r.Z,{alt:"Rails in Lambda Concurrent Executions and Provisioned Executions",sources:{light:(0,s.Z)("/img/docs/cold-start-concurrency-vs-spilled.png"),dark:(0,s.Z)("/img/docs/cold-start-concurrency-vs-spilled-dark.png")},mdxType:"ThemedImage"}),(0,o.kt)("h2",{id:"gradual-deployments"},"Gradual Deployments"),(0,o.kt)("p",null,"As mentioned in the ",(0,o.kt)("a",{parentName:"p",href:"#provisioned-concurrency"},"Provisioned Concurrency")," section we use a simple ",(0,o.kt)("inlineCode",{parentName:"p"},"DeploymentPreference")," value called ",(0,o.kt)("inlineCode",{parentName:"p"},"AllAtOnce"),". When a deploy happens, Lambda will need to download your new ECR image before your application is initialized. In certain high traffic scenarios along with a potentially slow loading application, deploys can be a thundering herd effect causing your concurrency to spike and a small percentage of users having longer response times."),(0,o.kt)("p",null,"Please see AWS' \"",(0,o.kt)("a",{parentName:"p",href:"https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/automating-updates-to-serverless-apps.html"},"Deploying serverless applications gradually"),'" guide for full details. However, one way to soften this would be to roll out your new code in 10 minutes total via the ',(0,o.kt)("inlineCode",{parentName:"p"},"Linear10PercentEvery1Minute")," deployment preference. This will automatically create a ",(0,o.kt)("a",{parentName:"p",href:"https://aws.amazon.com/codedeploy/"},"AWS CodeDeploy")," application and deployments for you. 
So cool!"))}h.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.58179dd5.js b/assets/js/runtime~main.bf6fb0cb.js similarity index 99% rename from assets/js/runtime~main.58179dd5.js rename to assets/js/runtime~main.bf6fb0cb.js index 4e5917d..ad90fb7 100644 --- a/assets/js/runtime~main.58179dd5.js +++ b/assets/js/runtime~main.bf6fb0cb.js @@ -1 +1 @@ -(()=>{"use strict";var e,c,a,f,d,t={},r={};function b(e){var c=r[e];if(void 0!==c)return c.exports;var a=r[e]={id:e,loaded:!1,exports:{}};return t[e].call(a.exports,a,a.exports,b),a.loaded=!0,a.exports}b.m=t,e=[],b.O=(c,a,f,d)=>{if(!a){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](a[o])))?a.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[a,f,d]},b.n=e=>{var c=e&&e.__esModule?()=>e.default:()=>e;return b.d(c,{a:c}),c},a=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};c=c||[null,a({}),a([]),a(a)];for(var r=2&f&&e;"object"==typeof r&&!~c.indexOf(r);r=a(r))Object.getOwnPropertyNames(r).forEach((c=>t[c]=()=>e[c]));return t.default=()=>e,b.d(d,t),d},b.d=(e,c)=>{for(var a in 
c)b.o(c,a)&&!b.o(e,a)&&Object.defineProperty(e,a,{enumerable:!0,get:c[a]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((c,a)=>(b.f[a](e,c),c)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",333:"61017d4a",533:"b2b675dd",924:"8a35f747",1184:"70de0f61",1477:"b2f554cd",1555:"1251e1ad",1713:"a7023ddc",1855:"80b4b642",1909:"2df21221",2138:"26c6198c",2386:"c8834535",2535:"814f3328",2636:"46561613",2747:"b15f565e",2907:"54f35b6e",3085:"1f391b9e",3089:"a6aa9e1f",3160:"c4bce498",3278:"85255e3d",3541:"af7d3287",3608:"9e4087bc",3664:"fd8ac5c7",3699:"c179f821",3778:"9f4696d1",3780:"24f32f24",3905:"06290c0c",4013:"01a85c17",4080:"070c1425",4118:"2f8c493c",4195:"c4f5d8e4",4233:"7395e211",4620:"a5083a01",4906:"10c925be",4926:"56266e26",5029:"b9cb7ccd",5220:"f9923828",5660:"f14f229f",5756:"35d36514",5862:"25b0c2f2",6103:"ccc49370",6113:"5b3c4957",6517:"7aad579e",6661:"a61b1262",7052:"dec55e99",7155:"77629f2c",7414:"393be207",7449:"0e4ec2e4",7776:"f0cb9948",7918:"17896441",7944:"da31232e",7984:"da51dd87",8170:"485ddb37",8275:"eb00ae4c",8376:"5a0d6fcc",8522:"bfe8b5d0",8538:"ae830a23",8610:"6875c492",8984:"ea25a349",9086:"b804622e",9091:"06d8b529",9374:"bcf90576",9416:"f6cffe6d",9492:"92496324",9514:"1be78505",9529:"c5f7c8e4"}[e]||e)+"."+{53:"c319dcbd",333:"cd3d7fcb",533:"6b50f661",924:"ded5fabd",1184:"c6c2f655",1477:"3ad27fe0",1555:"f81f1d0b",1713:"d9401ca4",1855:"e0af5b19",1909:"a430df54",2138:"fc83801b",2386:"fe68bfd2",2529:"e5b5435e",2535:"400db2f8",2636:"08c5addc",2747:"c7e0ba18",2907:"bdd6b09e",3085:"388259d8",3089:"5b7e45a3",3160:"aebfe9a6",3278:"b55ae0f8",3541:"15dc54e5",3608:"3e2d0116",3664:"b4d647be",3699:"4e9f1b9a",3778:"75c1a097",3780:"1d807e76",3905:"7ad56b4b",4013:"1d3a5dca",4080:"50c04994",4118:"5bbbc9b9",4195:"cd476cbb",4233:"4843b6d5",4620:"ae775bf6",4906:"b524ea52",4926:"9882decd",4972:"5210ce7e",5029:"15dcf9b5",5220:"b845b59a",5660:"f56f1e83",5756:"7bcb5202",5862:"f04dc244",6103:"e3e86339",6113:"74416c99",6517:"a3742444",6661:"40acfd06",7052:"fcaff9e9",7155:"f6c29
961",7414:"9156a83f",7449:"009c239d",7770:"9af49b5d",7776:"340c572a",7918:"0450fa0f",7944:"b7b989a5",7984:"895eb8d8",8170:"b0f93634",8275:"a093c7e0",8376:"aac9be19",8522:"cf1e6028",8538:"ce917605",8610:"9f6dce22",8984:"90660894",9086:"b3f3f5a7",9091:"4484fe12",9374:"1322ff86",9416:"a688b378",9492:"20d8f650",9514:"ee4fa683",9529:"e417be20"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,c)=>Object.prototype.hasOwnProperty.call(e,c),f={},d="lamby:",b.l=(e,c,a,t)=>{if(f[e])f[e].push(c);else{var r,o;if(void 0!==a)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(a))),c)return c(a)},s=setTimeout(u.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=u.bind(null,r.onerror),r.onload=u.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.nmd=e=>(e.paths=[],e.children||(e.children=[]),e),b.p="/",b.gca=function(e){return 
e={17896441:"7918",46561613:"2636",92496324:"9492","935f2afb":"53","61017d4a":"333",b2b675dd:"533","8a35f747":"924","70de0f61":"1184",b2f554cd:"1477","1251e1ad":"1555",a7023ddc:"1713","80b4b642":"1855","2df21221":"1909","26c6198c":"2138",c8834535:"2386","814f3328":"2535",b15f565e:"2747","54f35b6e":"2907","1f391b9e":"3085",a6aa9e1f:"3089",c4bce498:"3160","85255e3d":"3278",af7d3287:"3541","9e4087bc":"3608",fd8ac5c7:"3664",c179f821:"3699","9f4696d1":"3778","24f32f24":"3780","06290c0c":"3905","01a85c17":"4013","070c1425":"4080","2f8c493c":"4118",c4f5d8e4:"4195","7395e211":"4233",a5083a01:"4620","10c925be":"4906","56266e26":"4926",b9cb7ccd:"5029",f9923828:"5220",f14f229f:"5660","35d36514":"5756","25b0c2f2":"5862",ccc49370:"6103","5b3c4957":"6113","7aad579e":"6517",a61b1262:"6661",dec55e99:"7052","77629f2c":"7155","393be207":"7414","0e4ec2e4":"7449",f0cb9948:"7776",da31232e:"7944",da51dd87:"7984","485ddb37":"8170",eb00ae4c:"8275","5a0d6fcc":"8376",bfe8b5d0:"8522",ae830a23:"8538","6875c492":"8610",ea25a349:"8984",b804622e:"9086","06d8b529":"9091",bcf90576:"9374",f6cffe6d:"9416","1be78505":"9514",c5f7c8e4:"9529"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(c,a)=>{var f=b.o(e,c)?e[c]:void 0;if(0!==f)if(f)a.push(f[2]);else if(/^(1303|532)$/.test(c))e[c]=0;else{var d=new Promise(((a,d)=>f=e[c]=[a,d]));a.push(f[2]=d);var t=b.p+b.u(c),r=new Error;b.l(t,(a=>{if(b.o(e,c)&&(0!==(f=e[c])&&(e[c]=void 0),f)){var d=a&&("load"===a.type?"missing":a.type),t=a&&a.target&&a.target.src;r.message="Loading chunk "+c+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,f[1](r)}}),"chunk-"+c,c)}},b.O.j=c=>0===e[c];var c=(c,a)=>{var f,d,t=a[0],r=a[1],o=a[2],n=0;if(t.some((c=>0!==e[c]))){for(f in r)b.o(r,f)&&(b.m[f]=r[f]);if(o)var i=o(b)}for(c&&c(a);n{"use strict";var e,c,a,f,d,t={},r={};function b(e){var c=r[e];if(void 0!==c)return c.exports;var a=r[e]={id:e,loaded:!1,exports:{}};return 
t[e].call(a.exports,a,a.exports,b),a.loaded=!0,a.exports}b.m=t,e=[],b.O=(c,a,f,d)=>{if(!a){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](a[o])))?a.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[a,f,d]},b.n=e=>{var c=e&&e.__esModule?()=>e.default:()=>e;return b.d(c,{a:c}),c},a=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};c=c||[null,a({}),a([]),a(a)];for(var r=2&f&&e;"object"==typeof r&&!~c.indexOf(r);r=a(r))Object.getOwnPropertyNames(r).forEach((c=>t[c]=()=>e[c]));return t.default=()=>e,b.d(d,t),d},b.d=(e,c)=>{for(var a in c)b.o(c,a)&&!b.o(e,a)&&Object.defineProperty(e,a,{enumerable:!0,get:c[a]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((c,a)=>(b.f[a](e,c),c)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",333:"61017d4a",533:"b2b675dd",924:"8a35f747",1184:"70de0f61",1477:"b2f554cd",1555:"1251e1ad",1713:"a7023ddc",1855:"80b4b642",1909:"2df21221",2138:"26c6198c",2386:"c8834535",2535:"814f3328",2636:"46561613",2747:"b15f565e",2907:"54f35b6e",3085:"1f391b9e",3089:"a6aa9e1f",3160:"c4bce498",3278:"85255e3d",3541:"af7d3287",3608:"9e4087bc",3664:"fd8ac5c7",3699:"c179f821",3778:"9f4696d1",3780:"24f32f24",3905:"06290c0c",4013:"01a85c17",4080:"070c1425",4118:"2f8c493c",4195:"c4f5d8e4",4233:"7395e211",4620:"a5083a01",4906:"10c925be",4926:"56266e26",5029:"b9cb7ccd",5220:"f9923828",5660:"f14f229f",5756:"35d36514",5862:"25b0c2f2",6103:"ccc49370",6113:"5b3c4957",6517:"7aad579e",6661:"a61b1262",7052:"dec55e99",7155:"77629f2c",7414:"393be207",7449:"0e4ec2e4",7776:"f0cb9948",7918:"17896441",7944:"da31232e",7984:"da51dd87",8170:"485ddb37",8275:"eb00ae4c",8376:"5a0d6fcc",8522:"bfe8b5d0",8538:"ae830a23",8610:"6875c492",8984:"ea25a349",9086:"b804622e",9091:"06d8b529",9374:"bcf90576",9416:"f6cffe6d",9492:"92496324",9514:"1be78505",9529:"c5f7c8e4"}[e]||e)
+"."+{53:"c319dcbd",333:"cd3d7fcb",533:"6b50f661",924:"ded5fabd",1184:"c6c2f655",1477:"3ad27fe0",1555:"f81f1d0b",1713:"d9401ca4",1855:"e0af5b19",1909:"a430df54",2138:"fc83801b",2386:"fe68bfd2",2529:"e5b5435e",2535:"400db2f8",2636:"08c5addc",2747:"c7e0ba18",2907:"bdd6b09e",3085:"388259d8",3089:"5b7e45a3",3160:"aebfe9a6",3278:"c5fee749",3541:"15dc54e5",3608:"3e2d0116",3664:"b4d647be",3699:"4e9f1b9a",3778:"75c1a097",3780:"1d807e76",3905:"7ad56b4b",4013:"1d3a5dca",4080:"50c04994",4118:"5bbbc9b9",4195:"cd476cbb",4233:"4843b6d5",4620:"ae775bf6",4906:"b524ea52",4926:"9882decd",4972:"5210ce7e",5029:"15dcf9b5",5220:"b845b59a",5660:"f56f1e83",5756:"7bcb5202",5862:"f04dc244",6103:"e3e86339",6113:"74416c99",6517:"a3742444",6661:"40acfd06",7052:"fcaff9e9",7155:"f6c29961",7414:"9156a83f",7449:"009c239d",7770:"9af49b5d",7776:"340c572a",7918:"0450fa0f",7944:"b7b989a5",7984:"895eb8d8",8170:"b0f93634",8275:"a093c7e0",8376:"aac9be19",8522:"cf1e6028",8538:"ce917605",8610:"9f6dce22",8984:"90660894",9086:"b3f3f5a7",9091:"4484fe12",9374:"1322ff86",9416:"a688b378",9492:"20d8f650",9514:"ee4fa683",9529:"e417be20"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,c)=>Object.prototype.hasOwnProperty.call(e,c),f={},d="lamby:",b.l=(e,c,a,t)=>{if(f[e])f[e].push(c);else{var r,o;if(void 0!==a)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(a))),c)return c(a)},s=setTimeout(u.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=u.bind(null,r.onerror),r.onload=u.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.nmd=e=>(e.paths=[],e.children||(e.children=[]),e),b.p="/",b.gca=function(e){return e={17896441:"7918",46561613:"2636",92496324:"9492","935f2afb":"53","61017d4a":"333",b2b675dd:"533","8a35f747":"924","70de0f61":"1184",b2f554cd:"1477","1251e1ad":"1555",a7023ddc:"1713","80b4b642":"1855","2df21221":"1909","26c6198c":"2138",c8834535:"2386","814f3328":"2535",b15f565e:"2747","54f35b6e":"2907","1f391b9e":"3085",a6aa9e1f:"3089",c4bce498:"3160","85255e3d":"3278",af7d3287:"3541","9e4087bc":"3608",fd8ac5c7:"3664",c179f821:"3699","9f4696d1":"3778","24f32f24":"3780","06290c0c":"3905","01a85c17":"4013","070c1425":"4080","2f8c493c":"4118",c4f5d8e4:"4195","7395e211":"4233",a5083a01:"4620","10c925be":"4906","56266e26":"4926",b9cb7ccd:"5029",f9923828:"5220",f14f229f:"5660","35d36514":"5756","25b0c2f2":"5862",ccc49370:"6103","5b3c4957":"6113","7aad579e":"6517",a61b1262:"6661",dec55e99:"7052","77629f2c":"7155","393be207":"7414","0e4ec2e4":"7449",f0cb9948:"7776",da31232e:"7944",da51dd87:"7984","485ddb37":"8170",eb00ae4c:"8275","5a0d6fcc":"8376",bfe8b5d0:"8522",ae830a23:"8538","6875c492":"8610",ea25a349:"8984",b804622e:"9086","06d8b529":"9091",bcf90576:"9374",f6cffe6d:"9416","1be78505":"9514",c5f7c8e4:"9529"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(c,a)=>{var f=b.o(e,c)?e[c]:void 0;if(0!==f)if(f)a.push(f[2]);else if(/^(1303|532)$/.test(c))e[c]=0;else{var d=new Promise(((a,d)=>f=e[c]=[a,d]));a.push(f[2]=d);var t=b.p+b.u(c),r=new Error;b.l(t,(a=>{if(b.o(e,c)&&(0!==(f=e[c])&&(e[c]=void 0),f)){var d=a&&("load"===a.type?"missing":a.type),t=a&&a.target&&a.target.src;r.message="Loading chunk "+c+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,f[1](r)}}),"chunk-"+c,c)}},b.O.j=c=>0===e[c];var c=(c,a)=>{var f,d,t=a[0],r=a[1],o=a[2],n=0;if(t.some((c=>0!==e[c]))){for(f in r)b.o(r,f)&&(b.m[f]=r[f]);if(o)var 
i=o(b)}for(c&&c(a);n Blog | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

· 2 min read
Ken Collins

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

· 4 min read
Ken Collins

I am incredibly happy to announce Lamby v4 and a major update to our documentation website. Huge shout out to Docusaurus which is, in my opinion, the best content-driven static site generator for open-source projects like our Lamby community.

- + \ No newline at end of file diff --git a/blog/2023/07/16/goodbye-cold-starts-hello-proactive-initilizations.html b/blog/2023/07/16/goodbye-cold-starts-hello-proactive-initilizations.html index 200602c..1d4bc4c 100644 --- a/blog/2023/07/16/goodbye-cold-starts-hello-proactive-initilizations.html +++ b/blog/2023/07/16/goodbye-cold-starts-hello-proactive-initilizations.html @@ -5,13 +5,13 @@ Goodbye Cold Starts, Hello Proactive Initialization | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Goodbye Cold Starts, Hello Proactive Initialization

· 2 min read
Ken Collins

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

This means the Monitoring with CloudWatch is just half the picture. But how much is your application potentially benefiting from proactive inits? Since Lamby v5.1.0, you can now find out easily using CloudWatch Metrics. To turn metrics on, enable the config like so:

config/environments/production.rb
config.lamby.cold_start_metrics = true

Lamby will now publish CloudWatch Embedded Metrics in the Lamby namespace with a custom dimension for each application's name. Captured metrics include counts for Cold Starts vs. Proactive Initializations. Here is an example running sum of 3 days of data for a large Rails application in the us-east-1 region.

A CloudWatch Metrics graph showing a running sum of cold starts vs proactive inits for a large Rails application on LambdaA CloudWatch Metrics graph showing a running sum of cold starts vs proactive inits for a large Rails application on Lambda

This data shows the vast majority of your initialized Lambda Containers are proactively initialized. Hence, no cold starts are felt by end users or consumers of your function. If you need to customize the name of your Rails application in the CloudWatch Metrics dimension, you can do so using this config.

config/environments/production.rb
config.lamby.metrics_app_name = 'MyServiceName'
- + \ No newline at end of file diff --git a/blog/archive.html b/blog/archive.html index 3582dd1..2615f35 100644 --- a/blog/archive.html +++ b/blog/archive.html @@ -5,13 +5,13 @@ Archive | Lamby - Simple Rails & AWS Lambda Integration using Rack - + - + \ No newline at end of file diff --git a/blog/tags.html b/blog/tags.html index e39e0a1..7e7fe41 100644 --- a/blog/tags.html +++ b/blog/tags.html @@ -5,13 +5,13 @@ Tags | Lamby - Simple Rails & AWS Lambda Integration using Rack - + - + \ No newline at end of file diff --git a/blog/tags/cold-starts.html b/blog/tags/cold-starts.html index 51ea40d..4fe72e5 100644 --- a/blog/tags/cold-starts.html +++ b/blog/tags/cold-starts.html @@ -5,13 +5,13 @@ One post tagged with "cold-starts" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "cold-starts"

View All Tags

· 2 min read
Ken Collins

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

- + \ No newline at end of file diff --git a/blog/tags/console.html b/blog/tags/console.html index a8422c9..0fc8797 100644 --- a/blog/tags/console.html +++ b/blog/tags/console.html @@ -5,13 +5,13 @@ One post tagged with "console" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "console"

View All Tags

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

- + \ No newline at end of file diff --git a/blog/tags/container.html b/blog/tags/container.html index 841a17e..48c622d 100644 --- a/blog/tags/container.html +++ b/blog/tags/container.html @@ -5,13 +5,13 @@ One post tagged with "container" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "container"

View All Tags
- + \ No newline at end of file diff --git a/blog/tags/containers.html b/blog/tags/containers.html index c4742c9..07593ef 100644 --- a/blog/tags/containers.html +++ b/blog/tags/containers.html @@ -5,13 +5,13 @@ One post tagged with "containers" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "containers"

View All Tags

· 4 min read
Ken Collins

I am incredibly happy to announce Lamby v4 and a major update to our documentation website. Huge shout out to Docusaurus which is, in my opinion, the best content-driven static site generator for open-source projects like our Lamby community.

- + \ No newline at end of file diff --git a/blog/tags/extension.html b/blog/tags/extension.html index 40c2484..ceb2a94 100644 --- a/blog/tags/extension.html +++ b/blog/tags/extension.html @@ -5,13 +5,13 @@ One post tagged with "extension" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "extension"

View All Tags
- + \ No newline at end of file diff --git a/blog/tags/initialization.html b/blog/tags/initialization.html index ea1cc85..01f48b5 100644 --- a/blog/tags/initialization.html +++ b/blog/tags/initialization.html @@ -5,13 +5,13 @@ One post tagged with "initialization" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "initialization"

View All Tags

· 2 min read
Ken Collins

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

- + \ No newline at end of file diff --git a/blog/tags/interaction.html b/blog/tags/interaction.html index 229e1a0..5bf6806 100644 --- a/blog/tags/interaction.html +++ b/blog/tags/interaction.html @@ -5,13 +5,13 @@ One post tagged with "interaction" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "interaction"

View All Tags

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

- + \ No newline at end of file diff --git a/blog/tags/lambda.html b/blog/tags/lambda.html index 0f7057b..280970c 100644 --- a/blog/tags/lambda.html +++ b/blog/tags/lambda.html @@ -5,13 +5,13 @@ 4 posts tagged with "lambda" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

4 posts tagged with "lambda"

View All Tags

· 2 min read
Ken Collins

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

· 4 min read
Ken Collins

I am incredibly happy to announce Lamby v4 and a major update to our documentation website. Huge shout out to Docusaurus which is, in my opinion, the best content-driven static site generator for open-source projects like our Lamby community.

- + \ No newline at end of file diff --git a/blog/tags/rails.html b/blog/tags/rails.html index 5c72b3d..b046ff5 100644 --- a/blog/tags/rails.html +++ b/blog/tags/rails.html @@ -5,13 +5,13 @@ 3 posts tagged with "rails" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

3 posts tagged with "rails"

View All Tags

· 2 min read
Ken Collins

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

· 4 min read
Ken Collins

I am incredibly happy to announce Lamby v4 and a major update to our documentation website. Huge shout out to Docusaurus which is, in my opinion, the best content-driven static site generator for open-source projects like our Lamby community.

- + \ No newline at end of file diff --git a/blog/tags/runner.html b/blog/tags/runner.html index ac0beab..f7a790e 100644 --- a/blog/tags/runner.html +++ b/blog/tags/runner.html @@ -5,13 +5,13 @@ One post tagged with "runner" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "runner"

View All Tags

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

- + \ No newline at end of file diff --git a/blog/tags/specification.html b/blog/tags/specification.html index 3d72455..71dcfc7 100644 --- a/blog/tags/specification.html +++ b/blog/tags/specification.html @@ -5,13 +5,13 @@ One post tagged with "specification" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "specification"

View All Tags

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

- + \ No newline at end of file diff --git a/blog/tags/tailscale.html b/blog/tags/tailscale.html index 51af3de..2e8ed75 100644 --- a/blog/tags/tailscale.html +++ b/blog/tags/tailscale.html @@ -5,13 +5,13 @@ One post tagged with "tailscale" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "tailscale"

View All Tags
- + \ No newline at end of file diff --git a/blog/tags/tasks.html b/blog/tags/tasks.html index 61ee7d9..e8eede4 100644 --- a/blog/tags/tasks.html +++ b/blog/tags/tasks.html @@ -5,13 +5,13 @@ One post tagged with "tasks" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "tasks"

View All Tags

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

- + \ No newline at end of file diff --git a/blog/tags/websockets.html b/blog/tags/websockets.html index 347b171..bd5e513 100644 --- a/blog/tags/websockets.html +++ b/blog/tags/websockets.html @@ -5,13 +5,13 @@ One post tagged with "websockets" | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

One post tagged with "websockets"

View All Tags
- + \ No newline at end of file diff --git a/blog/tailscale-extension-for-lambda-containers.html b/blog/tailscale-extension-for-lambda-containers.html index 5419165..ca18204 100644 --- a/blog/tailscale-extension-for-lambda-containers.html +++ b/blog/tailscale-extension-for-lambda-containers.html @@ -5,13 +5,13 @@ Using Tailscale on Lambda for a Live Development Proxy | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Using Tailscale on Lambda for a Live Development Proxy

· 4 min read
Ken Collins

Using Tailscale on Lambda for a Live Development Proxy

⚠️ DISCLAIMER: In no way am I advocating for the use of live proxies as a normal way to develop against cloud resources. However in some edge cases, such as developing a new system, live dev proxies or the general use of Tailscale in Lambda could be useful.

🐋 Tailscale on Lambda

Tailscale makes networking easy. Like really easy. It shines in situations where private networks do not allow inbound connections. Tailscale can connect your devices and development environments for easy access to remote resources, or allow those remote systems to access your home or office network devices.

A few years ago Corey Quinn wrote a Tailscale Lambda Extension. It is great and helped a lot of folks. Today, I'd like to share a new project based on Corey's work that makes it even easier to use Tailscale in Lambda Container. Check it out here.

🔗 Tailscale Lambda Extension for Containers on GitHub 🐙

This new version tries to improve upon Corey's work. Initialization is now stable, there are more configuration options, and we even have multi-platform Docker container packages for both x86_64 and arm64. We even have Amazon Linux 2 and Debian/Ubuntu variants. Installation is really easy, simply add one line to your Dockerfile. For example:

FROM public.ecr.aws/lambda/ruby:3.2
RUN yum install -y curl
COPY --from=ghcr.io/rails-lambda/tailscale-extension-amzn:1 /opt /opt

Once your container starts, talking to any device within your tailnet can be done by using the local SOCKS5 proxy. In the example below, we are using Ruby's socksify gem.

require 'socksify/http'
Net::HTTP.socks_proxy('localhost', 1055).start(...) do |http|
# your http code here...
end

🔌 ActionCable on Lambda

How did I use Tailscale for the Rails on Lambda work? A few months ago, I started work on the last critical part of the Rails ecosystem which did not work on Lambda... ActionCable & WebSockets. Specifically, I wanted Hotwire to work.

So far, everything is working great with our new LambdaCable gem. Eventually it will be a drop-in adapter for ActionCable and join the ranks of other popular alternatives like AnyCable. To bring the project to completion faster, I needed feedback loops that were much faster than deploying code to the cloud. I needed a development proxy! One where my Rails application would receive events from both Lambda's Function URLs and the WebSocket events from API Gateway. Illustrated below with a demo video.

Architecture diagram of the use of a Lambda development proxy for WebSockets with API Gateway.

If you are curious to learn more about how Rails & Lambda work together, check out our Lamby project. The architecture of Lambda Containers works so well with Rails since our framework distills everything from HTTP, Jobs, Events, & WebSocket connections down to Docker's CMD interface. The architecture above at the proxy layer was easy to build and connect up to our single delegate function, Lamby.cmd. Shown below:

Architecture diagram of the use of a Lambda development proxy for WebSockets with API Gateway.

For our Rails application on Lambda, here are the changes we made to leverage this. All outlined in our WebSockets Demo Pull Request.

  • Created a .localdev folder. Added a copy of our SAM template.yaml for all AWS Resources.
  • Made a simple .localdev/Dockerfile that included the Tailscale Extension along with basic proxy code.
  • Leveraged Lamby's Local Development Proxy Server.
  • Ensured our Devcontainers exposed port 3000 to all local network devices so Tailscale could detect the service.

I hope you find reasons to learn more about Tailscale and how using a SOCKS5 proxy from Lambda could help your development or production needs. More so, I hope you like the new Lambda Extension project of ours making it easy for containerized applications to use. Drop us a comment if you do.

🔗 Tailscale Lambda Extension for Containers on GitHub 🐙

- + \ No newline at end of file diff --git a/blog/the-elusive-lambda-console-a-specification-proposal.html b/blog/the-elusive-lambda-console-a-specification-proposal.html index 9b8ccfe..bc4f9df 100644 --- a/blog/the-elusive-lambda-console-a-specification-proposal.html +++ b/blog/the-elusive-lambda-console-a-specification-proposal.html @@ -5,13 +5,13 @@ The Elusive Lambda Console; A Specification Proposal. | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

The Elusive Lambda Console; A Specification Proposal.

· 4 min read
Ken Collins

After years of smashing Cloud & Rails together, I've come up with an idea. Better than an idea, a working specification! One where us Rails & Lambda enthusiasts can once again "console into" our "servers" and execute CLI tasks like migrations or interact via our beloved IRB friend, the Rails console. Today, I would like to present, the Lambda Console project. An open specification proposal for any AWS Lambda runtime to adopt.

Lamby: Simple Rails & AWS Lambda Integration using RackLamby: Simple Rails & AWS Lambda Integration using Rack

Lambda Console

npm install -g lambda-console-cli

The Lambda Console is a CLI written in Node.js that will interactively create an AWS SDK session for you to invoke your Lambda functions with two types of modes.

  1. CLI Runner
  2. Interactive Commands

Think of the CLI Runner as a bash prompt. You can run any process command or interact with the filesystem or environment. For Rails users, running rake tasks or DB migrations. These tasks assume the Lambda task root as the present working directory.

Interactive commands however are evaluated in the context of your running application. For Ruby and Rails applications, this simulates IRB (Interactive Ruby Shell). For Lamby users, this mode simulates the Rails console. Making it easy for users to query their DB or poke their models and code.

The Proposal

There is nothing about the Lambda Console that is coupled to Ruby or Rails. The idea is simple, as a Lambda community, could we do the following?

  1. Finalize a Lambda Console request/response specification.
  2. Create more runtime-specific language implementations.
  3. Build an amazing CLI client for any runtime.

Here is what we have today. The request specification, a simple event structure that is only a few dozen lines of JSON schema.

{ "X_LAMBDA_CONSOLE": { "run": "cat /etc/os-release" } }
{ "X_LAMBDA_CONSOLE": { "interact": "User.find(1)" } }

Any Lambda runtime code or framework could implement the handling of these events in their own language-specific packages. You can find the Ruby implementation of these in the Lambda Console's first reference implementations.

The Possibilities

What I really want is an amazing CLI client. The current Lambda Console CLI was hacked together in a few days using some amazing Node.js tools that make building interactive CLIs so so easy. But I've never done this before. If this type of tooling sounds interesting to you and you like Node.js, let me know! It would be amazing to see implementation packages for these for Node, PHP, Python, and other frameworks using these languages. Here are some ideas on where I could see this going.

Live STDOUT & STDERR: We could take advantage of Lambda's new Response Streaming and send output buffers as they happen.

Pseudo TTY: Is there a way to better simulate a real TTY session? Could this even include ANSI colors?

Quality of Life Improvements: Everything from, Allowing the CLI tool to switch modes without restarting it; Creating a command buffer to up arrow navigate history; Prettier UI.

Formal Response JSON Schema: As the features grow, should the response JSON be standardized? For example, if the client wanted to syntax highlight interactive language commands, how would it know what language was being used? We could have a X_LAMBDA_CONSOLE_LANG response header.

What else would you like to see in a Lambda Console client?

- + \ No newline at end of file diff --git a/blog/welcome-to-lamby-v4.html b/blog/welcome-to-lamby-v4.html index bc8a8a5..1cdc4ae 100644 --- a/blog/welcome-to-lamby-v4.html +++ b/blog/welcome-to-lamby-v4.html @@ -5,14 +5,14 @@ Welcome Lamby v4! | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Welcome Lamby v4!

· 4 min read
Ken Collins

I am incredibly happy to announce Lamby v4 and a major update to our documentation website. Huge shout out to Docusaurus which is, in my opinion, the best content-driven static site generator for open-source projects like our Lamby community.

Lamby: Simple Rails & AWS Lambda Integration using RackLamby: Simple Rails & AWS Lambda Integration using Rack

So what is new and will v4 break anything? Lots! Mostly new ways of thinking around the same basic architecture. Nothing should break either. Lamby v4's semver change is mostly marketing driven. Here is a short list of what is new.

Updated Starter

Want to see all this new stuff in action? Use our Quick Start guide to deploy a new Rails application to AWS Lambda in 5min ⏱️.

Bring Your Own Containers

Lamby still works with the Zip packaging format, but all documentation on how to use it has been removed. Containers are here to stay and their value goes way past a simple packaging format.

We now encourage bringing your own containers by using Lambda's Runtime Interface Client (RIC). The RIC allows us to use Docker's CMD to load Rails and invoke a function. In this case we are loading our Rails application through its config/environment.rb file (.rb extension is implied) and once that is done, calling the new Lamby.cmd as the Lambda handler. No more app.rb file needed!

Dockerfile
FROM ruby:3.2-bullseye
RUN gem install 'aws_lambda_ric'
ENTRYPOINT [ "/usr/local/bundle/bin/aws_lambda_ric" ]
CMD ["config/environment.Lamby.cmd"]

Secrets with Crypteia

The Crypteia package is a Rust Lambda Extension for any Runtime/Container to preload SSM Parameters as secure environment variables. It takes advantage of LD_PRELOAD to seamlessly fetch values from SSM when a process starts and then injects them as natively accessible Ruby ENV variables. Our Quick Start guide's cookiecutter includes Crypteia already for you via a Docker COPY command into the Lambda Extension /opt directory.

Dockerfile
FROM ruby:3.2-bullseye
COPY --from=ghcr.io/rails-lambda/crypteia-extension-debian:1 /opt /opt

Usage is simply done by adding variables to your SAM template and accessing the values fetched from SSM like any other environment variable. Please read the Crypteia's documentation for full details.

Globals:
Environment:
Variables:
SECRET: x-crypteia-ssm:/myapp/SECRET
ENV['SECRET'] # 1A2B3C4D5E6F

Development Containers

Described in the Quick Start guide, our Lamby starter makes use of the Development Container specification via a .devcontainer directory. Commonly used with Codespaces, dev containers can be used locally with any editor.

Our dev container's Dockerfile uses the same base image as the one at the root of your project. This helps ensure your development experience, like installing system dependencies and Ruby gems with native extensions, aligns with the same process as your production image.

We also leverage the devcontainer's dockerComposeFile capability to include a MySQL service as well. The Lamby starter also includes a range of devcontainer features which are installed within the Ubuntu development image. For example, Node, Docker in Docker, SSH, and the AWS CLI & SAM CLI.

- + \ No newline at end of file diff --git a/docs/activejob.html b/docs/activejob.html index 1433f20..764a1ac 100644 --- a/docs/activejob.html +++ b/docs/activejob.html @@ -5,13 +5,13 @@ ActiveJob & Background Processing | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

ActiveJob & Background Processing

Lambdakiq

Lambdakiq - ActiveJob on SQS & Lambda

Using ActiveJob on AWS Lambda is a reimagination of the problem for Rails. Instead of starting up long running process that polls for work, we instead use the event-driven architecture of AWS Lambda to our advantage using a gem named Lambdakiq which is mostly a drop-in replacement for Sidekiq.

It allows you to leverage AWS' managed infrastructure to the fullest extent. Gone are the days of managing pods and long polling processes. Instead AWS delivers messages directly to your Rails' job functions and scales it up and down as needed. Observability is built in using AWS CloudWatch Metrics, Dashboards, and Alarms. Key Features:

  • Distinct web & jobs Lambda functions.
  • AWS fully managed polling. Event-driven.
  • Maximum 12 retries. Per job configurable.
  • Mirror Sidekiq's retry backoff timing.
  • Last retry is at 11 hours 30 minutes.
  • Supports ActiveJob's wait/delay. Up to 15 minutes.
  • Dead messages are stored for up to 14 days.

Learn more on GitHub: https://github.com/rails-lambda/lambdakiq

LambdaPunch

Async Processing Using Lambda Extensions

You may need lightweight background job processing similar to how the SuckerPunch gem works. The only way to do this for Lambda is to use the LambdaPunch gem. LambdaPunch is a Lambda Extension that works with the Lambda invoke model. This solution is required if you are using New Relic as described in our Logging & Observability guide.

Learn more on GitHub: https://github.com/rails-lambda/lambda_punch

- + \ No newline at end of file diff --git a/docs/anatomy.html b/docs/anatomy.html index 910abd0..4ee067b 100644 --- a/docs/anatomy.html +++ b/docs/anatomy.html @@ -5,13 +5,13 @@ How Lamby Works | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

How Lamby Works

The quickest way to see Lamby in action is to create a new Rails app via our cookiecutter project template using our Quick Start guide. This guide will instead outline what is happening within that starter project allowing you to cherry-pick which files and/or practices from our cookiecutter project can be applied to your own application(s).

note

If you copy any template files without using the cookiecutter, remember to customize the {% include ... %} template sections with your own app name and remove the curly bracket sections like these {{ "..." }} from various string literals.

Architecture

Lamby is a Rack adapter that converts AWS Lambda integration events into native Rack Environment objects which are sent directly to your application. Lamby can automatically do this when using either Lambda Function URLs, API Gateway HTTP API v1/v2 payloads, API Gateway REST API, or even Application Load Balancer (ALB) integrations.

Since Rails applications are Rack applications, Lamby removes the need for a companion Rack Web Server like Passenger or Puma to be running within your container. Essentially AWS integrations become your web server and scaling is managed by the Lambda service spinning up new container instances, one for each request or down to zero if needed. Lambda instances live for several minutes or more. A small pool of concurrent functions can handle a large amount of traffic.

Install Lamby

Start by adding the Lamby gem to your Gemfile. It remains inert until it detects special environment variables present when run on AWS Lambda. When activated, it mostly does a few simple things like ensuring Rails logs to standard out.

gem 'lamby'

Runtime Container

You will need some Dockerfile to build your Lambda container image. The architecture diagram above mentions something called a RIC (Rick). The RIC is short for the Lambda Runtime Interface Client. It is a small interface packaged as a Ruby gem that acts as the ENTRYPOINT for any OCI container image to run on the AWS Lambda platform. Below you can see that we are using an Official Ruby Ubuntu variant base image, installing the RIC and setting it as the entrypoint.

Dockerfile
FROM ruby:3.2-bullseye

RUN gem install 'aws_lambda_ric'
ENTRYPOINT [ "/usr/local/bundle/bin/aws_lambda_ric" ]
CMD ["config/environment.Lamby.cmd"]

The RIC allows us to use Docker's CMD to load Rails and invoke a function. In this case we are loading our Rails application through its config/environment.rb file (.rb extension is implied) and once that is done, calling the Lamby.cmd as the Lambda handler.

note

Our cookiecutter project defaults to building a Linux image targeting the arm64 architecture vs the traditional x86_64. However, this is easily changed to accommodate your needs. Check out the CPU Architecture section for more details.

SAM CloudFormation File

The template.yaml file at the root of your project describes your Serverless Application. Don't worry, we have done some heavy lifting for you. Here is the Serverless Function Resource we start you off with and a brief explanation:

template.yaml
RailsLambda:
Type: AWS::Serverless::Function
Metadata:
DockerContext: .
Dockerfile: Dockerfile
DockerTag: web
Properties:
AutoPublishAlias: live
FunctionUrlConfig:
AuthType: NONE
DeploymentPreference:
Type: AllAtOnce
MemorySize: 1792
PackageType: Image
Timeout: 30
  • Your Rails function will have a MemorySize of 1,792 MB of RAM and 1 vCPU. This is the sweet spot for Rails speed and cost optimization. Remember, you're not running a web server in a single function nor scaling by memory.
  • The FunctionUrlConfig has been configured to be a public HTTP proxy. You can change this to IAM authentication or swap out to other web server integrations like API Gateway if you need their features. Details in other guides.
  • The maximum amount of Timeout for an HTTP integration is 30 seconds.
AWS SAM Introduction

As your application grows you may end up adding Resources like EventBridge Rules, SQS, S3 Buckets, and IAM Policies. Please take some time to learn how SAM & CloudFormation work.

Development Container

Described in the Quick Start guide, our Lamby starter makes use of the Development Container specification via a .devcontainer directory. Commonly used with Codespaces, dev containers can be used locally with any editor.

Our dev container's Dockerfile uses the same base image as the one at the root of your project. This helps ensure your development experience, like installing system dependencies and Ruby gems with native extensions, aligns with the same process as your production image.

We also leverage the devcontainer's dockerComposeFile capability to include a MySQL service as well. The Lamby starter also includes a range of devcontainer features which are installed within the Ubuntu development image. For example, Node, Docker in Docker, SSH, and the AWS CLI & SAM CLI.

note

Technically you do not need to adopt these devcontainer patterns, but it is really nice to be able to use this container to ensure your CI/CD process is reproducible locally using VS Code or the Dev Container CLI. More details in the following CI/CD section.

Deployment & CI/CD

So how does that CloudFormation file and container image get created within AWS? We use the AWS SAM CLI's build, package, and deploy commands in a single bin/deploy file. This file also does the following. Feel free to customize your deploy files as needed:

  • Finds/Creates an ECR repository with the same name as your project.
  • Sets various ENV variables if not set already. For example, easy to deploy to multiple regions by setting AWS_REGION.
  • Installs gems into local vendor/bundle for deployment via a Docker COPY.

If you used our Quick Start guide, you likely made your first deploy using VS Code's integrated terminal within the development container. This is critically important since your Ruby gems with native extensions are built within the context of the Ruby Ubuntu image being built and copied to ECR for Lambda to use.

When automating deployments, the system must have permission to create the needed resources and IAM Roles with permission(s) for your application to work. Most hobby users have admin level access to their own AWS account. For more security minded folks, consider creating a least privilege user for your deployments with OpenID Connect identity providers. We found that the AWSCodeDeployFullAccess managed policy is often overlooked.

CircleCI

If arm64 is your target platform in production, CircleCI makes it easy to do so using their Arm Execution Environment. Our starter includes a CircleCI config.yml file that runs tests on each commit or deploy by manually triggering a workflow. It even uses the Devcontainer CLI to ensure your CI/CD matches your development experience. Changing between arm64 and x86_64 is described in our CPU Architecture guide.

Deploying requires manually triggering the workflow. Simply select a branch then click "Trigger Workflow" and pass a string parameter called "workflow" with a value of "deploy". Feel free to change this workflow to suit your needs.

CircleCI Trigger Workflow for DeployCircleCI Trigger Workflow for Deploy

GitHub Actions

You can automate both the test and deploy process using our provided GitHub Actions which also leverage the Dev Container Build and Ruby CI project.

  1. Within your project's GitHub repository add two Encrypted Secrets using the credentials values above with the environment names of AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
  2. If needed, change the aws-region in your .github/workflows/deploy.yml file from us-east-1 to your own region.
  3. Trigger a deploy by navigating to the Deploy workflow and clicking "Run workflow".
Lambda & Rails deploy with GitHub Actions
- + \ No newline at end of file diff --git a/docs/assets.html b/docs/assets.html index a465b8b..9b4140e 100644 --- a/docs/assets.html +++ b/docs/assets.html @@ -5,13 +5,13 @@ JavaScript & Assets | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

JavaScript & Assets

Assets require a Custom Domain Name to link properly when using API Gateway. For Function URLs, they work out of the box. Both API Gateway and Function URLs can benefit from a CloudFront CDN to cache /assets and avoid hitting your backend function on each request.

Serving Static Assets

Our Quick Start cookiecutter project leverages Rails' built in ability to serve static assets. We do this by setting this environment variable in your Dockerfile.

Dockerfile
ENV RAILS_SERVE_STATIC_FILES=1

We also add this configuration to your config/environments/production.rb file. In this case we are setting the cache control to 30 days, which you can change. The X-Lamby-Base64 header signals to the Lamby rack adapter that the content requires base64 binary encoding.

config/environments/production.rb
config.public_file_server.headers = {
'Cache-Control' => "public, max-age=#{30.days.seconds.to_i}",
'X-Lamby-Base64' => '1'
}

Adding CloudFront

CloudFront is an amazing CDN and is pretty easy to set up with Rails. Simply point CloudFront to your Rails app and allow the origin to set the cache headers. Because we set the public_file_server headers above, everything should work out perfectly. Assuming you have set up a Custom Domain Name via CloudFront, here is how to set up a behavior for your /assets path. From your CloudFront distribution:

  • Click the "Behaviors" tab
  • Click "Create Behavior" button
  • Path Pattern: /assets/*
  • Select your API Gateway or Function URL origin.
  • Compress objects automatically: Yes
  • Viewer protocol policy: Redirect HTTP to HTTPS
  • Allowed HTTP Methods: GET, HEAD
  • Restrict viewer access: No
  • 🔘 Cache policy and origin request policy (recommended)
    • Cache policy: CachingOptimized
    • Origin request policy: None

JavaScript Ready

Our Quick Start cookiecutter project is ready to hit the ground running with all the latest Rails defaults for JavaScript & CSS development. We do this by adding Node.js to the development container which is also used to build your production image. See our How Lamby Works guide for details.

For example, we can add the TailwindCSS Rails gem, run the ./bin/rails tailwindcss:install command, and edit the temporary starter index page like so. Once redeployed, we should see our Hello TailwindCSS page working correctly.

Gemfile
gem 'tailwindcss-rails'
app/views/application/index.html.erb
<h1 class="
text-center
text-9xl
text-blue-400
mt-5
">Hello TailwindCSS</h1>
TailwindCSS on Rails on LambdaTailwindCSS on Rails on Lambda
- + \ No newline at end of file diff --git a/docs/cold-starts.html b/docs/cold-starts.html index 32163fb..3271ea4 100644 --- a/docs/cold-starts.html +++ b/docs/cold-starts.html @@ -5,13 +5,13 @@ Cold Starts | Lamby - Simple Rails & AWS Lambda Integration using Rack - +
-

Cold Starts

Cold starts (or init times) are an incredibly addictive topic. In many cases they can be ignored as an optimization to perform when the time and data suggests action. In practice, the more traffic your function handles the less likely cold starts are an issue since they statistically disappear under the 99th percentile. However in rare cases, you may want to optimize for them. This guide can help you make decisions on how to go about it. It also describes how AWS may be doing this for you already with Proactive Initialization.

info

Modest sized Rails applications generally boot within 3 to 5 seconds. This happens exactly once for the duration of the function's lifecycle which could last for 30 minutes or more and service a huge amount of traffic with no latency.

Monitoring with CloudWatch

You can not optimize what you do not measure. Thankfully, AWS Lambda logs initialization time of your function to CloudWatch logs which you can query using CloudWatch Insights.

This query below will give you a nice percentile breakdown for your application's init duration which is the code outside the handler method. Feel free to change the bin bucket from 1 hour to whatever time helps you. For example, using 1d (1 day) over a longer duration (weeks) allows you to see statistical trends. In general, your p50 should be under 5 seconds.

fields @initDuration
| filter ispresent(@initDuration)
| stats pct(@initDuration, 5) as p5,
pct(@initDuration, 50) as p50,
pct(@initDuration, 95) as p95,
pct(@initDuration, 99) as p99
by bin(1h)
Rails cold start data from CloudWatch Insights. Shows percentiles for p5, p50, p95, and p99.Rails cold start data from CloudWatch Insights. Shows percentiles for p5, p50, p95, and p99.
info

See the Proactive Initialization section for more details on how to use Lamby's new CloudWatch Metrics to measure both cold starts and proactive initialization.

Proactive Initialization

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

This means the Monitoring with CloudWatch is just half the picture. But how much is your application potentially benefiting from proactive inits? Since Lamby v5.1.0, you can now find out easily using CloudWatch Metrics. To turn metrics on, enable the config like so:

config/environments/production.rb
config.lamby.cold_start_metrics = true

Lamby will now publish CloudWatch Embedded Metrics in the Lamby namespace with a custom dimension for each application's name. Captured metrics include counts for Cold Starts vs. Proactive Initializations. Here is an example running sum of 3 days of data for a large Rails application in the us-east-1 region.

Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill InvokesRails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes

This data shows the vast majority of your initialized Lambda Containers are proactively initialized. Hence, no cold starts are felt by end users or consumers of your function. If you need to customize the name of your Rails application in the CloudWatch Metrics dimension, you can do so using this config.

config/environments/production.rb
config.lamby.metrics_app_name = 'MyServiceName'

Bootsnap by Shopify

Reducing your Rails application's boot time should be your first optimization option against true cold starts. Bootsnap has been developed by Shopify to speed up Rails boot time for production environments using a mix of compile and load path caches. When complete, your deployed container will have everything it needs to boot faster!

How much faster? Generally 1 to 3 seconds depending on your Lambda application. Adding Bootsnap to your Rails Lambda application is straightforward. First, add the gem to your production group in your Gemfile.

Gemfile
group :production do
gem 'bootsnap'
end

Next, we need to add the Bootsnap caches with your deployed container. Add these lines to your project's Dockerfile after your COPY . . declaration. It will run two commands. The first is the standard Bootsnap precompile which builds both the Ruby ISeq & YAML caches. The second line loads your application into memory and thus automatically creates the $LOAD_PATH cache.

Dockerfile
ENV BOOTSNAP_CACHE_DIR=/var/task/tmp/cache
RUN bundle exec bootsnap precompile --gemfile . \
&& bundle exec ruby config/environment.rb

Afterward you should be able to verify that Bootsnap's caches are working. Measure your cold starts using a 1 day stats duration for better long term visibility.

Other Cold Start Factors

Most of these should be considered before using Provisioned Concurrency. Also note, that Proactive Initialization may be masking some of these optimizations for you already. That said, consider the following:

Client Connect Timeouts - Your Lambda application may be used by clients who have a low http open timeout. If this is the case, you may have to increase client timeouts, leverage provisioned concurrency, and/or reduce initialization time.

Update Ruby - New versions of Ruby typically boot and run faster. Since our cookiecutter project uses custom Ruby Ubuntu with Lambda containers, updating Ruby should be as easy as changing a few lines of code.

Memory &amp; vCPU - It has been proposed that increased Memory/vCPU could reduce cold starts. We have not seen any evidence of this. For example, we recommend that Rails functions use 1792 for its MemorySize equal to 1 vCPU. Any lower would sacrifice response times. Tests showed that increasing this to 3008 equal to 2 vCPUs did nothing for a basic Rails application but cost more. However, if your function does concurrent work during initialization, consider testing different values here.

Lazy DB/Resource Connections - Rails is really good at lazy loading database connections. This is important to keep the "Init" phase of the Lambda execution lifecycle quick and under 10s. This allows the first "Invoke" to connect to other resources. To keep init duration low, make sure your application does not eagerly connect to resources. Both ActiveRecord and Memcached w/Dalli are lazy loaded by default.

ActiveRecord Schema Cache - Commonly called Rails' best kept performance feature, the schema cache can help reduce first request response time after Rails is initialized. So it should not help the init time but it could very easily help the first invoke times.

Reduce Image Size - Sort of related to your Ruby version, always make sure that your ECR image is as small as possible. Lambda Containers supports up to 10GB for your image. There is no data on how much this could affect cold starts. So please share your stories.

Provisioned Concurrency

caution

Provisioned concurrency comes with additional execution costs. Now that we have Proactive Initialization it may never be needed.

AWS provides an option called Provisioned Concurrency (PC) which allows you to warm instances prior to receiving requests. This lets you execute Lambda functions with super low latency and no cold starts. Besides setting a static PC value, there are two fundamental methods for scaling with Provisioned Concurrency. Please use the Concurrency CloudWatch Metrics section to help you make a determination on what method is right for you.

Requirements

Our Quick Start cookiecutter includes both an AutoPublishAlias and an all at once DeploymentPreference. The publish alias is needed for provisioned concurrency. You can read about both in AWS "Deploying serverless applications gradually" guide. The code snippets below assume your function's logical resource is RailsLambda and you have an alias named live.

Auto Scaling

Here we are creating an AWS::ApplicationAutoScaling::ScalingPolicy and an AWS::ApplicationAutoScaling::ScalableTarget which effectively creates a managed CloudWatch Rule that monitors your application to scale it up and down as needed. In this example we set a maximum of 40 and a minimum of 5 provisioned instances. We have a TargetValue of 0.4 which is a percentage of provisioned concurrency to trigger the CloudWatch Rules via the ProvisionedConcurrencyUtilization metric. In this case, lower equals a more aggressive scaling strategy.

template.yaml
Resources:
RailsLambda:
# ...
Properties:
ProvisionedConcurrencyConfig:
ProvisionedConcurrentExecutions: 5

RailsScalableTarget:
Type: AWS::ApplicationAutoScaling::ScalableTarget
Properties:
MaxCapacity: 40
MinCapacity: 5
ResourceId: !Sub function:${RailsLambda}:live
RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambda.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency
ScalableDimension: lambda:function:ProvisionedConcurrency
ServiceNamespace: lambda
DependsOn: RailsLambdaAliaslive

RailsScalingPolicy:
Type: AWS::ApplicationAutoScaling::ScalingPolicy
Properties:
PolicyName: utilization
PolicyType: TargetTrackingScaling
ScalingTargetId: !Ref RailsScalableTarget
TargetTrackingScalingPolicyConfiguration:
TargetValue: 0.4
PredefinedMetricSpecification:
PredefinedMetricType: LambdaProvisionedConcurrencyUtilization

Please read this related article. Lambda Provisioned Concurrency AutoScaling is Awesome. Make sure you understand how it works! It goes into great detail on how short traffic bursts (common for most of us) can be missed by the standard CloudWatch Alarms and possible remediation to scale up.

Using a Schedule

In this example we have measured via CloudWatch Metrics (image above) that our concurrent executions never really goes past 40 instances during daytime peak usage. In this case to totally remove cold starts from a small percentage of requests we can draw a big virtual box around the curves above to always keep 40 instances warm during our peak times starting at 6am EST and going back down to 0 Provisioned Concurrency at 11PM EST. Here is how we would do that with a Provisioned Concurrency schedule.

template.yaml
Resources:
RailsScalableTarget:
Type: AWS::ApplicationAutoScaling::ScalableTarget
Properties:
MaxCapacity: 0
MinCapacity: 0
ResourceId: !Sub function:${RailsLambda}:live
RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambda.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency
ScalableDimension: lambda:function:ProvisionedConcurrency
ServiceNamespace: lambda
ScheduledActions:
- ScalableTargetAction:
MaxCapacity: 0
MinCapacity: 0
ScheduledActionName: ScaleDown
Schedule: "cron(0 3 * * ? *)"
- ScalableTargetAction:
MaxCapacity: 40
MinCapacity: 40
ScheduledActionName: ScaleUp
Schedule: "cron(0 10 * * ? *)"
DependsOn: RailsLambdaAliaslive

Concurrency CloudWatch Metrics

The graphs below were made using the following managed AWS Lambda CloudWatch Metrics. Please make sure to use your deploy alias of :live when targeting your functions resource in these reports.

  • ConcurrentExecutions
  • ProvisionedConcurrentExecutions
  • ProvisionedConcurrencySpilloverInvocations

This chart shows that a static ProvisionedConcurrentExecutions of 5 can handle most invocations for the first 3 days. Later, for the remaining 4 days, auto scaling was added with a TargetValue of 0.4. Because of the workload's spiky nature, the Invocations look almost 100% provisioned. However, the concurrent executions show otherwise.

Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill InvokesRails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes

Here is a 7 day view from the 4 day mark above. The TargetValue is still set to 0.4. It illustrates how the default CloudWatch Rule for ProvisionedConcurrencyUtilization metrics over a 3 minute span are not quick enough to scale PC. It is possible to use a TargetValue of 0.1 to force the PC lines to meet the blue. But your cost at this point would be unrealistically high.

Rails in Lambda Concurrent Executions and Provisioned ExecutionsRails in Lambda Concurrent Executions and Provisioned Executions

Gradual Deployments

As mentioned in the Provisioned Concurrency section we use a simple DeploymentPreference value called AllAtOnce. When a deploy happens, Lambda will need to download your new ECR image before your application is initialized. In certain high traffic scenarios along with a potentially slow loading application, deploys can be a thundering herd effect causing your concurrency to spike and a small percentage of users having longer response times.

Please see AWS' "Deploying serverless applications gradually" guide for full details but one way to soften this would be to roll out your new code in 10 minutes total via the Linear10PercentEvery1Minute deployment preference. This will automatically create a AWS CodeDeploy application and deployments for you. So cool!

- +

Cold Starts

Cold starts (or init times) are an incredibly addictive topic. In many cases they can be ignored as an optimization to perform when the time and data suggests action. In practice, the more traffic your function handles the less likely cold starts are an issue since they statistically disappear under the 99th percentile. However in rare cases, you may want to optimize for them. This guide can help you make decisions on how to go about it. It also describes how AWS may be doing this for you already with Proactive Initialization.

info

Modest sized Rails applications generally boot within 3 to 5 seconds. This happens exactly once for the duration of the function's lifecycle which could last for 30 minutes or more and service a huge amount of traffic with no latency.

Monitoring with CloudWatch

You can not optimize what you do not measure. Thankfully, AWS Lambda logs initialization time of your function to CloudWatch logs which you can query using CloudWatch Insights.

This query below will give you a nice percentile breakdown for your application's init duration which is the code outside the handler method. Feel free to change the bin bucket from 1 hour to whatever time helps you. For example, using 1d (1 day) over a longer duration (weeks) allows you to see statistical trends. In general, your p50 should be under 5 seconds.

fields @initDuration
| filter ispresent(@initDuration)
| stats pct(@initDuration, 5) as p5,
pct(@initDuration, 50) as p50,
pct(@initDuration, 95) as p95,
pct(@initDuration, 99) as p99
by bin(1h)
Rails cold start data from CloudWatch Insights. Shows percentiles for p5, p50, p95, and p99.Rails cold start data from CloudWatch Insights. Shows percentiles for p5, p50, p95, and p99.
info

See the Proactive Initialization section for more details on how to use Lamby's new CloudWatch Metrics to measure both cold starts and proactive initialization.

Proactive Initialization

As described in AJ Stuyvenberg's post on the topic Understanding AWS Lambda Proactive Initialization, AWS Lambda may have solved some of your cold start issues for you since March 2023. Stated in an excerpt from AWS' docs:

For functions using unreserved (on-demand) concurrency, Lambda occasionally pre-initializes execution environments to reduce the number of cold start invocations. For example, Lambda might initialize a new execution environment to replace an execution environment that is about to be shut down. If a pre-initialized execution environment becomes available while Lambda is initializing a new execution environment to process an invocation, Lambda can use the pre-initialized execution environment.

This means the Monitoring with CloudWatch is just half the picture. But how much is your application potentially benefiting from proactive inits? Since Lamby v5.1.0, you can now find out easily using CloudWatch Metrics. To turn metrics on, enable the config like so:

config/environments/production.rb
config.lamby.cold_start_metrics = true

Lamby will now publish CloudWatch Embedded Metrics in the Lamby namespace with a custom dimension for each application's name. Captured metrics include counts for Cold Starts vs. Proactive Initializations. Here is an example running sum of 3 days of data for a large Rails application in the us-east-1 region.

Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill InvokesRails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes

This data shows the vast majority of your initialized Lambda Containers are proactively initialized. Hence, no cold starts are felt by end users or consumers of your function. If you need to customize the name of your Rails application in the CloudWatch Metrics dimension, you can do so using this config.

config/environments/production.rb
config.lamby.metrics_app_name = 'MyServiceName'

Bootsnap by Shopify

Reducing your Rails application's boot time should be your first optimization option against true cold starts. Bootsnap has been developed by Shopify to speed up Rails boot time for production environments using a mix of compile and load path caches. When complete, your deployed container will have everything it needs to boot faster!

How much faster? Generally 1 to 3 seconds depending on your Lambda application. Adding Bootsnap to your Rails Lambda application is straightforward. First, add the gem to your production group in your Gemfile.

Gemfile
group :production do
gem 'bootsnap'
end

Next, we need to add the Bootsnap caches with your deployed container. Add these lines to your project's Dockerfile after your COPY . . declaration. It will run two commands. The first is the standard Bootsnap precompile which builds both the Ruby ISeq & YAML caches. The second line loads your application into memory and thus automatically creates the $LOAD_PATH cache.

Dockerfile
ENV BOOTSNAP_CACHE_DIR=/var/task/tmp/cache
RUN bundle exec bootsnap precompile --gemfile . \
&& bundle exec ruby config/environment.rb

Afterward you should be able to verify that Bootsnap's caches are working. Measure your cold starts using a 1 day stats duration for better long term visibility.

Other Cold Start Factors

Most of these should be considered before using Provisioned Concurrency. Also note, that Proactive Initialization may be masking some of these optimizations for you already. That said, consider the following:

Client Connect Timeouts - Your Lambda application may be used by clients who have a low http open timeout. If this is the case, you may have to increase client timeouts, leverage provisioned concurrency, and/or reduce initialization time.

Update Ruby - New versions of Ruby typically boot and run faster. Since our cookiecutter project uses custom Ruby Ubuntu with Lambda containers, updating Ruby should be as easy as changing a few lines of code.

Memory &amp; vCPU - It has been proposed that increased Memory/vCPU could reduce cold starts. We have not seen any evidence of this. For example, we recommend that Rails functions use 1792 for its MemorySize equal to 1 vCPU. Any lower would sacrifice response times. Tests showed that increasing this to 3008 equal to 2 vCPUs did nothing for a basic Rails application but cost more. However, if your function does concurrent work during initialization, consider testing different values here.

Lazy DB/Resource Connections - Rails is really good at lazy loading database connections. This is important to keep the "Init" phase of the Lambda execution lifecycle quick and under 10s. This allows the first "Invoke" to connect to other resources. To keep init duration low, make sure your application does not eagerly connect to resources. Both ActiveRecord and Memcached w/Dalli are lazy loaded by default.

ActiveRecord Schema Cache - Commonly called Rails' best kept performance feature, the schema cache can help reduce first request response time after Rails is initialized. So it should not help the init time but it could very easily help the first invoke times.

Reduce Image Size - Sort of related to your Ruby version, always make sure that your ECR image is as small as possible. Lambda Containers supports up to 10GB for your image. There is no data on how much this could affect cold starts. So please share your stories.

Provisioned Concurrency

caution

Provisioned concurrency comes with additional execution costs. Now that we have Proactive Initialization it may never be needed.

AWS provides an option called Provisioned Concurrency (PC) which allows you to warm instances prior to receiving requests. This lets you execute Lambda functions with super low latency and no cold starts. Besides setting a static PC value, there are two fundamental methods for scaling with Provisioned Concurrency. Please use the Concurrency CloudWatch Metrics section to help you make a determination on what method is right for you.

Requirements

Our Quick Start cookiecutter includes both an AutoPublishAlias and an all at once DeploymentPreference. The publish alias is needed for provisioned concurrency. You can read about both in AWS "Deploying serverless applications gradually" guide. The code snippets below assume your function's logical resource is RailsLambda and you have an alias named live.

Auto Scaling

Here we are creating an AWS::ApplicationAutoScaling::ScalingPolicy and an AWS::ApplicationAutoScaling::ScalableTarget which effectively creates a managed CloudWatch Rule that monitors your application to scale it up and down as needed. In this example we set a maximum of 40 and a minimum of 5 provisioned instances. We have a TargetValue of 0.4 which is a percentage of provisioned concurrency to trigger the CloudWatch Rules via the ProvisionedConcurrencyUtilization metric. In this case, lower equals a more aggressive scaling strategy.

template.yaml
Resources:
RailsLambda:
# ...
Properties:
ProvisionedConcurrencyConfig:
ProvisionedConcurrentExecutions: 5

RailsScalableTarget:
Type: AWS::ApplicationAutoScaling::ScalableTarget
Properties:
MaxCapacity: 40
MinCapacity: 5
ResourceId: !Sub function:${RailsLambda}:live
RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambda.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency
ScalableDimension: lambda:function:ProvisionedConcurrency
ServiceNamespace: lambda
DependsOn: RailsLambdaAliaslive

RailsScalingPolicy:
Type: AWS::ApplicationAutoScaling::ScalingPolicy
Properties:
PolicyName: utilization
PolicyType: TargetTrackingScaling
ScalingTargetId: !Ref RailsScalableTarget
TargetTrackingScalingPolicyConfiguration:
TargetValue: 0.4
PredefinedMetricSpecification:
PredefinedMetricType: LambdaProvisionedConcurrencyUtilization

Please read this related article. Lambda Provisioned Concurrency AutoScaling is Awesome. Make sure you understand how it works! It goes into great detail on how short traffic bursts (common for most of us) can be missed by the standard CloudWatch Alarms and possible remediation to scale up.

Using a Schedule

In this example we have measured via CloudWatch Metrics (image above) that our concurrent executions never really goes past 40 instances during daytime peak usage. In this case to totally remove cold starts from a small percentage of requests we can draw a big virtual box around the curves above to always keep 40 instances warm during our peak times starting at 6am EST and going back down to 0 Provisioned Concurrency at 11PM EST. Here is how we would do that with a Provisioned Concurrency schedule.

template.yaml
Resources:
RailsScalableTarget:
Type: AWS::ApplicationAutoScaling::ScalableTarget
Properties:
MaxCapacity: 0
MinCapacity: 0
ResourceId: !Sub function:${RailsLambda}:live
RoleARN: !Sub arn:aws:iam::${AWS::AccountId}:role/aws-service-role/lambda.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_LambdaConcurrency
ScalableDimension: lambda:function:ProvisionedConcurrency
ServiceNamespace: lambda
ScheduledActions:
- ScalableTargetAction:
MaxCapacity: 0
MinCapacity: 0
ScheduledActionName: ScaleDown
Schedule: "cron(0 3 * * ? *)"
- ScalableTargetAction:
MaxCapacity: 40
MinCapacity: 40
ScheduledActionName: ScaleUp
Schedule: "cron(0 10 * * ? *)"
DependsOn: RailsLambdaAliaslive

Concurrency CloudWatch Metrics

The graphs below were made using the following managed AWS Lambda CloudWatch Metrics. Please make sure to use your deploy alias of :live when targeting your function's resource in these reports.

  • ConcurrentExecutions
  • ProvisionedConcurrentExecutions
  • ProvisionedConcurrencySpilloverInvocations

This chart shows that a static ProvisionedConcurrentExecutions of 5 can handle most invocations for the first 3 days. Later, for the remaining 4 days, auto scaling was added with a TargetValue of 0.4. Because of the workload's spiky nature, the Invocations look almost 100% provisioned. However, the concurrent executions show otherwise.

Rails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill InvokesRails in Lambda Concurrent Executions, Invocations, and Provisioned Executions & Spill Invokes

Here is a 7 day view from the 4 day mark above. The TargetValue is still set to 0.4. It illustrates how the default CloudWatch Rule for ProvisionedConcurrencyUtilization metrics over a 3 minute span are not quick enough to scale PC. It is possible to use a TargetValue of 0.1 to force the PC lines to meet the blue. But your cost at this point would be unrealistically high.

Rails in Lambda Concurrent Executions and Provisioned ExecutionsRails in Lambda Concurrent Executions and Provisioned Executions

Gradual Deployments

As mentioned in the Provisioned Concurrency section we use a simple DeploymentPreference value called AllAtOnce. When a deploy happens, Lambda will need to download your new ECR image before your application is initialized. In certain high traffic scenarios along with a potentially slow loading application, deploys can be a thundering herd effect causing your concurrency to spike and a small percentage of users having longer response times.

Please see AWS' "Deploying serverless applications gradually" guide for full details. However, one way to soften this would be to roll out your new code in 10 minutes total via the Linear10PercentEvery1Minute deployment preference. This will automatically create an AWS CodeDeploy application and deployments for you. So cool!

+ \ No newline at end of file diff --git a/docs/cpu.html b/docs/cpu.html index 7e3edbc..efc0c98 100644 --- a/docs/cpu.html +++ b/docs/cpu.html @@ -5,13 +5,13 @@ CPU Architecture | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

CPU Architecture

Our cookiecutter project defaults to building a Linux container image targeting the arm64 architecture vs the traditional x86_64 cpu type. Applications that use arm64 (AWS Graviton2 processor) can achieve significantly better price and performance than the equivalent workloads running on the x86_64 architecture.

Deploying arm64 applications is still a relatively new process and requires a few special considerations from local development to your CI/CD tooling. AWS Lambda makes this easy using the Architectures setting of the AWS::Lambda::Function CloudFormation resource. However, here are a few things you should know.

Docker Images

Most base Docker images are now built for multiple platforms. Consider the following Dockerfile:

FROM ruby:3.2-bullseye

How does Docker know which platform to use? The answer is to use the default platform of the host. If you are on a M1 or M2 Mac, arm64 would be the platform used. Which platforms are in a specific base image? We can find out using the docker manifest command. For example:

$ docker manifest inspect ruby:3.2 | grep arch
"architecture": "amd64",
"architecture": "arm64",
$ docker manifest inspect ruby:3.2-bullseye | grep arch
"architecture": "amd64",
"architecture": "arm64",

All the images in our starter project are multi-platform. This means any host can be used for development. Your computer, Codespaces, etc will use the proper platform image variants.

Deployment Gotchas

There are numerous ways to deploy containers using techniques such as emulation. However, we recommend following one simple rule. Matching your “Development Host OS/Arch” to that of your target “Deployment Host OS/Arch” provides the least development friction. Use a CI/CD platform that matches your deployment target.

caution

Currently GitHub Actions does not support native arm64 runners. They are working to add this feature.

Our Quick Start guide has your first deploy happening from your local machine. Since we default to arm64 this should work fine if you are on a Mac with Apple Silicon. But what if you are on a Windows or Linux system with an x86_64 architecture? Your function will not work since your application's system dependencies (like mysql2) will be compiled for the wrong architecture. Depending on your needs, you may have to switch back to x86_64 as described below.

For more information on deployments, see our How Lamby Works guide.

Switching to x86_64

Based on the current state of our cookiecutter project, here are the changes required to switch to a x86_64 deployment target. First, change your CircleCI workflows default machine from `arm.large` to a standard large.

.circleci/config.yml
default-machine: &default-machine
machine:
image: ubuntu-2204:current
docker_layer_caching: true
- resource_class: arm.large
+ resource_class: large

Now open up your AWS SAM serverless template.yaml file, find the Globals section and change your Architecture property from arm64 to x86_64.

template.yaml
Globals:
Function:
Architectures:
- - arm64
+ - x86_64
- + \ No newline at end of file diff --git a/docs/custom-domain.html b/docs/custom-domain.html index f43a96c..b4fb1fd 100644 --- a/docs/custom-domain.html +++ b/docs/custom-domain.html @@ -5,13 +5,13 @@ Custom Domain Names | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Custom Domain Names

Function URL

If you are following our latest cookiecutter pattern, then you are using Lambda's free Function URLs (FURL) which allows JavaScript & Assets to work out of the box. Using a custom domain name with a FURL is as easy as adding CloudFront. To see your FURL in the AWS Console, open the Lambda section -> Click Your Function Name -> Open Versions Tab -> Open Configuration Tab -> Click alias: live. Your Function URL will appear in the upper right. Ex: uniquestring.lambda-url.us-east-1.on.aws. Custom Domain Name Steps:

API Gateway

For API Gateway Lamby users, their Custom Domain Name feature is the only way to get JavaScript & Assets working correctly by removing the stage path. You can optionally add a CloudFront distribution above this for edge caching. Custom Domain Name Steps:

Individual Steps

Secure Certificate with ACM

We are going to use AWS Certificate Manager to secure your HTTPS traffic under your custom domain. Again, this assumes your domain is setup in Route53 since you will need to validate the certificate and AWS makes that super easy with DNS.

  • AWS Console -> Certificate Manager
  • Click "Request a certificate" button.
  • Select "Request a public certificate", and "Request a certificate" button.
  • Domain name. Ex: *.example.com
  • Click "Next"
  • Select "DNS validation", and "Review".
  • Click "Confirm and request" button.
  • Click the tiny disclosure triangle beside your domain name.
  • Click the "Create record in Route 53" button then "Create" again in modal.
  • Click "Continue"

Verification will take about 3 minutes. From the Certificate Manager dashboard, you can wait and/or hit the 🔄 button and the Status will change from "Pending validation" to "Issued".

Simple CloudFront Distribution

Basic reference steps for creating a CloudFront distribution. If you are editing an existing CloudFront distribution, some of these settings might be in your default behavior vs the distribution.

  • Origin:
    • Origin Domain: Function URL or API Gateway Custom Domain Name Endpoint Config
    • Protocol: HTTPS only
    • Minimum Origin SSL Protocol: TLSv1.2
    • Origin Path: /production (⚠️ Ignore for FURLs. API Gateway stage name.)
    • Add Custom Header: X-Forwarded-Host myapp.example.com
  • Default Cache Behavior:
    • Compress Objects Automatically: Yes
    • Viewer Protocol Policy: Redirect HTTP to HTTPS
    • Allowed HTTP Methods: GET, HEAD, OPTIONS, PUT, POST, PATCH, DELETE
      • Cached HTTP Methods: ✔️ OPTIONS
    • Cache key and origin requests:
    • 🔘 Legacy cache settings:
      • Headers: Include the following headers
        • Accept
      • Query Strings: All
      • Cookies: All
      • 🔘 Use origin cache headers
  • Settings:
    • Price Class: Use only North America and Europe
    • Alternate domain name (CNAME): myapp.example.com
    • Custom SSL Certificate (select *.example.com from ACM steps)

This process takes a while to fully deploy. Once done you will have a CloudFront domain name looking something like dxxxxxxxxxxxxx.cloudfront.net. You can now Create a Route53 Record alias for myapp.example.com to this CloudFront distribution domain name.

Feel free to create an additional behavior for the /assets path using the CachingOptimized cache policy and None for the Origin request policy. This will ensure the asset pipeline files are edge-cached and compressed.

Create a Route53 Record

From here all we need is a DNS entry in Route53 that points to our origin. Typically this would be to your CloudFront distribution. Like the one you may have created for your Function URLs or your [API Gateway](#api-gateway) custom domain name.

  • AWS Console -> Route 53 -> Hosted zones
  • Click on your domain
  • Click "Create record"
  • Click "Switch to wizard" if not selected already.
  • Select "Simple routing"
  • Click "Next"
  • Click "Define simple record"
  • Record name. Ex: myapp
  • Record type: A - Routes traffic to an IPv4 address and some AWS resources
  • Value/Route traffic to: (either or)
    • Alias to CloudFront distribution
      • Endpoint: dxxxxxxxxxxxxx.cloudfront.net
    • Alias to API Gateway API
      • Choose Region: Ex: us-east-1
      • Choose endpoint: Should autofill, Ex: d-xxxxxxxxxx.execute-api.us-east-1.amazonaws.com
  • Evaluate target health: No
  • Click "Define simple record"
  • Click "Create records"

API Gateway Custom Domain Names

Any Web Proxy Integrations with API Gateway will need to leverage its Custom Domain Name feature. The only exception would be if you are using an Application Load Balancer without REST API. When completed, your final endpoint would look like this d-byp3km86t3.execute-api.us-east-1.amazonaws.com and would then become a CloudFront origin.

  • AWS Console -> API Gateway
  • Click "Custom domain names" in the left panel.
  • Click "Create" button
  • Enter domain name. Ex: myapp.example.com
  • Use default TLS 1.2 (recommended).
  • Endpoint type Regional.
  • ACM certificate. Select wildcard matching domain from above.
  • Click "Create domain name"

After this has been created, the mappings tab should be selected. From here we need to create an API Mapping to point to your specific API Gateway and stage/path. Assuming it is selected:

  • Click the "API mappings" tab.
  • You should see "No API mappings have been configured..." message
  • Click "Configure API mappings" button.
  • Click "Add new mapping" button.
  • Select your API: Ex: myapp (HTTP - 511n0spvi9).
  • Select your Stage: Ex: production.
    • If you see Stage and production ignore Stage. Known REST bug.
  • Leave Path empty.
  • Click the "Save" button.
- + \ No newline at end of file diff --git a/docs/database.html b/docs/database.html index ae3192a..bcd5290 100644 --- a/docs/database.html +++ b/docs/database.html @@ -5,13 +5,13 @@ Database & VPCs | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Database & VPCs

Any database supported by Ruby or Rails can be used with Lambda assuming your VPC/Network allows those connections. This guide will not get into the details on how to setup or use various databases options within AWS itself like RDS, Aurora, or DynamoDB. However, we will address a few high level topics along with some conventions in our cookiecutter project.

Our Cookiecutter

Our Quick Start project does not create a database but it does have a MySQL service attached to the dev container to facilitate quickly iterating toward using one. The two key files' snippets are below. If you decide to switch to a different database like PostgreSQL, make adjustments to these files and your Gemfile as needed.

config/database.yml
default: &default
adapter: mysql2
username: root
password: <%= ENV["MYSQL_ROOT_PASSWORD"] %>
host: <%= ENV.fetch("MYSQL_HOST") { "localhost" } %>
.devcontainer/docker-compose.yml
services:
app:
environment:
- MYSQL_HOST=mysql
- MYSQL_ROOT_PASSWORD=root

VPC Configuration

Most Rails applications within AWS are deployed to a private subnet(s) within a VPC which allows you to have direct network access to your relational database. For most folks, this is the default VPC which means finding your subnet ids and security groups are fairly easy. Once you have those, add this VpcConfig to your project's template.yaml file within the existing globals section.

template.yaml
Globals:
Function:
VpcConfig:
SubnetIds:
- subnet-09792e6cd06dd59ad
- subnet-0501f3136415021da
SecurityGroupIds:
- sg-07be99aff5fb14557

Adding it here will ensure every function within your stack has a common VPC setting. Using a VpcConfig should automatically add the AWSLambdaVPCAccessExecutionRole managed policy to your Lambda's execution role. If not, you can manually add it to your Policies section.

template.yaml
RailsLambda:
Properties:
Policies:
- AWSLambdaVPCAccessExecutionRole

Database Migrations

Please see the Running Tasks guide on how to use Lamby's task runner for migrations or other on-demand tasks like Rake.

Using DynamoDB

In some cases Rails with DynamoDB is an excellent choice. If this sounds right for you, I highly recommend using the Aws::Record gem which leverages the aws-sdk-dynamodb in a very Rails like ActiveModel way. Please share your stories with us.

- + \ No newline at end of file diff --git a/docs/environment.html b/docs/environment.html index 06579de..0d66d04 100644 --- a/docs/environment.html +++ b/docs/environment.html @@ -5,13 +5,13 @@ ENV Variables & Secrets | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Environment Configuration & Secrets

Most Rails applications require over a dozen environment variables to configure itself along with other popular gems used. Most notable is ActiveRecord's DATABASE_URL. There are numerous ways to configure environment variables ranging from "quick and dirty" by adding secrets to your git repo (⚠️) all the way to a strict "separation of config" from code using countless methods to achieve a proper Twelve-Factor application. We want to cover a few topics that may help you pick and choose what works best for you.

Configuration

You can add simple configurations to all of your function's environments using SAM's global section. Configurations like these are ones that you feel safe committing to your git repo.

template.yaml
Globals:
Environment:
Variables:
SOME_SERVICE_URL: https://prod.some-service.com/api

If you deploy to multiple environments, you can even have these be dynamic by leveraging CloudFormation's mappings. Here is an example that builds on our RailsEnv parameter.

template.yaml
Mappings:
SomeService:
staging:
Url: https://staging.some-service.com/api
production:
Url: https://prod.some-service.com/api
# ...
Globals:
Environment:
Variables:
SOME_SERVICE_URL: !FindInMap [SomeService, !Ref RailsEnv, Url]

Secrets with Crypteia

The Crypteia package is a Rust Lambda Extension for any Runtime/Container to preload SSM Parameters as secure environment variables. It takes advantage of LD_PRELOAD to seamlessly fetch values from SSM when a process starts and then injects them as natively accessible Ruby ENV variables. Our Quick Start guide's cookiecutter includes Crypteia already for you via a Docker COPY command into the Lambda Extension /opt directory.

Dockerfile
FROM ruby:3.2-bullseye
COPY --from=ghcr.io/rails-lambda/crypteia-extension-debian:1 /opt /opt

Usage is simply done by adding variables to your SAM template and accessing the values fetched from SSM like any other environment variable. Please read the Crypteia's documentation for full details on how to add IAM Permissions to read SSM Parameters.

Globals:
Environment:
Variables:
SECRET: x-crypteia-ssm:/myapp/SECRET
ENV['SECRET'] # 1A2B3C4D5E6F

About SECRET_KEY_BASE

Our cookiecutter project disabled Rails encrypted credentials in favor of a more simple SECRET_KEY_BASE setting. The starter project places a temporary value for this environment variable in the config/initializers/secret_key_base.rb file. Please remove the ENV['SECRET_KEY_BASE'] = '0123...' line and use Crypteia as described above.

Modern IAM Role Usage

If your application uses other AWS resources like EventBridge or S3, you may be using environment variables like AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. Avoid this pattern. Instead, please add explicit IAM policies within your template.yaml file. They will be attached to your Lambda's Execution Role and inherently give your Lambda the needed permissions. AWS is constantly making IAM permissions more approachable. There are two high level interfaces within SAM to connect your application to cloud resources. Newest first:

  • AWS SAM Connectors: Are an AWS SAM abstract resource type, identified as AWS::Serverless::Connector, that can be defined in your AWS SAM templates to grant Read and Write access of data and events from a supported AWS resource to another.
  • AWS SAM Policy Templates: Are pre-defined sets of permissions that you can add to your AWS SAM templates to manage access and permissions between your AWS Lambda functions, AWS Step Functions state machines and the resources they interact with.

If needed, you can use the lower level Policies property of your AWS::Serverless::Function resource to attach any inline policies to your application's IAM Role.

- + \ No newline at end of file diff --git a/docs/observability.html b/docs/observability.html index 9d9260c..7ef191d 100644 --- a/docs/observability.html +++ b/docs/observability.html @@ -5,13 +5,13 @@ Logging & Observability | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Logging & Observability

One of the greatest things about AWS Lambda is that you get all the benefits of CloudWatch logging built into the platform. Logging is just a simple Ruby puts command away. Here are a few amazing things to help you succeed with good logging and observability patterns in AWS with CloudWatch.

STDOUT is a Must!

Lambda is a read-only file system. The Lamby gem will set the RAILS_LOG_TO_STDOUT environment variable on your behalf. It also freedom patches the core Ruby Logger class to force STDOUT. That said, be on the lookout for any rogue disk-based logging you may have to address. Older Rails applications may have to use a pattern like this.

config/environments/production.rb
logger = ActiveSupport::Logger.new(STDOUT)
logger.formatter = ActiveSupport::Logger::SimpleFormatter.new
config.logger = logger
config.log_level = :info

Using Lograge

Our cookiecutter installs and configures the Lograge gem to reduce CloudWatch data costs while easily allowing CloudWatch Insights to parse and query your logs. If your project is not using Lograge, please consider adding it as we do.

Gemfile
gem 'lograge'
config/environments/production.rb
config.lograge.enabled = true
config.lograge.formatter = Lograge::Formatters::Json.new
config.lograge.custom_payload do |controller|
{ requestid: controller.request.request_id }
end

CloudWatch Log Insights

CloudWatch Logs Insights enables you to interactively search and analyze your log data in Amazon CloudWatch Logs. You can perform queries to help you quickly and effectively respond to operational issues. If an issue occurs, you can use CloudWatch Logs Insights to identify potential causes and validate deployed fixes.

🎥 YouTube: Analyze Log Data with CloudWatch Logs Insights

CloudWatch Embedded Metrics

The CloudWatch Embedded Metric Format enables CloudWatch to ingest complex high-cardinality application data in the form of logs and easily generate actionable metrics and alarms from them. By sending your logs in the new Embedded Metric Format, you can now easily create custom metrics without having to instrument or maintain separate code, while gaining powerful analytical capabilities on your log data. You can get started with embedded metrics by using our rails-lambda/aws-embedded-metrics Ruby gem. The following Lamby-friendly libraries use this format:

  • Crypteia: SSM Parameters as secure ENV variables.
  • Lambdakiq: ActiveJob on SQS & Lambda.

New Relic

Some older Application Performance Monitor (APM) gems can be used with Lambda but you must flush their data after each request in a way that does not impact response performance. You can do this with the LambdaPunch.

config.lambda.handled_proc = Proc.new do |_event, context|
LambdaPunch.push { NewRelic::Agent.agent.flush_pipe_data }
LambdaPunch.handled!(context)
end
Async Processing Using Lambda Extensions
- + \ No newline at end of file diff --git a/docs/quick-start.html b/docs/quick-start.html index 06382a4..d8cade1 100644 --- a/docs/quick-start.html +++ b/docs/quick-start.html @@ -5,13 +5,13 @@ Quick Start | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Quick Start

Deploy a new Rails APP to Lambda in 5 minutes!

Lamby can be used with existing Rails v5 application or higher. The quickest way to learn how Rails works on Lambda is to deploy a new application to your AWS account using our cookiecutter project template.

note

Before you get started, make sure that you have Docker installed, an AWS account, and Visual Studio Code open. 🚢 Install Docker ⛅️ AWS Account 📝 Install VS Code

Rails Project Template

We created a rails-lambda/lamby-cookiecutter repository which allows you to initialize a new project from a GitHub template, commonly called a cookiecutter. Run this terminal command to create a new Rails project with Lamby already installed.

docker run \
--rm \
--interactive \
--volume "${PWD}:/var/task" \
ghcr.io/rails-lambda/lamby-cookiecutter \
"gh:rails-lambda/lamby-cookiecutter"

You will be prompted for a project name. Choose something short, no spaces, and with underscores for word breaks. Example: new_service.

project_name [my_awesome_lambda]:

Development Container

caution

Is your local computer an x86_64 system such as an older Intel Mac or Windows? Our starter defaults to an arm64 deployment target. If needed, see our CPU Architecture guide on how to switch to x86_64.

Your new Rails project with Lamby leverages GitHub Codespaces which itself is built atop of the Development Container specification. In short, this means your project's containers are easy to use by any editor, even outside of Codespaces.

VS Code makes this incredibly easy. Within a new window, open the command palette and type "dev container open" and select that action to Open Folder in Container.... When prompted, select the project folder created in the previous step.

Docusaurus themed imageDocusaurus themed image

When the dev container's build is complete, VS Code will display the project folder within the container. This container uses the same base Docker image as the one we are going to deploy to AWS. Unlike the production image, this container comes with all sorts of build utilities, including the AWS & SAM CLI which we are going to use in the next step to deploy your Rails application to AWS Lambda.

Deploy to Lambda

Open the integrated terminal by typing View: Toggle Terminal in the command palette. This VS Code terminal is within your development container, an official Ruby Ubuntu image.

Docusaurus themed imageDocusaurus themed image

First, configure the AWS CLI with your AWS access key and secret.

aws configure

Now we can run the deploy script which uses the AWS SAM CLI.

./bin/deploy
caution

Deploy scripts are best run via automated CI/CD system such as GitHub Actions. Please see our full How Lamby Works deployment section

Yay! You're on Rails!

At the end of the deploy process above, you will see SAM print the outputs for the CloudFormation template being deployed. This includes your Lambda Function URL, a free web server proxy to your Lambda container running Rails.

CloudFormation outputs from deployed stack
-------------------------------------------------------------------------------------------
Outputs
-------------------------------------------------------------------------------------------
Key RailsLambdaUrl
Description Lambda Function URL
Value https://b4hsncwngvxg6rv67b64r545ly0jrwnk.lambda-url.us-east-1.on.aws/
-------------------------------------------------------------------------------------------

Successfully created/updated stack - new-service-production in us-east-1

Open your browser and go to the URL. You should see the familiar welcome to Rails screen.

Yay! You're on Rails with AWS Lambda containersYay! You're on Rails with AWS Lambda containers

What Just Happened?

You just deployed a new Rails application to AWS Lambda containers using a basic Ruby Ubuntu Docker base image. Every part of your application is wrapped up neatly in a single CloudFormation stack. This stack has everything you need for a server-side API and/or a client JavaScript application hosted on AWS Lambda. Please take some time to explore how Lamby works in the next sections.

- + \ No newline at end of file diff --git a/docs/running-tasks.html b/docs/running-tasks.html index 368adad..91be828 100644 --- a/docs/running-tasks.html +++ b/docs/running-tasks.html @@ -5,13 +5,13 @@ Running Tasks & Console | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Running Tasks & Console

It can be common for Rails engineers to fire up the Rails console for some quick debugging or to run code like a Rake task. That said, console'ing into a Lambda function (typically done via SSH) is not possible and requires an event-driven & stateless solution. For this, we have the Lambda Console tool.

Lamby: Simple Rails & AWS Lambda Integration using RackLamby: Simple Rails & AWS Lambda Integration using Rack

💁‍♂️ https://github.com/rails-lambda/lambda-console

Lamby leverages the Lambda Console using our Ruby implementation of the spec via the lambda-console-ruby gem. Here is a quick overview on how to use it for common Rails tasks. Please see the Lambda Console project for complete documentation on the CLI installation and usage.

caution

To use the Lambda Console, please make sure you are using Lamby v5.0.0 or higher.

Common Considerations

Here are some common considerations when using the Lambda Console to run tasks or interactive commands.

Function Timeout

Each run or interact event sent will need to respond within your function's timeout. Since HTTP interactions via most AWS services are limited to 30s, so too is your function's default timeout set to that. If your task takes longer than this, consider temporarily increasing the value in your CloudFormation template or duplicating your function (copy paste) to a new Lambda Function resource dedicated for running console tasks. A Lambda function can have a maximum of 15m execution time. Just remember that API Gateway integration will always be limited to 30s under the function's timeout. So these timeouts can operate independently.

IAM Security & Permissions

The Lambda Console leverages AWS SDKs to send invoke events to your function(s). This means you are in full control of the security of your function and who can invoke it with the following IAM actions for your user or role:

  • lambda:ListFunctions
  • lambda:InvokeFunction

Customizing Runner Patterns

By default, Lamby v5 and higher allows any command to be run. If you want to enforce which commands can be run at the application layer, please use the Lamby config in your production.rb environment file.

config.lamby.runner_patterns.clear
config.lamby.runner_patterns.push %r{\A/bin/foo.*}

Here we are clearing/removing the default expression pattern of /.*/ in favor of one that allows any /bin/foo command to be run.

- + \ No newline at end of file diff --git a/docs/webservers.html b/docs/webservers.html index 102f73b..0b447d1 100644 --- a/docs/webservers.html +++ b/docs/webservers.html @@ -5,13 +5,13 @@ Web Proxy Integrations | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Web Proxy Integrations

We recommend using Lambda Function URLs which are free, work with JavaScript & CSS assets out of the box, and are easy to map to a custom domain name. However, here are some SAM YAML snippets if you would like to use an alternate web server integration for your application. Remember, Lamby automatically detects which integration you are using.

note

The code snippets below are shown in diff format when compared to the latest files in the Quick Start cookiecutter project template.

API Gateway HTTP API

The HTTP API is the most modern integration after Function URLs. It is also really easy to add into our template file.

--- template.yaml
+++ template.yaml
@@ -28,14 +28,22 @@ Resources:
DockerTag: web
Properties:
AutoPublishAlias: live
- FunctionUrlConfig:
- AuthType: NONE
DeploymentPreference:
Type: AllAtOnce
+ Events:
+ HttpApiProxy:
+ Type: HttpApi
+ Properties:
+ ApiId: !Ref RailsHttpApi
MemorySize: 1792
PackageType: Image
Timeout: 30

+ RailsHttpApi:
+ Type: AWS::Serverless::HttpApi
+ Properties:
+ StageName: !Ref RailsEnv

API Gateway REST API

The REST API is a little more verbose, but it essentially sets up a simple proxy that Lamby can use. The integration uri lines use the :live alias since our starter defaults to using an AutoPublishAlias: live

Application Load Balancer

Using Lambda's ALB Integration is a great way to setup your application on a private VPC. However, they limit response payloads to less than 1MB vs. the 6MB limit for API Gateway. Using Rack::Deflater can help with this if needed. These resources make use of the VPC subnets and security groups mentioned in the Database & VPCs guide.

--- template.yaml
+++ template.yaml
@@ -28,14 +28,52 @@ Resources:
DockerTag: web
Properties:
AutoPublishAlias: live
- FunctionUrlConfig:
- AuthType: NONE
DeploymentPreference:
Type: AllAtOnce
MemorySize: 1792
PackageType: Image
Timeout: 30

+ RailsLoadBalancer:
+ Type: AWS::ElasticLoadBalancingV2::LoadBalancer
+ Properties:
+ Scheme: internal
+ SubnetIds:
+ - subnet-09792e6cd06dd59ad
+ - subnet-0501f3136415021da
+ SecurityGroupIds:
+ - sg-07be99aff5fb14557
+
+ RailsLoadBalancerHttpsListener:
+ Type: AWS::ElasticLoadBalancingV2::Listener
+ Properties:
+ Certificates:
+ - CertificateArn: arn:aws:acm:us-east-1:123456789012:certificate/38613b58-c21e-11eb-8529-0242ac130003
+ DefaultActions:
+ - TargetGroupArn: !Ref RailsLoadBalancerTargetGroup
+ Type: forward
+ LoadBalancerArn: !Ref RailsLoadBalancer
+ Port: 443
+ Protocol: HTTPS
+
+ RailsLoadBalancerTargetGroup:
+ Type: AWS::ElasticLoadBalancingV2::TargetGroup
+ DependsOn: RailsLambdaInvokePermission
+ Properties:
+ TargetType: lambda
+ TargetGroupAttributes:
+ - Key: lambda.multi_value_headers.enabled
+ Value: true
+ Targets:
+ - Id: !GetAtt RailsLambda.Arn
+
+ RailsLambdaInvokePermission:
+ Type: AWS::Lambda::Permission
+ Properties:
+ FunctionName: !GetAtt RailsLambda.Arn
+ Action: "lambda:InvokeFunction"
+ Principal: elasticloadbalancing.amazonaws.com
- + \ No newline at end of file diff --git a/index.html b/index.html index 698e5f6..8fcfa95 100644 --- a/index.html +++ b/index.html @@ -5,13 +5,13 @@ Lamby - Simple Rails & AWS Lambda Integration using Rack | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Simple Rails & AWS Lambda Integration using Rack

Event-driven and deeply integrated within AWS, Lambda allows your Rails architecture to be completely reimagined atop fully managed infrastructure resources like Aurora, SQS, S3, CloudWatch, IAM, and much more. Using Lamby can help your engineering teams learn to "program the cloud".

Event-Based with Rack

No webserver needed! Lamby is a Rack adapter that converts any AWS Lambda integration into Rack objects that are sent directly to your app. Lamby supports Function URLs, API Gateway (HTTP or REST), and Application Load Balancer (ALB) integrations. Background jobs and other events are supported.

Container-First Principles

Any containerized Rails application can run on AWS Lambda. Compute can rapidly scale to meet any demand and back down to zero for cost savings. Images have access to 10 GB size limits, 10 GB of memory, & as many as 6 vCPUs. Lamby even promotes the use of the same containers for development.

Easy IaC to CI/CD

Infrastructure as Code (IaC) will now be front and center in your Rails project folder by using the AWS Serverless Application Model (SAM). Easily create serverless Resources like S3 Buckets, EventBridge Rules, IAM Roles and more. Leverage SAM's CLI to create/update AWS Resources and your container images.

- + \ No newline at end of file diff --git a/markdown-page.html b/markdown-page.html index 9590917..17a03ea 100644 --- a/markdown-page.html +++ b/markdown-page.html @@ -5,13 +5,13 @@ Markdown page example | Lamby - Simple Rails & AWS Lambda Integration using Rack - +

Markdown page example

You don't need React to write simple standalone pages.

- + \ No newline at end of file