From 278022d940504766451b94e4c50543cb89d3b164 Mon Sep 17 00:00:00 2001
From: Alon Burg
Date: Wed, 3 Jan 2024 15:31:25 +0200
Subject: [PATCH] Deploy website - based on df8e342392cb59dd5476e84a18910b26ca1249b5

---
 404.html | 4 ++--
 assets/js/5cce4860.1646f486.js | 1 -
 assets/js/5cce4860.745a04b4.js | 1 +
 assets/js/{935f2afb.df8f5db5.js => 935f2afb.71ee9e05.js} | 2 +-
 assets/js/9fda4f55.54385613.js | 1 -
 assets/js/9fda4f55.9fd222fc.js | 1 +
 ...{runtime~main.7992fbcc.js => runtime~main.1691e83c.js} | 2 +-
 blog/archive/index.html | 4 ++--
 blog/first-blog-post/index.html | 4 ++--
 blog/index.html | 4 ++--
 blog/long-blog-post/index.html | 4 ++--
 blog/mdx-blog-post/index.html | 4 ++--
 blog/tags/docusaurus/index.html | 4 ++--
 blog/tags/facebook/index.html | 4 ++--
 blog/tags/hello/index.html | 4 ++--
 blog/tags/hola/index.html | 4 ++--
 blog/tags/index.html | 4 ++--
 blog/welcome/index.html | 4 ++--
 docs/api/overview/index.html | 4 ++--
 docs/api/prompt/create/index.html | 4 ++--
 docs/api/prompt/list/index.html | 4 ++--
 docs/api/prompt/prompt/index.html | 4 ++--
 docs/api/prompt/retrieve/index.html | 4 ++--
 docs/api/sdxl-api/index.html | 4 ++--
 docs/api/tune/create/index.html | 4 ++--
 docs/api/tune/index.html | 4 ++--
 docs/api/tune/list/index.html | 4 ++--
 docs/api/tune/retrieve/index.html | 4 ++--
 docs/category/api/index.html | 4 ++--
 docs/category/features/index.html | 4 ++--
 docs/category/prompts/index.html | 4 ++--
 docs/category/tunes/index.html | 4 ++--
 docs/category/use-cases/index.html | 4 ++--
 docs/changes/index.html | 8 ++++----
 docs/features/face-inpainting/index.html | 4 ++--
 docs/features/face-swap/index.html | 4 ++--
 docs/features/loras/index.html | 4 ++--
 docs/features/multi-pass-inference/index.html | 6 +++---
 docs/features/multiperson/index.html | 4 ++--
 docs/features/prompt-expansion/index.html | 4 ++--
 docs/features/prompt-masking/index.html | 4 ++--
 docs/features/tiled-upscale/index.html | 4 ++--
 docs/use-cases/ai-photoshoot/index.html | 4 ++--
 docs/use-cases/controlnet/index.html | 4 ++--
 docs/use-cases/faq/index.html | 4 ++--
 docs/use-cases/finetuning-guide/index.html | 4 ++--
 docs/use-cases/inpainting-and-masking/index.html | 4 ++--
 docs/use-cases/masked-portraits/index.html | 4 ++--
 docs/use-cases/product-shots/index.html | 4 ++--
 docs/use-cases/sdxl-training/index.html | 4 ++--
 docs/use-cases/toonify/index.html | 4 ++--
 docs/use-cases/video-generation/index.html | 4 ++--
 index.html | 4 ++--
 markdown-page/index.html | 4 ++--
 54 files changed, 103 insertions(+), 103 deletions(-)
 delete mode 100644 assets/js/5cce4860.1646f486.js
 create mode 100644 assets/js/5cce4860.745a04b4.js
 rename assets/js/{935f2afb.df8f5db5.js => 935f2afb.71ee9e05.js} (67%)
 delete mode 100644 assets/js/9fda4f55.54385613.js
 create mode 100644 assets/js/9fda4f55.9fd222fc.js
 rename assets/js/{runtime~main.7992fbcc.js => runtime~main.1691e83c.js} (57%)

diff --git a/404.html b/404.html
index f20ffc4c..09fd6deb 100644
--- a/404.html
+++ b/404.html
@@ -5,13 +5,13 @@
Page Not Found | Astria documentation - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/5cce4860.1646f486.js b/assets/js/5cce4860.1646f486.js deleted file mode 100644 index e02d8f77..00000000 --- a/assets/js/5cce4860.1646f486.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkastria_docs_2=self.webpackChunkastria_docs_2||[]).push([[2585],{3905:(e,t,n)=>{n.d(t,{Zo:()=>c,kt:()=>g});var a=n(7294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function i(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function s(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var o=a.createContext({}),p=function(e){var t=a.useContext(o),n=t;return e&&(n="function"==typeof e?e(t):s(s({},t),e)),n},c=function(e){var t=p(e.components);return a.createElement(o.Provider,{value:t},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,i=e.originalType,o=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=p(n),m=r,g=u["".concat(o,".").concat(m)]||u[m]||d[m]||i;return n?a.createElement(g,s(s({ref:t},c),{},{components:n})):a.createElement(g,s({ref:t},c))}));function g(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=n.length,s=new Array(i);s[0]=m;var l={};for(var o in t)hasOwnProperty.call(t,o)&&(l[o]=t[o]);l.originalType=e,l[u]="string"==typeof e?e:r,s[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>o,contentTitle:()=>s,default:()=>d,frontMatter:()=>i,metadata:()=>l,toc:()=>p});var a=n(7462),r=(n(7294),n(3905));const i={},s="Multi-Pass inference",l={unversionedId:"features/multi-pass-inference",id:"features/multi-pass-inference",title:"Multi-Pass inference",description:"BETA",source:"@site/docs/features/multi-pass-inference.md",sourceDirName:"features",slug:"/features/multi-pass-inference",permalink:"/docs/features/multi-pass-inference",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/features/multi-pass-inference.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"LoRAs",permalink:"/docs/features/loras"},next:{title:"Multi-Person",permalink:"/docs/features/multiperson"}},o={},p=[{value:"Example prompt",id:"example-prompt",level:2}],c={toc:p},u="wrapper";function d(e){let{components:t,...i}=e;return(0,r.kt)(u,(0,a.Z)({},c,i,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"multi-pass-inference"},"Multi-Pass inference"),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"BETA")),(0,r.kt)("div",{style:{display:"grid","grid-template-columns":"1fr 1fr 1fr 
1fr",gap:"1.5rem"}},(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Gym"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"source.png",src:n(7306).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Hiking"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(5605).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Posh"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(6149).Z,width:"1536",height:"2304"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Swimsuit"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(2626).Z,width:"1536",height:"2304"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Fashion"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"source.png",src:n(5101).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Starwars"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(2092).Z,width:"2048",height:"3070"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Mech-warrior"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(4406).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Dreamy"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(2056).Z,width:"3072",height:"4608"})))),(0,r.kt)("p",null,"The prompt is divided by the ",(0,r.kt)("inlineCode",{parentName:"p"},"BREAK")," keyword such as:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"1st ",(0,r.kt)("strong",{parentName:"li"},"base prompt")," used to generate the background and scene. "),(0,r.kt)("li",{parentName:"ul"},"2nd ",(0,r.kt)("strong",{parentName:"li"},"common prompt")," that's concatenated to each person prompt and the base prompt to avoid repetition."),(0,r.kt)("li",{parentName:"ul"},"3rd ",(0,r.kt)("strong",{parentName:"li"},"person prompt")," Describe what the person is wearing or holding")),(0,r.kt)("p",null,"The advantages of using multi-pass inference are:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Base prompt first pass allows to use the full breadth of an unmodified model. 
As such image compositions, fabrics and textures and details will show much better."),(0,r.kt)("li",{parentName:"ul"},"Separating the person from the background allows to prompt in more details on clothing, accessories and colors."),(0,r.kt)("li",{parentName:"ul"},"Better similarity to the original subject.")),(0,r.kt)("p",null,"See ",(0,r.kt)("a",{parentName:"p",href:"https://www.astria.ai/pricing"},"pricing")," for the cost of this feature."),(0,r.kt)("h2",{id:"example-prompt"},"Example prompt"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-text"},"realistic digital painting, astronaut in a garden on a spring day, by martine johanna and simon stalenhag and chie yoshii and casey weldon and wlop, ornate, dynamic, particulate, rich colors, intricate, elegant, highly detailed, harpers bazaar art, fashion magazine, smooth, sharp focus, 8 k, octane rende --mask_prompt foreground --mask_negative clothes --mask_invert --mask_dilate -20 --hires_denoising_strength 0.2\nnum_images=1\nnegative_prompt=clay, text, watermark, padding, cropped, typography\nseed=\nsteps=30\ncfg_scale=\ncontrolnet=pose\ninput_image_url=https://sdbooth2-production.s3.amazonaws.com/d6ff3soq5pok5tlbcanf599vkw06\nmask_image_url=\ndenoising_strength=\ncontrolnet_conditioning_scale=\ncontrolnet_txt2img=false\nsuper_resolution=true\ninpaint_faces=false\nface_correct=true\nfilm_grain=false\nface_swap=false\nhires_fix=true\nprompt_expansion=false\nar=1:1\nscheduler=dpm++sde_karras\ncolor_grading=\nuse_lpw=true\nw=\nh=\n")))}d.isMDXComponent=!0},7306:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-1-3fad6ed550153c10fa613cda7ce94924.jpeg"},5605:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-2-e292d27c95dd1cbda105cc0b51f780af.jpeg"},6149:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-3-83a8fa2dd107d0d25f4c8574df30e1eb.jpeg"},2626:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-4-bdd36b76b18a0a09e94b2745cfd4b182.jpeg"},5101:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-1-199f63a9bca7524238c55680f6fbc708.jpeg"},2092:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-2-22fd2a63e7df9c89239ae4056945a21c.jpeg"},4406:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-3-afae0bb7b3da2b9a5d6ccd02e75e6095.jpeg"},2056:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-4-7c04a47cf6322e0c81353d960c4a6a03.jpeg"}}]); \ No newline at end of file diff --git a/assets/js/5cce4860.745a04b4.js b/assets/js/5cce4860.745a04b4.js new file mode 100644 index 00000000..af31a16c --- /dev/null +++ b/assets/js/5cce4860.745a04b4.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkastria_docs_2=self.webpackChunkastria_docs_2||[]).push([[2585],{3905:(e,t,n)=>{n.d(t,{Zo:()=>c,kt:()=>g});var a=n(7294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function s(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var o=a.createContext({}),p=function(e){var t=a.useContext(o),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},c=function(e){var t=p(e.components);return 
a.createElement(o.Provider,{value:t},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,s=e.originalType,o=e.parentName,c=l(e,["components","mdxType","originalType","parentName"]),u=p(n),m=r,g=u["".concat(o,".").concat(m)]||u[m]||d[m]||s;return n?a.createElement(g,i(i({ref:t},c),{},{components:n})):a.createElement(g,i({ref:t},c))}));function g(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var s=n.length,i=new Array(s);i[0]=m;var l={};for(var o in t)hasOwnProperty.call(t,o)&&(l[o]=t[o]);l.originalType=e,l[u]="string"==typeof e?e:r,i[1]=l;for(var p=2;p{n.r(t),n.d(t,{assets:()=>o,contentTitle:()=>i,default:()=>d,frontMatter:()=>s,metadata:()=>l,toc:()=>p});var a=n(7462),r=(n(7294),n(3905));const s={},i="Multi-Pass inference",l={unversionedId:"features/multi-pass-inference",id:"features/multi-pass-inference",title:"Multi-Pass inference",description:"BETA",source:"@site/docs/features/multi-pass-inference.md",sourceDirName:"features",slug:"/features/multi-pass-inference",permalink:"/docs/features/multi-pass-inference",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/features/multi-pass-inference.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"LoRAs",permalink:"/docs/features/loras"},next:{title:"Multi-Person",permalink:"/docs/features/multiperson"}},o={},p=[{value:"Syntax",id:"syntax",level:2},{value:"Advantages",id:"advantages",level:2},{value:"Example prompt",id:"example-prompt",level:2}],c={toc:p},u="wrapper";function d(e){let{components:t,...s}=e;return(0,r.kt)(u,(0,a.Z)({},c,s,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"multi-pass-inference"},"Multi-Pass inference"),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"BETA")),(0,r.kt)("div",{style:{display:"grid","grid-template-columns":"1fr 1fr 1fr 1fr",gap:"1.5rem"}},(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Gym"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"source.png",src:n(7306).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Hiking"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(5605).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Posh"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(6149).Z,width:"1536",height:"2304"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Swimsuit"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(2626).Z,width:"1536",height:"2304"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Fashion"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"source.png",src:n(5101).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Starwars"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(2092).Z,width:"2048",height:"3070"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Mech-warrior"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(4406).Z,width:"3072",height:"4608"}))),(0,r.kt)("div",null,(0,r.kt)("figcaption",null,"Dreamy"),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"generated.png",src:n(2056).Z,width:"3072",height:"4608"})))),(0,r.kt)("h2",{id:"syntax"},"Syntax"),(0,r.kt)("p",null,"The prompt is divided by the ",(0,r.kt)("inlineCode",{parentName:"p"},"BREAK")," keyword such as:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"1st ",(0,r.kt)("strong",{parentName:"li"},"base prompt")," used to generate 
the background and scene. "),(0,r.kt)("li",{parentName:"ul"},"2nd ",(0,r.kt)("strong",{parentName:"li"},"common prompt")," that's concatenated to each person prompt and the base prompt to avoid repetition."),(0,r.kt)("li",{parentName:"ul"},"3rd ",(0,r.kt)("strong",{parentName:"li"},"person prompt")," Describe what the person is wearing or holding.")),(0,r.kt)("p",null,"To load the person checkpoint or LoRA use the ",(0,r.kt)("inlineCode",{parentName:"p"},"")," syntax."),(0,r.kt)("admonition",{type:"tip"},(0,r.kt)("p",{parentName:"admonition"},"Note that multi-pass works specifically well with ",(0,r.kt)("inlineCode",{parentName:"p"},"SD15")," models, while ",(0,r.kt)("inlineCode",{parentName:"p"},"SDXL")," models are not recommended.")),(0,r.kt)("h2",{id:"advantages"},"Advantages"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Base prompt first pass allows to use the full breadth of an unmodified model. As such image compositions, fabrics and textures and details will show much better."),(0,r.kt)("li",{parentName:"ul"},"Separating the person from the background allows to prompt in more details on clothing, accessories and colors."),(0,r.kt)("li",{parentName:"ul"},"Better similarity to the original subject.")),(0,r.kt)("p",null,"Check the ",(0,r.kt)("a",{parentName:"p",href:"https://www.astria.ai/gallery?branch=sd15&is_multiperson=true"},"gallery")," for more examples."),(0,r.kt)("p",null,"See ",(0,r.kt)("a",{parentName:"p",href:"https://www.astria.ai/pricing"},"pricing")," for the cost of this feature."),(0,r.kt)("h2",{id:"example-prompt"},"Example prompt"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-text"},"a model in preppy style, old money aesthetic, posh style, elite school stlye, luxurious style, gossip girl neo-prep style, ralph lauren style, country club style, ivy league style\n--tiled_upscale\nBREAK\nBREAK sks woman old money aesthetic, posh style, elite school, luxurious, gossip girl neo-prep style, ralph lauren style, ivy league style \nnum_images=4\nnegative_prompt=asian, , old, , multiple heads, 2 heads, elongated body, double image, 2 faces, multiple people, double head, , (nsfw), nsfw, nsfw, nsfw, nude, nude, nude, porn, porn, porn, naked, naked, nude, porn, black and white, monochrome, skin marks, skin lesions, pimples, , , , black and white, monochrome, greyscale, black and white, monochrome, greyscale, (black and white, monochrome, greyscale), frilly, frilled, lacy, ruffled, armpit hair, victorian, (sunglasses), (sunglasses), (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, ex BREAK BREAK sunglasses, snow, skiing, ski resorts, snowboard\nseed=33504\nsteps=30\ncfg_scale=\ncontrolnet=\ninput_image_url=\nmask_image_url=\ndenoising_strength=\ncontrolnet_conditioning_scale=\ncontrolnet_txt2img=false\nsuper_resolution=true\ninpaint_faces=true\nface_correct=false\nfilm_grain=true\nface_swap=true\nhires_fix=false\nprompt_expansion=false\nar=1:1\nscheduler=dpm++sde_karras\ncolor_grading=\nuse_lpw=true\nw=512\nh=768\n")))}d.isMDXComponent=!0},7306:(e,t,n)=>{n.d(t,{Z:()=>a});const 
a=n.p+"assets/images/multipass-1-3fad6ed550153c10fa613cda7ce94924.jpeg"},5605:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-2-e292d27c95dd1cbda105cc0b51f780af.jpeg"},6149:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-3-83a8fa2dd107d0d25f4c8574df30e1eb.jpeg"},2626:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-4-bdd36b76b18a0a09e94b2745cfd4b182.jpeg"},5101:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-1-199f63a9bca7524238c55680f6fbc708.jpeg"},2092:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-2-22fd2a63e7df9c89239ae4056945a21c.jpeg"},4406:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-3-afae0bb7b3da2b9a5d6ccd02e75e6095.jpeg"},2056:(e,t,n)=>{n.d(t,{Z:()=>a});const a=n.p+"assets/images/multipass-xl-4-7c04a47cf6322e0c81353d960c4a6a03.jpeg"}}]); \ No newline at end of file diff --git a/assets/js/935f2afb.df8f5db5.js b/assets/js/935f2afb.71ee9e05.js similarity index 67% rename from assets/js/935f2afb.df8f5db5.js rename to assets/js/935f2afb.71ee9e05.js index 28d1bb30..15b82b28 100644 --- a/assets/js/935f2afb.df8f5db5.js +++ b/assets/js/935f2afb.71ee9e05.js @@ -1 +1 @@ -"use strict";(self.webpackChunkastria_docs_2=self.webpackChunkastria_docs_2||[]).push([[2197],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"tutorialSidebar":[{"type":"link","label":"Changes","href":"/docs/changes","docId":"changes"},{"type":"category","label":"Use cases","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"AI Photoshoot","href":"/docs/use-cases/ai-photoshoot","docId":"use-cases/ai-photoshoot"},{"type":"link","label":"Fine-tuning guide","href":"/docs/use-cases/finetuning-guide","docId":"use-cases/finetuning-guide"},{"type":"link","label":"SDXL training","href":"/docs/use-cases/sdxl-training","docId":"use-cases/sdxl-training"},{"type":"link","label":"Controlnet","href":"/docs/use-cases/controlnet","docId":"use-cases/controlnet"},{"type":"link","label":"Inpainting and masking","href":"/docs/use-cases/inpainting-and-masking","docId":"use-cases/inpainting-and-masking"},{"type":"link","label":"Product shots","href":"/docs/use-cases/product-shots","docId":"use-cases/product-shots"},{"type":"link","label":"Toonify","href":"/docs/use-cases/toonify","docId":"use-cases/toonify"},{"type":"link","label":"Video Generation","href":"/docs/use-cases/video-generation","docId":"use-cases/video-generation"},{"type":"link","label":"FAQ","href":"/docs/use-cases/faq","docId":"use-cases/faq"},{"type":"link","label":"Masked portraits","href":"/docs/use-cases/masked-portraits","docId":"use-cases/masked-portraits"}],"href":"/docs/category/use-cases"},{"type":"category","label":"Features","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Face inpainting","href":"/docs/features/face-inpainting","docId":"features/face-inpainting"},{"type":"link","label":"Face Swap","href":"/docs/features/face-swap","docId":"features/face-swap"},{"type":"link","label":"LoRAs","href":"/docs/features/loras","docId":"features/loras"},{"type":"link","label":"Multi-Pass inference","href":"/docs/features/multi-pass-inference","docId":"features/multi-pass-inference"},{"type":"link","label":"Multi-Person","href":"/docs/features/multiperson","docId":"features/multiperson"},{"type":"link","label":"Prompt 
expansion","href":"/docs/features/prompt-expansion","docId":"features/prompt-expansion"},{"type":"link","label":"Prompt Masking","href":"/docs/features/prompt-masking","docId":"features/prompt-masking"},{"type":"link","label":"Tiled upscale","href":"/docs/features/tiled-upscale","docId":"features/tiled-upscale"}],"href":"/docs/category/features"},{"type":"category","label":"API","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Overview","href":"/docs/api/overview","docId":"api/overview"},{"type":"category","label":"Tunes","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"The tune object","href":"/docs/api/tune/","docId":"api/tune/tune"},{"type":"link","label":"Create a tune","href":"/docs/api/tune/create","docId":"api/tune/create"},{"type":"link","label":"Retrieve a tune","href":"/docs/api/tune/retrieve","docId":"api/tune/retrieve"},{"type":"link","label":"List all tunes","href":"/docs/api/tune/list","docId":"api/tune/list"}],"href":"/docs/category/tunes"},{"type":"category","label":"Prompts","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"The prompt object","href":"/docs/api/prompt/prompt","docId":"api/prompt/prompt"},{"type":"link","label":"Create a prompt","href":"/docs/api/prompt/create","docId":"api/prompt/create"},{"type":"link","label":"Retrieve a prompt","href":"/docs/api/prompt/retrieve","docId":"api/prompt/retrieve"},{"type":"link","label":"List all prompts","href":"/docs/api/prompt/list","docId":"api/prompt/list"}],"href":"/docs/category/prompts"},{"type":"link","label":"SDXL API usage","href":"/docs/api/sdxl-api","docId":"api/sdxl-api"}],"href":"/docs/category/api"},{"type":"category","label":"API links","items":[{"type":"link","label":"Python SDK","href":"https://github.com/aronbrand/astriaclient"},{"type":"link","label":"Postman","href":"https://documenter.getpostman.com/view/273068/2s8YmSrfgD"},{"type":"link","label":"Bubble.io Plugin","href":"https://bubble.io/plugin/astria-api---stable-diffusion-1674501522435x732640117689417700"},{"type":"link","label":"API key","href":"https://www.astria.ai/users/edit#api"}],"collapsed":false,"collapsible":true}]},"docs":{"api/overview":{"id":"api/overview","title":"Overview","description":"Billing","sidebar":"tutorialSidebar"},"api/prompt/create":{"id":"api/prompt/create","title":"Create a prompt","description":"Creates a new fine-tune model from training images which in turn will be used to create prompts and generate images.","sidebar":"tutorialSidebar"},"api/prompt/list":{"id":"api/prompt/list","title":"List all prompts","description":"Parameters","sidebar":"tutorialSidebar"},"api/prompt/prompt":{"id":"api/prompt/prompt","title":"The prompt object","description":"The prompt object is usually used as a nested resource of tune as prompts are generated using a fine-tune model. A prompt is a text that is used to generate images using a fine-tune model. 
The prompt object contains the generated images.","sidebar":"tutorialSidebar"},"api/prompt/retrieve":{"id":"api/prompt/retrieve","title":"Retrieve a prompt","description":"Parameters","sidebar":"tutorialSidebar"},"api/sdxl-api":{"id":"api/sdxl-api","title":"SDXL API usage","description":"For general tips on SDXL training and inference, see SDXL training","sidebar":"tutorialSidebar"},"api/tune/create":{"id":"api/tune/create","title":"Create a tune","description":"Creates a new fine-tune model from training images which in turn will be used to create prompts and generate images.","sidebar":"tutorialSidebar"},"api/tune/list":{"id":"api/tune/list","title":"List all tunes","description":"Parameters","sidebar":"tutorialSidebar"},"api/tune/retrieve":{"id":"api/tune/retrieve","title":"Retrieve a tune","description":"Parameters","sidebar":"tutorialSidebar"},"api/tune/tune":{"id":"api/tune/tune","title":"The tune object","description":"Tune (or Fine-tune) represents a model that is created using training images to learn a new concept or subject. At its core a fine-tune is a neural-network weights file (usually weights 2GB) and contains the information of the trained images.","sidebar":"tutorialSidebar"},"changes":{"id":"changes","title":"Changes","description":"* 2023-12-14 Prompt masking feature added - to support product shots and Masked Portraits","sidebar":"tutorialSidebar"},"features/face-inpainting":{"id":"features/face-inpainting","title":"Face inpainting","description":"Entire picture","sidebar":"tutorialSidebar"},"features/face-swap":{"id":"features/face-swap","title":"Face Swap","description":"Without face-swap","sidebar":"tutorialSidebar"},"features/loras":{"id":"features/loras","title":"LoRAs","description":"This feature is mainly relevant for SD15 models. SDXL subject fine-tuning use LoRA, and tend to collide with other style LoRAs","sidebar":"tutorialSidebar"},"features/multi-pass-inference":{"id":"features/multi-pass-inference","title":"Multi-Pass inference","description":"BETA","sidebar":"tutorialSidebar"},"features/multiperson":{"id":"features/multiperson","title":"Multi-Person","description":"BETA","sidebar":"tutorialSidebar"},"features/prompt-expansion":{"id":"features/prompt-expansion","title":"Prompt expansion","description":"Prompt expansion toggle under the Advanced prompt collapsible, will augment your prompt text using a GPT language-model and usually results in richer images.","sidebar":"tutorialSidebar"},"features/prompt-masking":{"id":"features/prompt-masking","title":"Prompt Masking","description":"Prompt masking uses a short text to create a mask from the input image. The mask can then be used to inpaint parts of the image. Use the below parameters as part of the prompts to use auto-masking","sidebar":"tutorialSidebar"},"features/tiled-upscale":{"id":"features/tiled-upscale","title":"Tiled upscale","description":"BETA","sidebar":"tutorialSidebar"},"use-cases/ai-photoshoot":{"id":"use-cases/ai-photoshoot","title":"AI Photoshoot","description":"Overview","sidebar":"tutorialSidebar"},"use-cases/controlnet":{"id":"use-cases/controlnet","title":"Controlnet","description":"Preserve composition","sidebar":"tutorialSidebar"},"use-cases/faq":{"id":"use-cases/faq","title":"FAQ","description":"What image size should I upload? 
Is there a size limit to uploads?","sidebar":"tutorialSidebar"},"use-cases/finetuning-guide":{"id":"use-cases/finetuning-guide","title":"Fine-tuning guide","description":"The guide for high quality avatars and AI photography","sidebar":"tutorialSidebar"},"use-cases/inpainting-and-masking":{"id":"use-cases/inpainting-and-masking","title":"Inpainting and masking","description":"Input image","sidebar":"tutorialSidebar"},"use-cases/masked-portraits":{"id":"use-cases/masked-portraits","title":"Masked portraits","description":"BETA","sidebar":"tutorialSidebar"},"use-cases/product-shots":{"id":"use-cases/product-shots","title":"Product shots","description":"Input image","sidebar":"tutorialSidebar"},"use-cases/sdxl-training":{"id":"use-cases/sdxl-training","title":"SDXL training","description":"Overview","sidebar":"tutorialSidebar"},"use-cases/toonify":{"id":"use-cases/toonify","title":"Toonify","description":"Input image","sidebar":"tutorialSidebar"},"use-cases/video-generation":{"id":"use-cases/video-generation","title":"Video Generation","description":"Generate videos using text","sidebar":"tutorialSidebar"}}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkastria_docs_2=self.webpackChunkastria_docs_2||[]).push([[2197],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"tutorialSidebar":[{"type":"link","label":"Changes","href":"/docs/changes","docId":"changes"},{"type":"category","label":"Use cases","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"AI Photoshoot","href":"/docs/use-cases/ai-photoshoot","docId":"use-cases/ai-photoshoot"},{"type":"link","label":"Fine-tuning guide","href":"/docs/use-cases/finetuning-guide","docId":"use-cases/finetuning-guide"},{"type":"link","label":"SDXL training","href":"/docs/use-cases/sdxl-training","docId":"use-cases/sdxl-training"},{"type":"link","label":"Controlnet","href":"/docs/use-cases/controlnet","docId":"use-cases/controlnet"},{"type":"link","label":"Inpainting and masking","href":"/docs/use-cases/inpainting-and-masking","docId":"use-cases/inpainting-and-masking"},{"type":"link","label":"Product shots","href":"/docs/use-cases/product-shots","docId":"use-cases/product-shots"},{"type":"link","label":"Toonify","href":"/docs/use-cases/toonify","docId":"use-cases/toonify"},{"type":"link","label":"Video Generation","href":"/docs/use-cases/video-generation","docId":"use-cases/video-generation"},{"type":"link","label":"FAQ","href":"/docs/use-cases/faq","docId":"use-cases/faq"},{"type":"link","label":"Masked portraits","href":"/docs/use-cases/masked-portraits","docId":"use-cases/masked-portraits"}],"href":"/docs/category/use-cases"},{"type":"category","label":"Features","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Face inpainting","href":"/docs/features/face-inpainting","docId":"features/face-inpainting"},{"type":"link","label":"Face Swap","href":"/docs/features/face-swap","docId":"features/face-swap"},{"type":"link","label":"LoRAs","href":"/docs/features/loras","docId":"features/loras"},{"type":"link","label":"Multi-Pass inference","href":"/docs/features/multi-pass-inference","docId":"features/multi-pass-inference"},{"type":"link","label":"Multi-Person","href":"/docs/features/multiperson","docId":"features/multiperson"},{"type":"link","label":"Prompt 
expansion","href":"/docs/features/prompt-expansion","docId":"features/prompt-expansion"},{"type":"link","label":"Prompt Masking","href":"/docs/features/prompt-masking","docId":"features/prompt-masking"},{"type":"link","label":"Tiled upscale","href":"/docs/features/tiled-upscale","docId":"features/tiled-upscale"}],"href":"/docs/category/features"},{"type":"category","label":"API","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Overview","href":"/docs/api/overview","docId":"api/overview"},{"type":"category","label":"Tunes","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"The tune object","href":"/docs/api/tune/","docId":"api/tune/tune"},{"type":"link","label":"Create a tune","href":"/docs/api/tune/create","docId":"api/tune/create"},{"type":"link","label":"Retrieve a tune","href":"/docs/api/tune/retrieve","docId":"api/tune/retrieve"},{"type":"link","label":"List all tunes","href":"/docs/api/tune/list","docId":"api/tune/list"}],"href":"/docs/category/tunes"},{"type":"category","label":"Prompts","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"The prompt object","href":"/docs/api/prompt/prompt","docId":"api/prompt/prompt"},{"type":"link","label":"Create a prompt","href":"/docs/api/prompt/create","docId":"api/prompt/create"},{"type":"link","label":"Retrieve a prompt","href":"/docs/api/prompt/retrieve","docId":"api/prompt/retrieve"},{"type":"link","label":"List all prompts","href":"/docs/api/prompt/list","docId":"api/prompt/list"}],"href":"/docs/category/prompts"},{"type":"link","label":"SDXL API usage","href":"/docs/api/sdxl-api","docId":"api/sdxl-api"}],"href":"/docs/category/api"},{"type":"category","label":"API links","items":[{"type":"link","label":"Python SDK","href":"https://github.com/aronbrand/astriaclient"},{"type":"link","label":"Postman","href":"https://documenter.getpostman.com/view/273068/2s8YmSrfgD"},{"type":"link","label":"Bubble.io Plugin","href":"https://bubble.io/plugin/astria-api---stable-diffusion-1674501522435x732640117689417700"},{"type":"link","label":"API key","href":"https://www.astria.ai/users/edit#api"}],"collapsed":false,"collapsible":true}]},"docs":{"api/overview":{"id":"api/overview","title":"Overview","description":"Billing","sidebar":"tutorialSidebar"},"api/prompt/create":{"id":"api/prompt/create","title":"Create a prompt","description":"Creates a new fine-tune model from training images which in turn will be used to create prompts and generate images.","sidebar":"tutorialSidebar"},"api/prompt/list":{"id":"api/prompt/list","title":"List all prompts","description":"Parameters","sidebar":"tutorialSidebar"},"api/prompt/prompt":{"id":"api/prompt/prompt","title":"The prompt object","description":"The prompt object is usually used as a nested resource of tune as prompts are generated using a fine-tune model. A prompt is a text that is used to generate images using a fine-tune model. 
The prompt object contains the generated images.","sidebar":"tutorialSidebar"},"api/prompt/retrieve":{"id":"api/prompt/retrieve","title":"Retrieve a prompt","description":"Parameters","sidebar":"tutorialSidebar"},"api/sdxl-api":{"id":"api/sdxl-api","title":"SDXL API usage","description":"For general tips on SDXL training and inference, see SDXL training","sidebar":"tutorialSidebar"},"api/tune/create":{"id":"api/tune/create","title":"Create a tune","description":"Creates a new fine-tune model from training images which in turn will be used to create prompts and generate images.","sidebar":"tutorialSidebar"},"api/tune/list":{"id":"api/tune/list","title":"List all tunes","description":"Parameters","sidebar":"tutorialSidebar"},"api/tune/retrieve":{"id":"api/tune/retrieve","title":"Retrieve a tune","description":"Parameters","sidebar":"tutorialSidebar"},"api/tune/tune":{"id":"api/tune/tune","title":"The tune object","description":"Tune (or Fine-tune) represents a model that is created using training images to learn a new concept or subject. At its core a fine-tune is a neural-network weights file (usually weights 2GB) and contains the information of the trained images.","sidebar":"tutorialSidebar"},"changes":{"id":"changes","title":"Changes","description":"* 2023-01-03 Tiled upscale improved upscaling.","sidebar":"tutorialSidebar"},"features/face-inpainting":{"id":"features/face-inpainting","title":"Face inpainting","description":"Entire picture","sidebar":"tutorialSidebar"},"features/face-swap":{"id":"features/face-swap","title":"Face Swap","description":"Without face-swap","sidebar":"tutorialSidebar"},"features/loras":{"id":"features/loras","title":"LoRAs","description":"This feature is mainly relevant for SD15 models. SDXL subject fine-tuning use LoRA, and tend to collide with other style LoRAs","sidebar":"tutorialSidebar"},"features/multi-pass-inference":{"id":"features/multi-pass-inference","title":"Multi-Pass inference","description":"BETA","sidebar":"tutorialSidebar"},"features/multiperson":{"id":"features/multiperson","title":"Multi-Person","description":"BETA","sidebar":"tutorialSidebar"},"features/prompt-expansion":{"id":"features/prompt-expansion","title":"Prompt expansion","description":"Prompt expansion toggle under the Advanced prompt collapsible, will augment your prompt text using a GPT language-model and usually results in richer images.","sidebar":"tutorialSidebar"},"features/prompt-masking":{"id":"features/prompt-masking","title":"Prompt Masking","description":"Prompt masking uses a short text to create a mask from the input image. The mask can then be used to inpaint parts of the image. Use the below parameters as part of the prompts to use auto-masking","sidebar":"tutorialSidebar"},"features/tiled-upscale":{"id":"features/tiled-upscale","title":"Tiled upscale","description":"BETA","sidebar":"tutorialSidebar"},"use-cases/ai-photoshoot":{"id":"use-cases/ai-photoshoot","title":"AI Photoshoot","description":"Overview","sidebar":"tutorialSidebar"},"use-cases/controlnet":{"id":"use-cases/controlnet","title":"Controlnet","description":"Preserve composition","sidebar":"tutorialSidebar"},"use-cases/faq":{"id":"use-cases/faq","title":"FAQ","description":"What image size should I upload? 
Is there a size limit to uploads?","sidebar":"tutorialSidebar"},"use-cases/finetuning-guide":{"id":"use-cases/finetuning-guide","title":"Fine-tuning guide","description":"The guide for high quality avatars and AI photography","sidebar":"tutorialSidebar"},"use-cases/inpainting-and-masking":{"id":"use-cases/inpainting-and-masking","title":"Inpainting and masking","description":"Input image","sidebar":"tutorialSidebar"},"use-cases/masked-portraits":{"id":"use-cases/masked-portraits","title":"Masked portraits","description":"BETA","sidebar":"tutorialSidebar"},"use-cases/product-shots":{"id":"use-cases/product-shots","title":"Product shots","description":"Input image","sidebar":"tutorialSidebar"},"use-cases/sdxl-training":{"id":"use-cases/sdxl-training","title":"SDXL training","description":"Overview","sidebar":"tutorialSidebar"},"use-cases/toonify":{"id":"use-cases/toonify","title":"Toonify","description":"Input image","sidebar":"tutorialSidebar"},"use-cases/video-generation":{"id":"use-cases/video-generation","title":"Video Generation","description":"Generate videos using text","sidebar":"tutorialSidebar"}}}')}}]); \ No newline at end of file diff --git a/assets/js/9fda4f55.54385613.js b/assets/js/9fda4f55.54385613.js deleted file mode 100644 index e8a755e6..00000000 --- a/assets/js/9fda4f55.54385613.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkastria_docs_2=self.webpackChunkastria_docs_2||[]).push([[4868],{3905:(e,t,r)=>{r.d(t,{Zo:()=>u,kt:()=>f});var n=r(7294);function a(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function o(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function s(e){for(var t=1;t=0||(a[r]=e[r]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var c=n.createContext({}),p=function(e){var t=n.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):s(s({},t),e)),r},u=function(e){var t=p(e.components);return n.createElement(c.Provider,{value:t},e.children)},l="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var r=e.components,a=e.mdxType,o=e.originalType,c=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),l=p(r),m=a,f=l["".concat(c,".").concat(m)]||l[m]||d[m]||o;return r?n.createElement(f,s(s({ref:t},u),{},{components:r})):n.createElement(f,s({ref:t},u))}));function f(e,t){var r=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=r.length,s=new Array(o);s[0]=m;var i={};for(var c in t)hasOwnProperty.call(t,c)&&(i[c]=t[c]);i.originalType=e,i[l]="string"==typeof e?e:a,s[1]=i;for(var p=2;p{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>s,default:()=>d,frontMatter:()=>o,metadata:()=>i,toc:()=>p});var n=r(7462),a=(r(7294),r(3905));const o={},s="Changes",i={unversionedId:"changes",id:"changes",title:"Changes",description:"* 2023-12-14 Prompt masking feature added - to support product shots and Masked 
Portraits",source:"@site/docs/0-changes.md",sourceDirName:".",slug:"/changes",permalink:"/docs/changes",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/0-changes.md",tags:[],version:"current",sidebarPosition:0,frontMatter:{},sidebar:"tutorialSidebar",next:{title:"Use cases",permalink:"/docs/category/use-cases"}},c={},p=[],u={toc:p},l="wrapper";function d(e){let{components:t,...r}=e;return(0,a.kt)(l,(0,n.Z)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"changes"},"Changes"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"2023-12-14 ",(0,a.kt)("a",{parentName:"li",href:"/docs/features/prompt-masking"},"Prompt masking")," feature added - to support ",(0,a.kt)("a",{parentName:"li",href:"/docs/use-cases/product-shots"},"product shots")," and ",(0,a.kt)("a",{parentName:"li",href:"/docs/use-cases/masked-portraits"},"Masked Portraits")),(0,a.kt)("li",{parentName:"ul"},"2023-11-27 ",(0,a.kt)("strong",{parentName:"li"},"LCM")," (Latent Consistency Models) scheduler allows inference in 6 steps "),(0,a.kt)("li",{parentName:"ul"},"2023-11-01 ",(0,a.kt)("a",{parentName:"li",href:"/docs/features/multiperson"},"Multi-person")," inference using LoRA and SD15 models "),(0,a.kt)("li",{parentName:"ul"},"2023-10-29 ",(0,a.kt)("a",{parentName:"li",href:"/docs/features/face-swap"},"Face-Swap")," feature added - uses training images to improve inference time similarity"),(0,a.kt)("li",{parentName:"ul"},"2023-10-22 ",(0,a.kt)("a",{parentName:"li",href:"https://www.astria.ai/themes"},"Themes")," - Have ChatGPT create 10 prompts for you from a short 2-4 word theme description.")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/9fda4f55.9fd222fc.js b/assets/js/9fda4f55.9fd222fc.js new file mode 100644 index 00000000..0ff2e80c --- /dev/null +++ b/assets/js/9fda4f55.9fd222fc.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkastria_docs_2=self.webpackChunkastria_docs_2||[]).push([[4868],{3905:(e,t,r)=>{r.d(t,{Zo:()=>p,kt:()=>f});var a=r(7294);function n(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function s(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,a)}return r}function o(e){for(var t=1;t=0||(n[r]=e[r]);return n}(e,t);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(n[r]=e[r])}return n}var c=a.createContext({}),l=function(e){var t=a.useContext(c),r=t;return e&&(r="function"==typeof e?e(t):o(o({},t),e)),r},p=function(e){var t=l(e.components);return a.createElement(c.Provider,{value:t},e.children)},u="mdxType",m={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},d=a.forwardRef((function(e,t){var r=e.components,n=e.mdxType,s=e.originalType,c=e.parentName,p=i(e,["components","mdxType","originalType","parentName"]),u=l(r),d=n,f=u["".concat(c,".").concat(d)]||u[d]||m[d]||s;return r?a.createElement(f,o(o({ref:t},p),{},{components:r})):a.createElement(f,o({ref:t},p))}));function f(e,t){var r=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var s=r.length,o=new Array(s);o[0]=d;var i={};for(var c in t)hasOwnProperty.call(t,c)&&(i[c]=t[c]);i.originalType=e,i[u]="string"==typeof e?e:n,o[1]=i;for(var 
l=2;l{r.r(t),r.d(t,{assets:()=>c,contentTitle:()=>o,default:()=>m,frontMatter:()=>s,metadata:()=>i,toc:()=>l});var a=r(7462),n=(r(7294),r(3905));const s={},o="Changes",i={unversionedId:"changes",id:"changes",title:"Changes",description:"* 2023-01-03 Tiled upscale improved upscaling.",source:"@site/docs/0-changes.md",sourceDirName:".",slug:"/changes",permalink:"/docs/changes",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/0-changes.md",tags:[],version:"current",sidebarPosition:0,frontMatter:{},sidebar:"tutorialSidebar",next:{title:"Use cases",permalink:"/docs/category/use-cases"}},c={},l=[],p={toc:l},u="wrapper";function m(e){let{components:t,...r}=e;return(0,n.kt)(u,(0,a.Z)({},p,r,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("h1",{id:"changes"},"Changes"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},"2023-01-03 ",(0,n.kt)("a",{parentName:"li",href:"/docs/features/tiled-upscale"},"Tiled upscale")," improved upscaling."),(0,n.kt)("li",{parentName:"ul"},"2023-01-03 ",(0,n.kt)("a",{parentName:"li",href:"/docs/features/multi-pass-inference"},"Multi pass inference")," unlocks new level of detail, image composition and similarity."),(0,n.kt)("li",{parentName:"ul"},"2023-12-14 ",(0,n.kt)("a",{parentName:"li",href:"/docs/features/prompt-masking"},"Prompt masking")," feature added - to support ",(0,n.kt)("a",{parentName:"li",href:"/docs/use-cases/product-shots"},"product shots")," and ",(0,n.kt)("a",{parentName:"li",href:"/docs/use-cases/masked-portraits"},"Masked Portraits")),(0,n.kt)("li",{parentName:"ul"},"2023-11-27 ",(0,n.kt)("strong",{parentName:"li"},"LCM")," (Latent Consistency Models) scheduler allows inference in 6 steps "),(0,n.kt)("li",{parentName:"ul"},"2023-11-01 ",(0,n.kt)("a",{parentName:"li",href:"/docs/features/multiperson"},"Multi-person")," inference using LoRA and SD15 models "),(0,n.kt)("li",{parentName:"ul"},"2023-10-29 ",(0,n.kt)("a",{parentName:"li",href:"/docs/features/face-swap"},"Face-Swap")," feature added - uses training images to improve inference time similarity"),(0,n.kt)("li",{parentName:"ul"},"2023-10-22 ",(0,n.kt)("a",{parentName:"li",href:"https://www.astria.ai/themes"},"Themes")," - Have ChatGPT create 10 prompts for you from a short 2-4 word theme description.")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.7992fbcc.js b/assets/js/runtime~main.1691e83c.js similarity index 57% rename from assets/js/runtime~main.7992fbcc.js rename to assets/js/runtime~main.1691e83c.js index f28aabd6..bf9dbf1e 100644 --- a/assets/js/runtime~main.7992fbcc.js +++ b/assets/js/runtime~main.1691e83c.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,c,d,t={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={id:e,loaded:!1,exports:{}};return t[e].call(f.exports,f,f.exports,b),f.loaded=!0,f.exports}b.m=t,b.c=r,e=[],b.O=(a,f,c,d)=>{if(!f){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return 
t.default=()=>e,b.d(d,t),d},b.d=(e,a)=>{for(var f in a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"85848f58",110:"66406991",453:"30a24c52",533:"b2b675dd",674:"6ff77412",948:"8717b14a",1124:"d3225ce8",1433:"76161ddb",1477:"b2f554cd",1599:"c231ecf4",1633:"031793e1",1713:"a7023ddc",1716:"f2bb83c3",1914:"d9f32620",1929:"397ab8ba",2062:"b83527cb",2197:"935f2afb",2247:"3ee62890",2267:"59362658",2362:"e273c56f",2366:"d537b4da",2532:"41484b8f",2535:"814f3328",2576:"0088b24a",2585:"5cce4860",2656:"7f726b58",2859:"4206f64f",2912:"318e8f3b",3085:"1f391b9e",3089:"a6aa9e1f",3205:"a80da1cf",3237:"1df93b7f",3514:"73664a40",3608:"9e4087bc",3911:"57d2d9a8",4013:"01a85c17",4762:"96713d40",4868:"9fda4f55",6058:"71221d59",6103:"ccc49370",6450:"e8766d5f",6468:"eeb7a3e9",6567:"9cceef76",6614:"f994835d",6868:"989eb559",6938:"608ae6a4",6995:"e2fc4889",7178:"096bfee4",7414:"393be207",7721:"084d163f",7757:"eb3f1c80",7918:"17896441",8154:"fac6c085",8610:"6875c492",8636:"f4f34a3a",8772:"e4fcfa79",8946:"fb723fd6",8999:"d0ae32ce",9003:"925b3f96",9035:"4c9e35b1",9410:"3b57aab9",9418:"96b27e6a",9493:"988781e2",9514:"1be78505",9558:"6f719c7f",9611:"dc0d6ca4",9642:"7661071f",9700:"e16015ca",9817:"14eb3368"}[e]||e)+"."+{53:"fff5bd10",110:"68aa80f8",453:"df77d90b",533:"ee3fafcc",674:"0731ea46",948:"7ca9b0f5",1124:"dc8b4ec9",1433:"08b20d78",1477:"82540fbf",1506:"9e78434d",1599:"d95fe84c",1633:"f8b29aaa",1713:"446ba93b",1716:"11baadc6",1914:"034139fd",1929:"42a1f707",2062:"b76063fd",2197:"df8f5db5",2247:"f7d37489",2267:"fbf9f27e",2362:"b5404124",2366:"eca3fa6f",2529:"ea48620b",2532:"07d34590",2535:"3c3d6838",2576:"639955f8",2585:"1646f486",2656:"80b2249e",2859:"292c3316",2912:"4ada5e94",3085:"ae69d915",3089:"a2ffdd90",3205:"f07c3acc",3237:"a6e6298a",3514:"659f57de",3608:"4e5ea484",3911:"2d5d350f",4013:"503509ed",4762:"c52f483d",4868:"54385613",4972:"de96bb75",6058:"8bc3f404",6103:"d1532964",6450:"d54d97d5",6468:"f0bdada2",6567:"f520c83a",6614:"86aea434",6868:"2407b16c",6938:"9378a58c",6995:"d90b5ebe",7178:"8f8aa071",7414:"01e16d5a",7721:"798156d9",7757:"d83576d5",7918:"679a7f0e",8154:"b14f5788",8610:"bd47f535",8636:"197f7bc0",8772:"40e82205",8946:"9869f4e3",8999:"06fbe842",9003:"1a5c846a",9035:"42334068",9410:"f5a8508b",9418:"61c47981",9493:"965a9b5a",9514:"cd0faa7e",9558:"e727d18f",9611:"8fc8a0c9",9642:"2ed2ecd0",9700:"10b12c91",9817:"2d4a4f56"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="astria-docs-2:",b.l=(e,a,f,t)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return 
e={17896441:"7918",59362658:"2267",66406991:"110","85848f58":"53","30a24c52":"453",b2b675dd:"533","6ff77412":"674","8717b14a":"948",d3225ce8:"1124","76161ddb":"1433",b2f554cd:"1477",c231ecf4:"1599","031793e1":"1633",a7023ddc:"1713",f2bb83c3:"1716",d9f32620:"1914","397ab8ba":"1929",b83527cb:"2062","935f2afb":"2197","3ee62890":"2247",e273c56f:"2362",d537b4da:"2366","41484b8f":"2532","814f3328":"2535","0088b24a":"2576","5cce4860":"2585","7f726b58":"2656","4206f64f":"2859","318e8f3b":"2912","1f391b9e":"3085",a6aa9e1f:"3089",a80da1cf:"3205","1df93b7f":"3237","73664a40":"3514","9e4087bc":"3608","57d2d9a8":"3911","01a85c17":"4013","96713d40":"4762","9fda4f55":"4868","71221d59":"6058",ccc49370:"6103",e8766d5f:"6450",eeb7a3e9:"6468","9cceef76":"6567",f994835d:"6614","989eb559":"6868","608ae6a4":"6938",e2fc4889:"6995","096bfee4":"7178","393be207":"7414","084d163f":"7721",eb3f1c80:"7757",fac6c085:"8154","6875c492":"8610",f4f34a3a:"8636",e4fcfa79:"8772",fb723fd6:"8946",d0ae32ce:"8999","925b3f96":"9003","4c9e35b1":"9035","3b57aab9":"9410","96b27e6a":"9418","988781e2":"9493","1be78505":"9514","6f719c7f":"9558",dc0d6ca4:"9611","7661071f":"9642",e16015ca:"9700","14eb3368":"9817"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n{"use strict";var e,a,f,c,d,t={},b={};function r(e){var a=b[e];if(void 0!==a)return a.exports;var f=b[e]={id:e,loaded:!1,exports:{}};return t[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=t,r.c=b,e=[],r.O=(a,f,c,d)=>{if(!f){var t=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(b=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var t={};a=a||[null,f({}),f([]),f(f)];for(var b=2&c&&e;"object"==typeof b&&!~a.indexOf(b);b=f(b))Object.getOwnPropertyNames(b).forEach((a=>t[a]=()=>e[a]));return t.default=()=>e,r.d(d,t),d},r.d=(e,a)=>{for(var f in 
a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"85848f58",110:"66406991",453:"30a24c52",533:"b2b675dd",674:"6ff77412",948:"8717b14a",1124:"d3225ce8",1433:"76161ddb",1477:"b2f554cd",1599:"c231ecf4",1633:"031793e1",1713:"a7023ddc",1716:"f2bb83c3",1914:"d9f32620",1929:"397ab8ba",2062:"b83527cb",2197:"935f2afb",2247:"3ee62890",2267:"59362658",2362:"e273c56f",2366:"d537b4da",2532:"41484b8f",2535:"814f3328",2576:"0088b24a",2585:"5cce4860",2656:"7f726b58",2859:"4206f64f",2912:"318e8f3b",3085:"1f391b9e",3089:"a6aa9e1f",3205:"a80da1cf",3237:"1df93b7f",3514:"73664a40",3608:"9e4087bc",3911:"57d2d9a8",4013:"01a85c17",4762:"96713d40",4868:"9fda4f55",6058:"71221d59",6103:"ccc49370",6450:"e8766d5f",6468:"eeb7a3e9",6567:"9cceef76",6614:"f994835d",6868:"989eb559",6938:"608ae6a4",6995:"e2fc4889",7178:"096bfee4",7414:"393be207",7721:"084d163f",7757:"eb3f1c80",7918:"17896441",8154:"fac6c085",8610:"6875c492",8636:"f4f34a3a",8772:"e4fcfa79",8946:"fb723fd6",8999:"d0ae32ce",9003:"925b3f96",9035:"4c9e35b1",9410:"3b57aab9",9418:"96b27e6a",9493:"988781e2",9514:"1be78505",9558:"6f719c7f",9611:"dc0d6ca4",9642:"7661071f",9700:"e16015ca",9817:"14eb3368"}[e]||e)+"."+{53:"fff5bd10",110:"68aa80f8",453:"df77d90b",533:"ee3fafcc",674:"0731ea46",948:"7ca9b0f5",1124:"dc8b4ec9",1433:"08b20d78",1477:"82540fbf",1506:"9e78434d",1599:"d95fe84c",1633:"f8b29aaa",1713:"446ba93b",1716:"11baadc6",1914:"034139fd",1929:"42a1f707",2062:"b76063fd",2197:"71ee9e05",2247:"f7d37489",2267:"fbf9f27e",2362:"b5404124",2366:"eca3fa6f",2529:"ea48620b",2532:"07d34590",2535:"3c3d6838",2576:"639955f8",2585:"745a04b4",2656:"80b2249e",2859:"292c3316",2912:"4ada5e94",3085:"ae69d915",3089:"a2ffdd90",3205:"f07c3acc",3237:"a6e6298a",3514:"659f57de",3608:"4e5ea484",3911:"2d5d350f",4013:"503509ed",4762:"c52f483d",4868:"9fd222fc",4972:"de96bb75",6058:"8bc3f404",6103:"d1532964",6450:"d54d97d5",6468:"f0bdada2",6567:"f520c83a",6614:"86aea434",6868:"2407b16c",6938:"9378a58c",6995:"d90b5ebe",7178:"8f8aa071",7414:"01e16d5a",7721:"798156d9",7757:"d83576d5",7918:"679a7f0e",8154:"b14f5788",8610:"bd47f535",8636:"197f7bc0",8772:"40e82205",8946:"9869f4e3",8999:"06fbe842",9003:"1a5c846a",9035:"42334068",9410:"f5a8508b",9418:"61c47981",9493:"965a9b5a",9514:"cd0faa7e",9558:"e727d18f",9611:"8fc8a0c9",9642:"2ed2ecd0",9700:"10b12c91",9817:"2d4a4f56"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="astria-docs-2:",r.l=(e,a,f,t)=>{if(c[e])c[e].push(a);else{var b,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{b.onerror=b.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],b.parentNode&&b.parentNode.removeChild(b),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:b}),12e4);b.onerror=l.bind(null,b.onerror),b.onload=l.bind(null,b.onload),o&&document.head.appendChild(b)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/",r.gca=function(e){return 
e={17896441:"7918",59362658:"2267",66406991:"110","85848f58":"53","30a24c52":"453",b2b675dd:"533","6ff77412":"674","8717b14a":"948",d3225ce8:"1124","76161ddb":"1433",b2f554cd:"1477",c231ecf4:"1599","031793e1":"1633",a7023ddc:"1713",f2bb83c3:"1716",d9f32620:"1914","397ab8ba":"1929",b83527cb:"2062","935f2afb":"2197","3ee62890":"2247",e273c56f:"2362",d537b4da:"2366","41484b8f":"2532","814f3328":"2535","0088b24a":"2576","5cce4860":"2585","7f726b58":"2656","4206f64f":"2859","318e8f3b":"2912","1f391b9e":"3085",a6aa9e1f:"3089",a80da1cf:"3205","1df93b7f":"3237","73664a40":"3514","9e4087bc":"3608","57d2d9a8":"3911","01a85c17":"4013","96713d40":"4762","9fda4f55":"4868","71221d59":"6058",ccc49370:"6103",e8766d5f:"6450",eeb7a3e9:"6468","9cceef76":"6567",f994835d:"6614","989eb559":"6868","608ae6a4":"6938",e2fc4889:"6995","096bfee4":"7178","393be207":"7414","084d163f":"7721",eb3f1c80:"7757",fac6c085:"8154","6875c492":"8610",f4f34a3a:"8636",e4fcfa79:"8772",fb723fd6:"8946",d0ae32ce:"8999","925b3f96":"9003","4c9e35b1":"9035","3b57aab9":"9410","96b27e6a":"9418","988781e2":"9493","1be78505":"9514","6f719c7f":"9558",dc0d6ca4:"9611","7661071f":"9642",e16015ca:"9700","14eb3368":"9817"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var c=r.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var t=r.p+r.u(a),b=new Error;r.l(t,(f=>{if(r.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;b.message="Loading chunk "+a+" failed.\n("+d+": "+t+")",b.name="ChunkLoadError",b.type=d,b.request=t,c[1](b)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,t=f[0],b=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(c in b)r.o(b,c)&&(r.m[c]=b[c]);if(o)var i=o(r)}for(a&&a(f);n Archive | Astria documentation - + - + \ No newline at end of file diff --git a/blog/first-blog-post/index.html b/blog/first-blog-post/index.html index f434848b..c6b3fa49 100644 --- a/blog/first-blog-post/index.html +++ b/blog/first-blog-post/index.html @@ -5,13 +5,13 @@ First Blog Post | Astria documentation - +

First Blog Post

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 46c3b89d..ad3592f4 100644 --- a/blog/index.html +++ b/blog/index.html @@ -5,13 +5,13 @@ Blog | Astria documentation - +

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/long-blog-post/index.html b/blog/long-blog-post/index.html index d3a66a80..dc9ac433 100644 --- a/blog/long-blog-post/index.html +++ b/blog/long-blog-post/index.html @@ -5,13 +5,13 @@ Long Blog Post | Astria documentation - +

Long Blog Post

· 3 min read
Endilie Yacop Sucipto

This is the summary of a very long blog post,

Use a <!-- truncate --> comment to limit blog post size in the list view.

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/mdx-blog-post/index.html b/blog/mdx-blog-post/index.html index 81bb953a..a89d756f 100644 --- a/blog/mdx-blog-post/index.html +++ b/blog/mdx-blog-post/index.html @@ -5,13 +5,13 @@ MDX Blog Post | Astria documentation - +
- + \ No newline at end of file diff --git a/blog/tags/docusaurus/index.html b/blog/tags/docusaurus/index.html index a1f29660..0bee4a2a 100644 --- a/blog/tags/docusaurus/index.html +++ b/blog/tags/docusaurus/index.html @@ -5,13 +5,13 @@ 4 posts tagged with "docusaurus" | Astria documentation - +

4 posts tagged with "docusaurus"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/facebook/index.html b/blog/tags/facebook/index.html index 675605b4..5cdae5c9 100644 --- a/blog/tags/facebook/index.html +++ b/blog/tags/facebook/index.html @@ -5,13 +5,13 @@ One post tagged with "facebook" | Astria documentation - +

One post tagged with "facebook"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hello/index.html b/blog/tags/hello/index.html index a3ab1c4a..79f808ee 100644 --- a/blog/tags/hello/index.html +++ b/blog/tags/hello/index.html @@ -5,13 +5,13 @@ 2 posts tagged with "hello" | Astria documentation - +

2 posts tagged with "hello"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hola/index.html b/blog/tags/hola/index.html index b820168d..c653c1b3 100644 --- a/blog/tags/hola/index.html +++ b/blog/tags/hola/index.html @@ -5,13 +5,13 @@ One post tagged with "hola" | Astria documentation - +

One post tagged with "hola"

View All Tags

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/index.html b/blog/tags/index.html index 5b4e9bba..4f592e7c 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -5,13 +5,13 @@ Tags | Astria documentation - + - + \ No newline at end of file diff --git a/blog/welcome/index.html b/blog/welcome/index.html index 2ce374b8..0bca5968 100644 --- a/blog/welcome/index.html +++ b/blog/welcome/index.html @@ -5,13 +5,13 @@ Welcome | Astria documentation - +

Welcome

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/docs/api/overview/index.html b/docs/api/overview/index.html index 7476cfd3..7bfb8041 100644 --- a/docs/api/overview/index.html +++ b/docs/api/overview/index.html @@ -5,7 +5,7 @@ Overview | Astria documentation - + @@ -13,7 +13,7 @@

Astria Fine-tuning API - Overview

Billing

The API uses account balance just like the web interface. See the pricing page for more details.

Mock testing

The API allows creating a mock Tune object with attribute branch=fast. See the Create a tune documentation. The API call for creating a fast tune is free, and subsequent prompt calls will return mock images without incurring any charges. This is useful for testing your integration with the API. Once you're ready for production, you can purchase credits and adjust the quantity on the checkout page to how much you need.
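
As a minimal sketch (the title, name, and image URL are illustrative; see the full Create a tune examples later in this document), a mock tune can be created with:

curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes \
-F tune[title]="Mock test - 1234-5678" \
-F tune[name]=man \
-F tune[branch]="fast" \
-F "tune[image_urls][]=https://i.imgur.com/HLHBnl9.jpeg"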

Auto top off

To allow your account to work without disruption, the top-off feature can be enabled to refill the account each time the balance reaches zero. Auto top-off can be enabled on the billing page. It is recommended to set the top-off amount to at least a week's worth of API calls to avoid hitting the rate limit on top-offs - frequent top-offs can cause credit card charges to fail or bank declines.

Storage

Generated images, training pictures, and models will be automatically deleted 30 days after training has ended. You may delete the fine-tune object, including the trained model, training images, and generated images, at any time before that using the delete API calls. You may also opt in to automatically extend model storage.

Authorization

The API uses a bearer Authorization header scheme. API calls should include the header:

Authorization: Bearer sd_XXXXXX

Find your API key on the API settings page under your account settings.
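
For example, a minimal authorized request listing your tunes looks like:

curl -H "Authorization: Bearer sd_XXXXXX" https://api.astria.ai/tunes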

REST API

The Astria API is built as a REST API. The main resources are tune and prompt, and each one supports all the CRUD operations. Prompts are usually used as a nested resource on top of tunes (i.e.: /tunes/:id/prompts).

Error Codes

tip

Pay attention to the 504 error below and turn on idempotency for your account to avoid duplicate objects.

422 - Validation error - Log these errors and use an exception tracking system such as Rollbar or Sentry to get notified. Do not retry these requests.

500 - Internal server error - Such requests should be retried with exponential backoff and an initial wait time of 30 seconds.

504 - Gateway timeout - This error indicates that request processing took more than 30 seconds, which is the maximum request timeout. In most cases the request has probably been processed, so you should avoid retrying it. To get the response, see the Idempotency section.
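
For the 500 case, a hedged sketch of such a retry loop in shell (the endpoint and fields follow the prompt examples in this document; the loop bounds are illustrative):

for attempt in 1 2 3; do
  # Capture the HTTP status code; the response body is saved to response.json
  code=$(curl -s -o response.json -w "%{http_code}" -X POST \
    -H "Authorization: Bearer $API_KEY" \
    -F prompt[text]="a painting of ohwx man in the style of Van Gogh" \
    https://api.astria.ai/tunes/1/prompts)
  # Retry only on 500, waiting 30s, then 60s, then 120s
  [ "$code" != "500" ] && break
  sleep $((30 * 2 ** (attempt - 1)))
done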

Idempotency

Idempotency is a principle in programming which means that the same operation (a request, in our case) can be made multiple times without changing the result. This is useful when a request is interrupted and you want to make sure it is processed only once, avoiding duplicate objects.

Idempotency can be enabled for the account in the API settings.

For tune requests with the same title, idempotency will return the existing tune object and will not create a new one. This is useful when you want to make sure you don't create duplicate tunes. In this case, set the tune title to a unique value, such as a UUID that identifies the transaction.
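
For example (a sketch with an illustrative UUID and image URL), two identical requests like the following would return the same tune object instead of creating a duplicate:

curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes \
-F tune[title]="9f0c1d2e-3a4b-5c6d-7e8f-901234567890" \
-F tune[name]=man \
-F "tune[image_urls][]=https://i.imgur.com/HLHBnl9.jpeg"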

For prompt requests with the same attributes, idempotency will return the existing prompt object and will not create a new one. Prompt attributes considered for idempotency are text, tune_id, cfg_scale, steps, seed, callback, negative_prompt, super_resolution, face_correct, ar, num_images, controlnet, denoising_strength, use_lpw, controlnet_conditioning_scale, w, h, hires_fix, scheduler, controlnet_txt2img, inpaint_faces.

When retrying a 504 error with idempotency enabled, make sure to wait 60 seconds before retrying the request, to allow the previous request to finish processing.

- + \ No newline at end of file diff --git a/docs/api/prompt/create/index.html b/docs/api/prompt/create/index.html index 287cb2c4..58f546c1 100644 --- a/docs/api/prompt/create/index.html +++ b/docs/api/prompt/create/index.html @@ -5,13 +5,13 @@ Create a prompt | Astria documentation - +

Create a prompt

Creates a new prompt for a fine-tuned model, which will be queued for processing and used to generate images.

Parameters

text (required)

Description of the image.

negative_prompt (optional)

A comma separated list of words that should not appear in the image.

callback (optional)

A URL that will be called when the prompt is done processing. The callback is a POST request whose body contains the prompt object.

ar (optional)

Aspect ratio. One of: 1:1, portrait, 16:9, landscape.

num_images (optional)

Number of images to generate. Range: 1-8.

seed (optional)

Random seed used to create consistent, reproducible results. Range: 0 to 2^32.

super_resolution (optional)

Boolean. 4x super-resolution.

inpaint_faces (optional)

Boolean. Requires super-resolution on. Inpaints faces.

hires_fix (optional)

Boolean. Adds details during super-resolution. Available only when super_resolution is true.

face_correct (optional)

Boolean. Runs another AI model on top to correct the face in the image.

face_swap (optional)

Boolean. Uses training images to swap the face and enhance resemblance.

cfg_scale (optional)

Float. How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt). Range 0-15

steps (optional)

Integer. Number of diffusion steps to run. Range: 0-50.

use_lpw (optional)

Boolean. Use weighted prompts.

w (optional)

width - In multiples of 8.

h (optional)

height - In multiples of 8.

scheduler (optional)

Enum: euler, euler_a, dpm++2m_karras, dpm++sde_karras. If not specified, the account's default scheduler will be used.

color_grading (optional)

enum: Film Velvia, Film Portra, Ektar.

film_grain (optional)

boolean - Adds noise to the image to make it look more realistic.

Img2Img / ControlNet

controlnet (optional)

BETA. Requires input_image. Possible values: lineart, canny, depth, mlsd, hed, pose, pose_with_hand, pose_with_face, pose_face_and_hand, tile, qr.

denoising_strength (optional)

For img2img. 1.0 follows the prompt entirely; 0.0 preserves the input image. Range: 0.0-1.0. Default: 0.8.

controlnet_conditioning_scale (optional)

Strength of controlnet conditioning. 0.0-1.0

controlnet_txt2img (optional)

Boolean toggle. True for text-to-image ControlNet; false for image-to-image ControlNet.

input_image (optional)

Binary multi-part request with the image. Used in conjunction with controlnet parameter.

input_image_url (optional)

URL to an image. Used in conjunction with controlnet parameter.

mask_image (optional)

Binary multi-part request with a single-channel mask image. Used in conjunction with the input_image parameter for inpainting.

mask_image_url (optional)

URL to a single-channel mask image. Used in conjunction with the input_image_url parameter for inpainting.
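
A hedged sketch of an img2img request with pose ControlNet (the values and input image URL are illustrative; the parameters are the ones documented above):

curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/1/prompts \
-F prompt[text]="a painting of ohwx man in the style of Van Gogh" \
-F prompt[controlnet]="pose" \
-F prompt[input_image_url]="https://i.imgur.com/HLHBnl9.jpeg" \
-F prompt[denoising_strength]=0.8 \
-F prompt[controlnet_conditioning_scale]=0.7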

Returns

Returns a prompt object if successful, which will start processing once the tune is done training.

POST /tunes/:id/prompts

curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/1/prompts \
-F prompt[text]="a painting of ohwx man in the style of Van Gogh" \
-F prompt[negative_prompt]="old, blemish, wrin" \
-F prompt[super_resolution]=true \
-F prompt[face_correct]=true \
-F prompt[callback]="https://optional-callback-url.com/to-your-service-when-ready?prompt_id=1"

Response

{
"id": 1,
"callback": "https://optional-callback-url.com/to-your-service-when-ready?prompt_id=1",
"text": "a painting of ohwx man in the style of Van Gogh",
"negative_prompt": "old, blemish, wrinkles, mole",
"cfg_scale": null,
"steps": null,
"seed": null,
"trained_at": null,
"started_training_at": null,
"created_at": "2022-10-06T16:12:54.505Z",
"updated_at": "2022-10-06T16:12:54.505Z",
"tune_id": 1,
"url": "http://api.astria.ai/tunes/1/prompts/1.json"
}
- + \ No newline at end of file diff --git a/docs/api/prompt/list/index.html b/docs/api/prompt/list/index.html index 917138cd..f19191b8 100644 --- a/docs/api/prompt/list/index.html +++ b/docs/api/prompt/list/index.html @@ -5,13 +5,13 @@ List all prompts | Astria documentation - +

List all prompts

Parameters

offset (optional)

Starting offset for the list of prompts. Default: 0. Current page size is 20.
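
For example (a hedged sketch, assuming offset is passed as a query parameter):

curl -X GET -H "Authorization: Bearer $API_KEY" "https://api.astria.ai/tunes/1/prompts?offset=20"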

Returns

An array of prompts owned by the authenticated user. If used as a nested resource (in the URL), returns prompts owned by the tune.

GET /tunes/:id/prompts

curl -X GET -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/1/prompts

Response


[
{
"id": 1,
"callback": "https://optional-callback-url.com/to-your-service-when-ready?user_id=your_internal_user_id&transaction_id=internal_transaction_id",
"text": "a painting of ohwx man in the style of Van Gogh",
"negative_prompt": "old, blemish, wrinkles, mole",
"cfg_scale": null,
"steps": null,
"seed": null,
"trained_at": null,
"started_training_at": null,
"created_at": "2022-10-06T16:12:54.505Z",
"updated_at": "2022-10-06T16:12:54.505Z",
"tune_id": 1,
"url": "http://api.astria.ai/tunes/1/prompts/1.json"
}
]
- + \ No newline at end of file diff --git a/docs/api/prompt/prompt/index.html b/docs/api/prompt/prompt/index.html index 389a9a3c..82519160 100644 --- a/docs/api/prompt/prompt/index.html +++ b/docs/api/prompt/prompt/index.html @@ -5,13 +5,13 @@ The prompt object | Astria documentation - + - + \ No newline at end of file diff --git a/docs/api/prompt/retrieve/index.html b/docs/api/prompt/retrieve/index.html index 33a9d30d..1d1e6305 100644 --- a/docs/api/prompt/retrieve/index.html +++ b/docs/api/prompt/retrieve/index.html @@ -5,13 +5,13 @@ Retrieve a prompt | Astria documentation - +

Retrieve a prompt

Parameters

No parameters

Returns

Returns the prompt object.

GET /tunes/:tune_id/prompts/:id

curl -X GET -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/1/prompts/1

Response

{
"id": 1,
"callback": "https://optional-callback-url.com/to-your-service-when-ready",
"text": "a painting of ohwx man in the style of Van Gogh",
"negative_prompt": "old, blemish, wrinkles, mole",
"cfg_scale": null,
"steps": null,
"seed": null,
"trained_at": null,
"started_training_at": null,
"created_at": "2022-10-06T16:12:54.505Z",
"updated_at": "2022-10-06T16:12:54.505Z",
"tune_id": 1,
"url": "http://api.astria.ai/tunes/1/prompts/1.json"
}
- + \ No newline at end of file diff --git a/docs/api/sdxl-api/index.html b/docs/api/sdxl-api/index.html index a820b516..33fe10f1 100644 --- a/docs/api/sdxl-api/index.html +++ b/docs/api/sdxl-api/index.html @@ -5,13 +5,13 @@ SDXL API usage | Astria documentation - +

SDXL API usage

For general tips on SDXL training and inference, see SDXL training

Unlike SD15 checkpoint training, SDXL on Astria is trained as a LoRA + text-embedding. As such, inference takes place on a baseline model such as SDXL 1.0, and prompt.text should specify the loaded LoRA, such as <lora:123456:0.83> - this will load the LoRA with id=123456 at strength 0.83.

See LoRA docs on lora syntax

With SDXL you cannot combine multiple LoRAs.

Use any SDXL model from the gallery to do inference.

danger

If you are receiving a 422 error stating model_type=pti is not supported - use a checkpoint instead: change the request URL to https://api.astria.ai/tunes/666678/prompts, with 666678 as the hard-coded tune_id of SDXL 1.0 from the gallery. See the explanation above.

POST /tunes/:id/prompts

# Note the hard-coded 666678 which is the tune_id of SDXL 1.0 from the gallery
curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/666678/prompts \
-F prompt[text]="<lora:tune_id:strength> a painting of ohwx man in the style of Van Gogh" \
-F prompt[negative_prompt]="old, blemish, wrin" \
-F prompt[super_resolution]=true \
-F prompt[face_correct]=true \
-F prompt[callback]="https://optional-callback-url.com/to-your-service-when-ready?prompt_id=1"
- + \ No newline at end of file diff --git a/docs/api/tune/create/index.html b/docs/api/tune/create/index.html index e31f3276..7e148b1e 100644 --- a/docs/api/tune/create/index.html +++ b/docs/api/tune/create/index.html @@ -5,14 +5,14 @@ Create a tune | Astria documentation - +

Create a tune

Creates a new fine-tune model from training images which in turn will be used to create prompts and generate images.

Parameters

name (required)

A class name that describes the fine-tune, e.g.: man, woman, cat, dog, boy, girl, style.

title (required)

Describes the fine-tune. Ideally a UUID related to the transaction. See idempotency for more information.

images (required)

An array of training images uploaded as multipart/form-data. Provide either images or image_urls.

image_urls (required)

An array of URLs pointing to training images. Provide either images or image_urls.

callback (optional)

A webhook URL to be called when the tune is finished training. The webhook will receive a POST request with the tune object.

branch (optional)

Enum: sd15, sdxl1, fast.

info

Use branch=fast for mock testing

steps (optional)

Training steps. It is recommended to leave this blank to allow the system to set better defaults.

token (optional)

A unique short text into which the subject's features will be embedded. Default: ohwx for SDXL and sks for SD15.

face_crop (optional)

Detects faces in training images and augments the training set with cropped faces. Defaults to the account setting.

training_face_correct (optional)

Enhance training images using GFPGAN. Consider enabling if input images are low quality or low resolution. May result in over-smoothing.

base_tune_id (optional)

Train on top of a former fine-tune or a different baseline model from the gallery (id in the URL), e.g.: 690204 - Realistic Vision v5.1.

model_type (optional)

Enum: lora, pti, null for checkpoint. For SDXL1, the API will default to pti and will ignore the model_type parameter.

prompts_attributes (optional)

Array of prompts entities with all attributes. See create prompt for more information.

Returns

Returns a tune object if successful, which will start training immediately and call the callback once training is complete.

POST /tunes

# With images as multipart/form-data
# Hard coded tune id of Realistic Vision v5.1 from the gallery - https://www.astria.ai/gallery/tunes
# https://www.astria.ai/gallery/tunes/690204/prompts
curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes \
-F tune[title]="John Doe - UUID - 1234-6789-1234-56789" \
-F tune[name]=man \
-F tune[branch]="fast" \
-F tune[callback]="https://optional-callback-url.com/webhooks/astria?user_id=1&tune_id=1" \
-F tune[base_tune_id]=690204 \
-F tune[token]=ohwx \
-F "tune[prompts_attributes][0][text]=ohwx man on space circa 1979 on cover of time magazine" \
-F tune[prompts_attributes][0][callback]="https://optional-callback-url.com/webhooks/astria?user_id=1&prompt_id=1&tune_id=1" \
-F "tune[images][]=@1.jpg" \
-F "tune[images][]=@2.jpg" \
-F "tune[images][]=@3.jpg" \
-F "tune[images][]=@4.jpg"

# With image_urls as form-data
curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes \
-F tune[title]="Grumpy cat - UUID - 1234-6789-1234-56789" \
-F tune[name]=cat \
-F tune[branch]="fast" \
-F tune[callback]="https://optional-callback-url.com/to-your-service-when-ready?user_id=1&tune_id=1" \
-F tune[base_tune_id]=690204 \
-F tune[token]=ohwx \
-F "tune[image_urls][]=https://i.imgur.com/HLHBnl9.jpeg" \
-F "tune[image_urls][]=https://i.imgur.com/HLHBnl9.jpeg" \
-F "tune[image_urls][]=https://i.imgur.com/HLHBnl9.jpeg" \
-F "tune[image_urls][]=https://i.imgur.com/HLHBnl9.jpeg"

# As JSON
cat > data.json <<- EOM
{
"tune": {
"title": "Grumpy Cat - UUID - 1234-6789-1234-56789",
"name": "cat",
"branch": "fast",
"callback": "https://optional-callback-url.com/to-your-service-when-ready?user_id=1&tune_id=1",
"image_urls": [
"https://i.imgur.com/HLHBnl9.jpeg",
"https://i.imgur.com/HLHBnl9.jpeg",
"https://i.imgur.com/HLHBnl9.jpeg",
"https://i.imgur.com/HLHBnl9.jpeg"
],
"prompts_attributes": [
{
"text": "ohwx cat in space circa 1979 French illustration",
"callback": "https://optional-callback-url.com/to-your-service-when-ready?user_id=1&tune_id=1&prompt_id=1"
},
{
"text": "ohwx cat getting into trouble viral meme",
"callback": "https://optional-callback-url.com/to-your-service-when-ready?user_id=1&tune_id=1&prompt_id=1"
}
]
}
}
EOM

curl -X POST -H"Content-Type: application/json" -H "Authorization: Bearer $API_KEY" --data @data.json https://api.astria.ai/tunes

Response


[
{
"id": 1,
"title": "John Doe",
"name": "woman",
"token": "ohwx",
"base_tune_id": null,
"args": null,
"steps": null,
"face_crop": null,
"training_face_correct": false,
"ckpt_url": "https://sdbooth2-production.s3.amazonaws.com/mock",
"ckpt_urls": [
"https://sdbooth2-production.s3.amazonaws.com/mock"
],
"eta": "2023-10-02T14:32:40.363Z",
"trained_at": "2023-10-02T14:32:40.363Z",
"started_training_at": "2023-10-02T14:32:05.229Z",
"expires_at": "2023-11-01T14:32:40.363Z",
"created_at": "2023-10-02T14:32:05.067Z",
"branch": "sdxl1",
"model_type": "lora",
"updated_at": "2023-10-02T14:32:40.363Z",
"url": "https://www.astria.ai/tunes/788416.json",
"orig_images": [
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock"
]
},
{
"id": 775459,
"title": "Marry Jane",
"name": null,
"is_api": false,
"token": "ohwx",
"base_tune_id": null,
"args": null,
"steps": null,
"face_crop": null,
"training_face_correct": null,
"ckpt_url": "https://sdbooth2-production.s3.amazonaws.com/mock",
"ckpt_urls": [
"https://sdbooth2-production.s3.amazonaws.com/mock"
],
"eta": "2023-09-23T16:07:49.137Z",
"trained_at": "2023-09-23T16:07:49.137Z",
"started_training_at": "2023-09-23T16:07:37.334Z",
"expires_at": "2023-10-23T16:07:49.137Z",
"created_at": "2023-09-23T16:07:36.606Z",
"branch": "sdxl1",
"model_type": "lora",
"updated_at": "2023-09-23T16:07:49.138Z",
"url": "https://www.astria.ai/tunes/775459.json",
"orig_images": [
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock"
]
}
]
- + \ No newline at end of file diff --git a/docs/api/tune/index.html b/docs/api/tune/index.html index 27f38ef1..5f1e5054 100644 --- a/docs/api/tune/index.html +++ b/docs/api/tune/index.html @@ -5,14 +5,14 @@ The tune object | Astria documentation - +

The tune object

A Tune (or Fine-tune) represents a model that is created using training images to learn a new concept or subject. At its core, a fine-tune is a neural-network weights file (usually around 2GB) that encodes the information from the training images. A fine-tune model can be of various types (checkpoint or LoRA) and can be used to create prompts which will in turn generate images.

- + \ No newline at end of file diff --git a/docs/api/tune/list/index.html b/docs/api/tune/list/index.html index f4dfd1a3..2a8f8537 100644 --- a/docs/api/tune/list/index.html +++ b/docs/api/tune/list/index.html @@ -5,13 +5,13 @@ List all tunes | Astria documentation - +

List all tunes

Parameters

offset (optional)

Starting offset for the list of tunes. Default: 0. Current page size is 20.
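
For example (a hedged sketch, assuming offset is passed as a query parameter):

curl -X GET -H "Authorization: Bearer $API_KEY" "https://api.astria.ai/tunes?offset=20"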

Returns

An array of all fine-tunes owned by the authenticated user.

GET /tunes

curl -X GET -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes

Response


[
{
"id": 1,
"title": "John Doe",
"name": "woman",
"token": "ohwx",
"base_tune_id": null,
"args": null,
"steps": null,
"face_crop": null,
"ckpt_url": "https://sdbooth2-production.s3.amazonaws.com/mock",
"ckpt_urls": [
"https://sdbooth2-production.s3.amazonaws.com/mock"
],
"trained_at": "2023-10-02T14:32:40.363Z",
"started_training_at": "2023-10-02T14:32:05.229Z",
"expires_at": "2023-11-01T14:32:40.363Z",
"created_at": "2023-10-02T14:32:05.067Z",
"branch": "sdxl1",
"model_type": "lora",
"updated_at": "2023-10-02T14:32:40.363Z",
"url": "https://www.astria.ai/tunes/788416.json",
"orig_images": [
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock"
]
},
{
"id": 775459,
"title": "Marry Jane",
"name": null,
"is_api": false,
"token": "ohwx",
"base_tune_id": null,
"args": null,
"steps": null,
"face_crop": null,
"ckpt_url": "https://sdbooth2-production.s3.amazonaws.com/mock",
"ckpt_urls": [
"https://sdbooth2-production.s3.amazonaws.com/mock"
],
"trained_at": "2023-09-23T16:07:49.137Z",
"started_training_at": "2023-09-23T16:07:37.334Z",
"expires_at": "2023-10-23T16:07:49.137Z",
"created_at": "2023-09-23T16:07:36.606Z",
"branch": "sdxl1",
"model_type": "lora",
"updated_at": "2023-09-23T16:07:49.138Z",
"url": "https://www.astria.ai/tunes/775459.json",
"orig_images": [
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock"
]
}
]
- + \ No newline at end of file diff --git a/docs/api/tune/retrieve/index.html b/docs/api/tune/retrieve/index.html index fbbf75e3..30697362 100644 --- a/docs/api/tune/retrieve/index.html +++ b/docs/api/tune/retrieve/index.html @@ -5,13 +5,13 @@ Retrieve a tune | Astria documentation - +

Retrieve a tune

Parameters

No parameters

Returns

Returns the Tune object.

GET /tunes/:id

curl -X GET -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/1

Response

{
"id": 1,
"title": "John Doe",
"name": "woman",
"token": "ohwx",
"base_tune_id": null,
"args": null,
"steps": null,
"face_crop": null,
"ckpt_url": "https://sdbooth2-production.s3.amazonaws.com/mock",
"ckpt_urls": [
"https://sdbooth2-production.s3.amazonaws.com/mock"
],
"trained_at": "2023-10-02T14:32:40.363Z",
"started_training_at": "2023-10-02T14:32:05.229Z",
"expires_at": "2023-11-01T14:32:40.363Z",
"created_at": "2023-10-02T14:32:05.067Z",
"branch": "sdxl1",
"model_type": "lora",
"updated_at": "2023-10-02T14:32:40.363Z",
"url": "https://www.astria.ai/tunes/788416.json",
"orig_images": [
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock",
"https://sdbooth2-production.s3.amazonaws.com/mock"
]
}
- + \ No newline at end of file diff --git a/docs/category/api/index.html b/docs/category/api/index.html index f247f9fd..14099b1a 100644 --- a/docs/category/api/index.html +++ b/docs/category/api/index.html @@ -5,13 +5,13 @@ API | Astria documentation - + - + \ No newline at end of file diff --git a/docs/category/features/index.html b/docs/category/features/index.html index b2d254e4..e4e2791c 100644 --- a/docs/category/features/index.html +++ b/docs/category/features/index.html @@ -5,13 +5,13 @@ Features | Astria documentation - + - + \ No newline at end of file diff --git a/docs/category/prompts/index.html b/docs/category/prompts/index.html index 65de7605..c1099cb0 100644 --- a/docs/category/prompts/index.html +++ b/docs/category/prompts/index.html @@ -5,13 +5,13 @@ Prompts | Astria documentation - + - + \ No newline at end of file diff --git a/docs/category/tunes/index.html b/docs/category/tunes/index.html index 5367f2e3..5d7dec36 100644 --- a/docs/category/tunes/index.html +++ b/docs/category/tunes/index.html @@ -5,13 +5,13 @@ Tunes | Astria documentation - + - + \ No newline at end of file diff --git a/docs/category/use-cases/index.html b/docs/category/use-cases/index.html index 3f45f60d..da5c20dc 100644 --- a/docs/category/use-cases/index.html +++ b/docs/category/use-cases/index.html @@ -5,13 +5,13 @@ Use cases | Astria documentation - + - + \ No newline at end of file diff --git a/docs/changes/index.html b/docs/changes/index.html index 1cbfa191..00702e8b 100644 --- a/docs/changes/index.html +++ b/docs/changes/index.html @@ -3,15 +3,15 @@ -Changes | Astria documentation +Changes | Astria documentation - + - +

Changes

  • 2024-01-03 Tiled upscale for improved upscaling.
  • 2024-01-03 Multi-pass inference unlocks a new level of detail, image composition, and similarity.
  • 2023-12-14 Prompt masking feature added - to support product shots and Masked Portraits
  • 2023-11-27 LCM (Latent Consistency Models) scheduler allows inference in 6 steps
  • 2023-11-01 Multi-person inference using LoRA and SD15 models
  • 2023-10-29 Face-Swap feature added - uses training images to improve inference time similarity
  • 2023-10-22 Themes - Have ChatGPT create 10 prompts for you from a short 2-4 word theme description.
+ \ No newline at end of file diff --git a/docs/features/face-inpainting/index.html b/docs/features/face-inpainting/index.html index 01627796..6700e584 100644 --- a/docs/features/face-inpainting/index.html +++ b/docs/features/face-inpainting/index.html @@ -5,14 +5,14 @@ Face inpainting | Astria documentation - +

Face inpainting

Entire picture

source.png

Before face-inpaint

generated.png

After face-inpaint

generated.png

The Face-inpainting toggle under the Advanced section will try to detect a human face in the picture, and then run a second inference on top of the detected face to improve facial features. Note that face-inpainting requires the super-resolution toggle to be turned on in order to get more pixels to work with.
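
Through the API, this corresponds to the inpaint_faces and super_resolution prompt parameters. A minimal sketch (the prompt text is illustrative):

curl -X POST -H "Authorization: Bearer $API_KEY" https://api.astria.ai/tunes/1/prompts \
-F prompt[text]="ohwx man as a medieval knight, detailed armor" \
-F prompt[super_resolution]=true \
-F prompt[inpaint_faces]=true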

- + \ No newline at end of file diff --git a/docs/features/face-swap/index.html b/docs/features/face-swap/index.html index 399c89c1..9ad95921 100644 --- a/docs/features/face-swap/index.html +++ b/docs/features/face-swap/index.html @@ -5,13 +5,13 @@ Face Swap | Astria documentation - + - + \ No newline at end of file diff --git a/docs/features/loras/index.html b/docs/features/loras/index.html index 9a691f83..31e8388c 100644 --- a/docs/features/loras/index.html +++ b/docs/features/loras/index.html @@ -5,13 +5,13 @@ LoRAs | Astria documentation - +

LoRAs

info

This feature is mainly relevant for SD15 models. SDXL subject fine-tunes use LoRA and tend to collide with other style LoRAs.

LoRA (low-rank adaptation) is a type of model fine-tuning which takes an additional set of model weights (or parameters) and trains those on top while not changing the existing model. For this reason LoRA training is faster compared to full checkpoint training, as it only trains a small number of weights compared to the full model. Additionally, a LoRA can be loaded on top of any base model, and multiple LoRAs can be combined.

The LoRA ecosystem contains many different types of weight structures with different ranks. For this reason some LoRAs might not be compatible across different UIs or platforms.

LoRAs can be used to enhance the quality of the image or deepen a specific style that is desired.

Astria provides a LoRA gallery and allows importing external LoRAs. To use a LoRA, go to the generate tab and use the LoRA syntax as such: <lora:name:weight>. For example, a very common usage, <lora:epi_noiseoffset2:0.5>, will load the Noise offset LoRA at strength 0.5, which will make the image a bit darker and deepen the contrast.

You can also combine LoRAs as such:

<lora:epi_noiseoffset2:0.5><lora:FilmVelvia2:0.5><lora:add_detail:0.5><lora:epiCRealismHelper:0.2>
info

Note that LoRAs can reduce the similarity of trained subjects when used together. To avoid this, use a low strength for the LoRA.

Some LoRAs may be trained on trigger words which are required to appear in the prompt text. Check the LoRA information by clicking on the Website link and reading its docs and trigger words.
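
For example (an illustrative prompt; the LoRA names are the ones mentioned above), a subject prompt with low-strength LoRAs might look like:

ohwx man wearing a suit, professional studio photo <lora:add_detail:0.3> <lora:epi_noiseoffset2:0.3>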

- + \ No newline at end of file diff --git a/docs/features/multi-pass-inference/index.html b/docs/features/multi-pass-inference/index.html index 73d1336c..6b567d0b 100644 --- a/docs/features/multi-pass-inference/index.html +++ b/docs/features/multi-pass-inference/index.html @@ -5,13 +5,13 @@ Multi-Pass inference | Astria documentation - +
-

Multi-Pass inference

BETA

Gym

source.png

Hiking

generated.png

Posh

generated.png

Swimsuit

generated.png

Fashion

source.png

Starwars

generated.png

Mech-warrior

generated.png

Dreamy

generated.png

The prompt is divided by the BREAK keyword such as:

  • 1st base prompt used to generate the background and scene.
  • 2nd common prompt that's concatenated to each person prompt and the base prompt to avoid repetition.
  • 3rd person prompt - describes what the person is wearing or holding.

The advantages of using multi-pass inference are:

  • The base prompt first pass allows using the full breadth of an unmodified model. As such, image composition, fabrics, textures, and details render much better.
  • Separating the person from the background allows prompting in more detail on clothing, accessories, and colors.
  • Better similarity to the original subject.

See pricing for the cost of this feature.

Example prompt

realistic digital painting, astronaut in a garden on a spring day, by martine johanna and simon stalenhag and chie yoshii and casey weldon and wlop, ornate, dynamic, particulate, rich colors, intricate, elegant, highly detailed, harpers bazaar art, fashion magazine, smooth, sharp focus, 8 k, octane rende --mask_prompt foreground --mask_negative clothes --mask_invert --mask_dilate -20 --hires_denoising_strength 0.2
num_images=1
negative_prompt=clay, text, watermark, padding, cropped, typography
seed=
steps=30
cfg_scale=
controlnet=pose
input_image_url=https://sdbooth2-production.s3.amazonaws.com/d6ff3soq5pok5tlbcanf599vkw06
mask_image_url=
denoising_strength=
controlnet_conditioning_scale=
controlnet_txt2img=false
super_resolution=true
inpaint_faces=false
face_correct=true
film_grain=false
face_swap=false
hires_fix=true
prompt_expansion=false
ar=1:1
scheduler=dpm++sde_karras
color_grading=
use_lpw=true
w=
h=
- +

Multi-Pass inference

BETA

Gym

source.png

Hiking

generated.png

Posh

generated.png

Swimsuit

generated.png

Fashion

source.png

Starwars

generated.png

Mech-warrior

generated.png

Dreamy

generated.png

Syntax

The prompt is divided by the BREAK keyword such as:

  • 1st base prompt used to generate the background and scene.
  • 2nd common prompt that's concatenated to each person prompt and the base prompt to avoid repetition.
  • 3rd person prompt - describes what the person is wearing or holding.

To load the person's checkpoint or LoRA, use the <lora:ID:1> syntax.

tip

Note that multi-pass works particularly well with SD15 models; SDXL models are not recommended.

Advantages

  • The base prompt first pass allows using the full breadth of an unmodified model. As such, image composition, fabrics, textures, and details render much better.
  • Separating the person from the background allows prompting in more detail on clothing, accessories, and colors.
  • Better similarity to the original subject.

Check the gallery for more examples.

See pricing for the cost of this feature.

Example prompt

a model in preppy style, old money aesthetic, posh style, elite school stlye, luxurious style, gossip girl neo-prep style, ralph lauren style, country club style, ivy league style
--tiled_upscale
BREAK
BREAK sks woman old money aesthetic, posh style, elite school, luxurious, gossip girl neo-prep style, ralph lauren style, ivy league style <lora:753945:1>
num_images=4
negative_prompt=asian, , old, , multiple heads, 2 heads, elongated body, double image, 2 faces, multiple people, double head, , (nsfw), nsfw, nsfw, nsfw, nude, nude, nude, porn, porn, porn, naked, naked, nude, porn, black and white, monochrome, skin marks, skin lesions, pimples, <cyberrealistic-neg>, <badhandv4>, <negative-hand>, black and white, monochrome, greyscale, black and white, monochrome, greyscale, (black and white, monochrome, greyscale), frilly, frilled, lacy, ruffled, armpit hair, victorian, (sunglasses), (sunglasses), (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, ex BREAK BREAK sunglasses, snow, skiing, ski resorts, snowboard
seed=33504
steps=30
cfg_scale=
controlnet=
input_image_url=
mask_image_url=
denoising_strength=
controlnet_conditioning_scale=
controlnet_txt2img=false
super_resolution=true
inpaint_faces=true
face_correct=false
film_grain=true
face_swap=true
hires_fix=false
prompt_expansion=false
ar=1:1
scheduler=dpm++sde_karras
color_grading=
use_lpw=true
w=512
h=768
+ \ No newline at end of file diff --git a/docs/features/multiperson/index.html b/docs/features/multiperson/index.html index 08da0dda..ca0acb92 100644 --- a/docs/features/multiperson/index.html +++ b/docs/features/multiperson/index.html @@ -5,7 +5,7 @@ Multi-Person | Astria documentation - + @@ -13,7 +13,7 @@

Multi-Person inference

BETA

Multiperson pose input image

source.png

Multiperson result

generated.png

Overview

Multi-person inference is a feature that allows you to generate images with multiple people in them. You can see some of the examples in the gallery or in the community feed.

See pricing for the cost of this feature.

Requirements

It is recommended to use SD15 models based on RealisticVision V5.1 as SD15 works better than SDXL for this use-case.

Step 1: Training

Create a fine-tune for each person. The model can be either a LoRA or a checkpoint.

img.png

Step 2 - Inference

Provide an input_image and controlnet=pose so that the generation has a solid image composition to start with. If no input_image is given, a constant pose input image (with arms crossed) will be used for each person.

The prompt is divided by the BREAK keyword such as:

  • 1st base prompt used to generate the background and scene.
  • 2nd common prompt that's concatenated to each person prompt and the base prompt to avoid repetition.
  • 3rd and onwards - one prompt per person, with its LoRA syntax reference (even for checkpoints).

Example prompt for multi-person inference

Glamour aristocrat party 2girl BREAK Zeiss Canon Mark D5, wallpaper, photorealistic, detailed skin BREAK (ohwx woman) <lora:849330:1> BREAK (ohwx woman) <lora:861629:1>
num_images=1
negative_prompt=hat, open mouth, text, oversaturated, ugly, 3d, render, cartoon, grain, low-res, kitsch ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, mutation, mutated, extra limbs, extra legs, extra arms, disfigured, deformed, cross-eye, body out of shot
seed=
steps=
cfg_scale=
controlnet=pose
input_image_url=https://sdbooth2-production.s3.amazonaws.com/2vxpjloxggnv9s4tw33ckn5xzdnp
mask_image_url=
denoising_strength=
controlnet_conditioning_scale=
controlnet_txt2img=true
super_resolution=true
inpaint_faces=true
face_correct=false
film_grain=true
face_swap=true
hires_fix=true
prompt_expansion=false
ar=1:1
scheduler=dpm++sde_karras
color_grading=
use_lpw=true
w=768
h=512
- + \ No newline at end of file diff --git a/docs/features/prompt-expansion/index.html b/docs/features/prompt-expansion/index.html index 0e603d81..ca0f9e98 100644 --- a/docs/features/prompt-expansion/index.html +++ b/docs/features/prompt-expansion/index.html @@ -5,13 +5,13 @@ Prompt expansion | Astria documentation - + - + \ No newline at end of file diff --git a/docs/features/prompt-masking/index.html b/docs/features/prompt-masking/index.html index 25007d0a..daa88fa6 100644 --- a/docs/features/prompt-masking/index.html +++ b/docs/features/prompt-masking/index.html @@ -5,13 +5,13 @@ Prompt Masking | Astria documentation - +

Prompt Masking

Prompt masking uses a short text to create a mask from the input image. The mask can then be used to inpaint parts of the image. Use the parameters below as part of the prompt text to enable auto-masking.

Prompt masking can be used for product shots or Masked Portraits.

Syntax

The parameters below should be included as text inside the prompt text.

--mask_prompt

A short text like foreground or face, head or person.

--mask_negative

A space separated list of words that should not appear in the mask. e.g: clothes hat shoes

--mask_invert

Inverts the mask

--mask_dilate

Dilates the mask. Negative values will erode the mask.

--hires_denoising_strength

Denoising strength for hires-fix. Use hi-res fix with prompt-masking to smooth the colors and blending. Range 0-1. Default: 0.4.

realistic digital painting, astronaut in a garden on a spring day, by martine johanna and simon stalenhag and chie yoshii and casey weldon and wlop, ornate, dynamic, particulate, rich colors, intricate, elegant, highly detailed, harpers bazaar art, fashion magazine, smooth, sharp focus, 8 k, octane rende --mask_prompt foreground --mask_negative clothes --mask_invert --mask_dilate -20 --hires_denoising_strength 0.1
num_images=1
negative_prompt=clay, text, watermark, padding, cropped, typography
seed=
steps=30
cfg_scale=
controlnet=pose
input_image_url=https://sdbooth2-production.s3.amazonaws.com/esfd53purhhcijhmzka4c364x6lb
mask_image_url=
denoising_strength=
controlnet_conditioning_scale=
controlnet_txt2img=false
super_resolution=true
inpaint_faces=false
face_correct=true
film_grain=false
face_swap=false
hires_fix=true
prompt_expansion=false
ar=1:1
scheduler=dpm++sde_karras
color_grading=
use_lpw=true
w=
h=
- + \ No newline at end of file diff --git a/docs/features/tiled-upscale/index.html b/docs/features/tiled-upscale/index.html index a712ebfa..c9b26f48 100644 --- a/docs/features/tiled-upscale/index.html +++ b/docs/features/tiled-upscale/index.html @@ -5,13 +5,13 @@ Tiled upscale | Astria documentation - +

Tiled upscale

BETA

Super-Resolution

source.png

HiRes fix

source.png

Tiled upscale

source.png

Super-Resolution

source.png

HiRes fix

source.png

Tiled upscale

source.png

Tiled upscale iterates over the image and upscales each tile individually. This preserves the original training resolution for each tile, allowing sharper and better details.

To enable tiled upscale, add --tiled_upscale to the prompt text.

See pricing for the cost of this feature.

Example prompt for tiled upscale for the above images:

photograph of a woman, (smiling facial expression:1.1), textured skin, goosebumps, (blonde hair:1.2), (unbuttoned white shirt:1.2), distressed boyfriend jeans, long sleeves, (candlelight,chiaroscuro:1.1), cowboy shot, dark and mysterious cave with unique rock formations and hidden wonders, perfect eyes, Porta 160 color, shot on ARRI ALEXA 65, bokeh, sharp focus on subject, shot by Don McCullin --tiled_upscale
num_images=2
negative_prompt=nude, nsfw, (CyberRealistic_Negative-neg:0.8), (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation
seed=43
steps=30
cfg_scale=
controlnet=
input_image_url=
mask_image_url=
denoising_strength=
controlnet_conditioning_scale=
controlnet_txt2img=false
super_resolution=true
inpaint_faces=false
face_correct=false
film_grain=false
face_swap=false
hires_fix=false
prompt_expansion=false
ar=1:1
scheduler=dpm++sde_karras
color_grading=
use_lpw=true
w=512
h=768
- + \ No newline at end of file diff --git a/docs/use-cases/ai-photoshoot/index.html b/docs/use-cases/ai-photoshoot/index.html index 79434661..cdaac600 100644 --- a/docs/use-cases/ai-photoshoot/index.html +++ b/docs/use-cases/ai-photoshoot/index.html @@ -5,13 +5,13 @@ AI Photoshoot | Astria documentation - +

AI Photoshoot

Overview

The term "AI Photoshoot" or "AI Headshots" or "AI Avatars" refers to the process of creating a generative AI model from around 20 images of a person. This model can then be used to create professional photography imagery of the person, without requiring high-end camera equipment, lighting, or wardrobe.

AI headshots are particularly useful for social profile photos, such as those used on LinkedIn or Facebook, as well as on dating sites.

Another recent trend is the creation of virtual social influencers - consistent characters that do not exist in reality, but can be generated consistently in different scenes and everyday life.

Training images

source.png

Output images

generated.png

Steps

1. Create a model

At this stage, we will create a 2GB numeric (AI) model file that contains the "features" of a person. This is not the part where we generate new images, but only train a dedicated AI model.

To begin, go to the New tune page.

Title - Enter the person's name, e.g. Elon Musk, or choose whatever title fits your needs. The title is not part of the actual training of the model.

Class Name - Enter man or woman, or possibly boy, girl, cat, or dog. This is highly important as it is a part of the actual technical training of your model. We automatically generate images of the "class" while training, and by comparing them to your images (the training set), the model 'learns' your subject's unique features.

Base tune - Select a baseline model on which you would like to train. For realistic generations, we recommend using Realistic Vision v5.1, while for more artistic trainings, use Deliberate.

Other settings are optional and can be changed to your preference.

2. Generate images

Even before the model is ready, you can prepare a queue of prompts (texts) that will be fed into the model and used to generate images.

Tips for training images

  1. Upload both portrait and full body shots of the person
  2. Use 26 pictures of your subject. Preferably cropped to 1:1 aspect ratio.
  3. Use 6 photos of full body or entire object + 10 medium shot photos from the chest up + 10 close-ups.
  4. Variation is key - Change body pose for every picture, and use pictures from different days, backgrounds, and lighting. Every picture of your subject should introduce new info about your subject.
  5. Avoid pictures taken at the same hour/day. For example, a few pictures with the same shirt will make the model learn the shirt as part of the subject.
  6. Always pick a new background.
  7. Do not upload pictures mixed with other people.
  8. Do not upload funny faces.

Tips for inference

  1. Use ohwx woman/man at the beginning of the sentence.
  2. Textual inversions can reduce similarity; try to avoid them, specifically TIs such as ng_deepnegative_v1_75t or CyberRealistic.
  3. LoRAs can reduce similarity. Keep LoRA strength low, as shown in the example below.
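
For example, an illustrative prompt that follows these tips: ohwx man wearing a business suit, professional headshot, studio lighting, sharp focus - keeping any added LoRA at a strength of around 0.3 or lower.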
- + \ No newline at end of file diff --git a/docs/use-cases/controlnet/index.html b/docs/use-cases/controlnet/index.html index 653a2a3f..e9632ddc 100644 --- a/docs/use-cases/controlnet/index.html +++ b/docs/use-cases/controlnet/index.html @@ -5,13 +5,13 @@ Controlnet | Astria documentation - + - + \ No newline at end of file diff --git a/docs/use-cases/faq/index.html b/docs/use-cases/faq/index.html index ec5b327e..074a3144 100644 --- a/docs/use-cases/faq/index.html +++ b/docs/use-cases/faq/index.html @@ -5,13 +5,13 @@ FAQ | Astria documentation - +

FAQ

What image size should I upload?
Is there a size limit to uploads?

There is no hard limit on image uploads, though the service does have some protections against JPEG bombs. A reasonable limit would be 3MB per image. The images should be at least 512x512 pixels, since models train at 512x512, but it is also a good idea to leave some headroom for face-cropping, so 1024x1024 or 2048x2048 could be a good target resize. Finally, make sure the compression level does not introduce artifacts to the uploaded images, as the model is very sensitive to JPEG artifacts.

Why isn't it free?

For each model created using your training image, Astria creates an AI model using high-end GPU machines, and then serves those models to allow image generation. This process is expensive.

I didn't get my images. Where can I see them?

Check out your tunes page and click on the tune thumbnail to see your results.

Can I get higher resolution pictures?

Toggle the Super-resolution option in the prompt Advanced section.

How do I download my images?

Go to https://www.astria.ai/tunes, click on the tune thumbnail, and hit Download to save the pictures of each prompt.

Generated images look weird. What happened?

See the Fine-tuning guide and the AI Photoshoot guide for tips on how to get the best results.

Can I download CKPT file? How do I use it?

  1. Once the fine-tune is done processing, you should be able to see TAR and CKPT buttons at the top of the fine-tune page.
  2. You can use the CKPT with software like Automatic1111 and ComfyUI.

I would like to use your API for my service or app. Would you be able to handle the load?

Astria is already powering some of the biggest fine-tuning companies and applications on the web and in app stores. All jobs are handled in parallel, and there is no wait time or queuing.

What happens to my model, training pictures and image generations?

The entire fine-tune (including CKPT file, model, training pictures, generated images, prompts) is kept for 1 month, at the end of which it is all deleted. You can delete it at any time by clicking on the delete button in the fine-tunes page.

Can I extend my model beyond 30 days?

You can opt to extend model storage on the billing page.

Can I train a model for more than one person?

You can train a fine-tune using class_name=couple and get amazing results. Make sure that the couple appears together in each of the training images. In this case it might be harder for the model to create variation and to generate long shots and complex scenes.


Fine-tuning guide

The guide for high quality avatars and AI photography

When building an app for avatars or a professional AI photoshoot, you should consider two aspects:

  1. Similarity to the original subject
  2. High-quality generations that look professional

Tracking subject similarity

Make sure to test results on a person you know closely and whose similarity you can judge. Create a list of candidate prompts and run them on 2-3 subjects. For each prompt, ask the person to rate how many of the images look similar. Finally, sum the ratios for each prompt and compare the prompts. Improve or replace a prompt if similarity is low. Alternatively, consider switching to a different base model to match ethnicity or to avoid biased models.
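
A minimal Python sketch of this bookkeeping, with made-up ratings for illustration: for each prompt, sum the fraction of similar-looking images across subjects and compare the totals.

# Hypothetical similarity ratings: per prompt, per subject, "similar images / generated images".
ratings = {
    "ohwx man, business portrait, studio lighting": {"subject_a": 6/8, "subject_b": 5/8, "subject_c": 7/8},
    "ohwx man as an astronaut, cinematic":          {"subject_a": 3/8, "subject_b": 4/8, "subject_c": 2/8},
}

scores = {prompt: sum(by_subject.values()) for prompt, by_subject in ratings.items()}
for prompt, score in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{score:.2f}  {prompt}")
# Prompts with low totals are candidates for rewriting, or a sign to try a different base model.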

Fine-tuning training tips

  1. Enable Face-detection on your account settings page - this augments the training dataset with face crops from the original training images. It should increase similarity to the subject, but can also bias generations toward close-ups instead of nice medium or long shots; in that case you might need to adjust the prompts to use emphasis weights.
  2. Base model - Select plain SD15 for better resemblance, or Realistic Vision V5.1 for nicer pictures; both are good for realistic results. If you'd like generations that are more artistic, consider Deliberate or Reliberate as a base model. For more ethnic diversity and possibly better similarity to subjects, consider Life Like Ethnicities. Finally, we suggest trying training on SDXL with text-embedding + LoRA.

Prompt generation tips

  1. Set width and height to 512 x 640 to match Instagram's 4:5 aspect ratio and create good portraits. Avoid higher aspect ratios, as you might get artifacts such as duplicate heads.
  2. Enable face-inpainting - This is one of Astria’s unique features and allows creating long shots while avoiding deformed faces.
  3. Weighted prompts - when a prompt has multiple keywords that collide and reduce similarity, use the parenthesis syntax for emphasis to increase similarity, such as (ohwx man). On the flip side, try to avoid weighted prompts altogether where possible, to preserve similarity to the subject.
  4. Use Controlnet with an input image to preserve composition and create unique long shots. Enable controlnet_txt2img if you'd like to create more variation and draw more from the prompt, and increase denoising_strength to 1 (see the example prompt after this list).
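
Putting these tips together, a hypothetical prompt configuration in the parameter style used elsewhere in this documentation (all values and the reference-image placeholder are illustrative, not prescriptive):

ohwx woman as a CEO in a modern office, natural light, medium shot
w=512
h=640
controlnet=pose
controlnet_txt2img=true
denoising_strength=1
input_image_url=<URL of a reference pose image>
inpaint_faces=true
super_resolution=true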

Masked portraits

BETA

Input image

source.png

Result using RV v5.1

generated.png

Using Cartoon

generated.png

Using Flat

generated.png


Use the prompt masking feature to embed a person into a scene. This essentially allows you to create AI photography or avatars without fine-tuning.

See pricing for the cost of this feature.

Example prompt

realistic digital painting, astronaut in a garden on a spring day, by martine johanna and simon stalenhag and chie yoshii and casey weldon and wlop, ornate, dynamic, particulate, rich colors, intricate, elegant, highly detailed, harpers bazaar art, fashion magazine, smooth, sharp focus, 8 k, octane rende --mask_prompt foreground --mask_negative clothes --mask_invert --mask_dilate -20 --hires_denoising_strength 0.2
num_images=1
negative_prompt=clay, text, watermark, padding, cropped, typography
seed=
steps=30
cfg_scale=
controlnet=pose
input_image_url=https://sdbooth2-production.s3.amazonaws.com/d6ff3soq5pok5tlbcanf599vkw06
mask_image_url=
denoising_strength=
controlnet_conditioning_scale=
controlnet_txt2img=false
super_resolution=true
inpaint_faces=false
face_correct=true
film_grain=false
face_swap=false
hires_fix=true
prompt_expansion=false
ar=1:1
scheduler=dpm++sde_karras
color_grading=
use_lpw=true
w=
h=

SDXL training

Overview

Stable Diffusion XL or SDXL is the latest image generation model that is tailored towards more photorealistic outputs with more detailed imagery and composition. SDXL can generate realistic faces, legible text within the images, and better image composition, all while using shorter and simpler prompts.

info

LoRA + Text-embedding is currently the only option for fine-tuning SDXL.

Input training images

source.png

Output images

generated.png

Training tips

The default token for SDXL is ohwx, and it will be set automatically if none is specified.

Inference tips

  1. Do not copy and paste prompts from SD15
  2. Do not use textual-inversions such as easynegative or badhands from SD15
  3. Consider activating face-swap and face-inpainting (which in turn requires super-resolution) - this is the biggest boost you can get to increase similarity to the subject.
  4. Use clean, small, concise prompts - usually up to 15 words.
  5. Avoid long negatives - these will decrease similarity to the subject.
  6. Start with baseline SDXL 1.0 inference before moving to other base models. Most custom SDXL models are biased and may reduce similarity. Models that we noticed work okay are ZavyChromaXL and ClearChromaXL.

All above tips will help increase similarity to the original subject.
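
For example, a hypothetical SDXL prompt following these tips - a short positive prompt, a short negative, face-swap and face-inpainting enabled together with super-resolution, and a recommended resolution from the aspect ratio table below (all values are illustrative):

ohwx man wearing a gray suit, professional studio portrait
negative_prompt=blurry, low quality
w=896
h=1152
super_resolution=true
inpaint_faces=true
face_swap=true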

Aspect ratios

The aspect ratios below are recommended for SDXL inference, since these were also used for training.

aspect: width, height
0.5: 704, 1408
0.52: 704, 1344
0.57: 768, 1344
0.6: 768, 1280
0.68: 832, 1216
0.72: 832, 1152
0.78: 896, 1152
0.82: 896, 1088
0.88: 960, 1088
0.94: 960, 1024
1.0: 1024, 1024
1.07: 1024, 960
1.13: 1088, 960
1.21: 1088, 896
1.29: 1152, 896
1.38: 1152, 832
1.46: 1216, 832
1.67: 1280, 768
1.75: 1344, 768
1.91: 1344, 704
2.0: 1408, 704
2.09: 1472, 704
2.4: 1536, 640
2.5: 1600, 640
2.89: 1664, 576
3.0: 1728, 576
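
If you pick the target size programmatically, a small sketch like the one below (plain Python, not part of the API; the helper name is hypothetical) selects the closest recommended resolution for a desired aspect ratio:

# Recommended SDXL resolutions (width, height) from the table above.
SDXL_BUCKETS = [
    (704, 1408), (704, 1344), (768, 1344), (768, 1280), (832, 1216), (832, 1152),
    (896, 1152), (896, 1088), (960, 1088), (960, 1024), (1024, 1024), (1024, 960),
    (1088, 960), (1088, 896), (1152, 896), (1152, 832), (1216, 832), (1280, 768),
    (1344, 768), (1344, 704), (1408, 704), (1472, 704), (1536, 640), (1600, 640),
    (1664, 576), (1728, 576),
]

def closest_sdxl_resolution(aspect_ratio: float) -> tuple[int, int]:
    # Return the recommended (width, height) whose aspect ratio is closest to the requested one.
    return min(SDXL_BUCKETS, key=lambda wh: abs(wh[0] / wh[1] - aspect_ratio))

print(closest_sdxl_resolution(4 / 5))  # portrait, e.g. Instagram 4:5 -> (896, 1152)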

API usage

See here for API usage


Video Generation

To generate video, use the regular prompt text box and enter the frame number followed by a colon and the prompt (frame number: prompt) to activate video generation. Don't forget to use your model's token. For example:

  0: portrait of ohwx woman with eyes closed
25: portrait of ohwx woman with eyes opened
50: portrait of ohwx woman smiling

Notice that the last prompt marks the end of the video generation and will not be rendered. When not specified in the prompt, our default camera movement is:

  translation_z=0:(-0.5)
rotation_3d_x=0:(2*sin(3.14*t/100)/80)
rotation_3d_y=0:(2*sin(3.14*t/100)/40)
rotation_3d_z=0:(2*sin(3.14*t/50)/40)

The camera will slowly pull back and wiggle-rotate slightly. You can always adjust the camera movement by adding parameters at the start of the prompt. For example:

  translation_x=0:(-2)
translation_y=0:(1)
translation_z=0:(0.5)
rotation_3d_x=0:(0)
rotation_3d_y=0:(1.5)
rotation_3d_z=0:(0)
0: portrait of ohwx woman with eyes closed
25: portrait of ohwx woman with eyes opened
50: portrait of ohwx woman smiling

The X-axis is a left/right translation; a positive translation shifts the camera to the right.

The Y-axis is an up/down translation; a positive translation shifts the camera upward.

The Z-axis is a forward/backward translation; a positive translation shifts the camera forward.

Parameters

Here is a partial list of parameters you can use in the prompt line to achieve certain control and effects:

strength_schedule=0:(0.50) How “loose” the image generation is from the previous frame. Lower numbers increase the probability of realizing the next prompt, but decrease the probability of temporal coherence.

seed=1 A starting point for an outcome (-1=random starting point)

diffusion_cadence=2 Blending of frames in the sequence
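
For example, a hypothetical prompt that combines these parameters with the frame syntax shown above (values are illustrative):

strength_schedule=0:(0.55)
seed=42
diffusion_cadence=2
0: portrait of ohwx woman in a forest, golden hour
30: portrait of ohwx woman in a forest, looking up
60: portrait of ohwx woman in a forest, smiling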

Check out the video gallery for inspiration. You can always copy/paste prompts to your tune. Don’t forget to change the token accordingly.

The video module is based on the Deforum Stable Diffusion notebook, and uses the same parameters.


Astria documentation

Fine-tuning and high quality image generation


Markdown page example

You don't need React to write simple standalone pages.
